]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9-3.3.6-201205130001.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9-3.3.6-201205130001.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index 0c083c5..bf13011 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,9 +51,11 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 @@ -69,6 +74,7 @@ Image
38 Module.markers
39 Module.symvers
40 PENDING
41 +PERF*
42 SCCS
43 System.map*
44 TAGS
45 @@ -92,19 +98,24 @@ bounds.h
46 bsetup
47 btfixupprep
48 build
49 +builtin-policy.h
50 bvmlinux
51 bzImage*
52 capability_names.h
53 capflags.c
54 classlist.h*
55 +clut_vga16.c
56 +common-cmds.h
57 comp*.log
58 compile.h*
59 conf
60 config
61 config-*
62 config_data.h*
63 +config.c
64 config.mak
65 config.mak.autogen
66 +config.tmp
67 conmakehash
68 consolemap_deftbl.c*
69 cpustr.h
70 @@ -115,9 +126,11 @@ devlist.h*
71 dnotify_test
72 docproc
73 dslm
74 +dtc-lexer.lex.c
75 elf2ecoff
76 elfconfig.h*
77 evergreen_reg_safe.h
78 +exception_policy.conf
79 fixdep
80 flask.h
81 fore200e_mkfirm
82 @@ -125,12 +138,15 @@ fore200e_pca_fw.c*
83 gconf
84 gconf.glade.h
85 gen-devlist
86 +gen-kdb_cmds.c
87 gen_crc32table
88 gen_init_cpio
89 generated
90 genheaders
91 genksyms
92 *_gray256.c
93 +hash
94 +hid-example
95 hpet_example
96 hugepage-mmap
97 hugepage-shm
98 @@ -145,7 +161,7 @@ int32.c
99 int4.c
100 int8.c
101 kallsyms
102 -kconfig
103 +kern_constants.h
104 keywords.c
105 ksym.c*
106 ksym.h*
107 @@ -153,7 +169,7 @@ kxgettext
108 lkc_defs.h
109 lex.c
110 lex.*.c
111 -linux
112 +lib1funcs.S
113 logo_*.c
114 logo_*_clut224.c
115 logo_*_mono.c
116 @@ -165,14 +181,15 @@ machtypes.h
117 map
118 map_hugetlb
119 maui_boot.h
120 -media
121 mconf
122 +mdp
123 miboot*
124 mk_elfconfig
125 mkboot
126 mkbugboot
127 mkcpustr
128 mkdep
129 +mkpiggy
130 mkprep
131 mkregtable
132 mktables
133 @@ -208,6 +225,7 @@ r300_reg_safe.h
134 r420_reg_safe.h
135 r600_reg_safe.h
136 recordmcount
137 +regdb.c
138 relocs
139 rlim_names.h
140 rn50_reg_safe.h
141 @@ -218,6 +236,7 @@ setup
142 setup.bin
143 setup.elf
144 sImage
145 +slabinfo
146 sm_tbl*
147 split-include
148 syscalltab.h
149 @@ -228,6 +247,7 @@ tftpboot.img
150 timeconst.h
151 times.h*
152 trix_boot.h
153 +user_constants.h
154 utsrelease.h*
155 vdso-syms.lds
156 vdso.lds
157 @@ -245,7 +265,9 @@ vmlinux
158 vmlinux-*
159 vmlinux.aout
160 vmlinux.bin.all
161 +vmlinux.bin.bz2
162 vmlinux.lds
163 +vmlinux.relocs
164 vmlinuz
165 voffset.h
166 vsyscall.lds
167 @@ -253,9 +275,11 @@ vsyscall_32.lds
168 wanxlfw.inc
169 uImage
170 unifdef
171 +utsrelease.h
172 wakeup.bin
173 wakeup.elf
174 wakeup.lds
175 zImage*
176 zconf.hash.c
177 +zconf.lex.c
178 zoffset.h
179 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
180 index d99fd9c..8689fef 100644
181 --- a/Documentation/kernel-parameters.txt
182 +++ b/Documentation/kernel-parameters.txt
183 @@ -1977,6 +1977,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
184 the specified number of seconds. This is to be used if
185 your oopses keep scrolling off the screen.
186
187 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
188 + virtualization environments that don't cope well with the
189 + expand down segment used by UDEREF on X86-32 or the frequent
190 + page table updates on X86-64.
191 +
192 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
193 +
194 pcbit= [HW,ISDN]
195
196 pcd. [PARIDE]
197 diff --git a/Makefile b/Makefile
198 index 9cd6941..92e68ff 100644
199 --- a/Makefile
200 +++ b/Makefile
201 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
202
203 HOSTCC = gcc
204 HOSTCXX = g++
205 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
206 -HOSTCXXFLAGS = -O2
207 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
208 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
209 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
210
211 # Decide whether to build built-in, modular, or both.
212 # Normally, just do built-in.
213 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
214 # Rules shared between *config targets and build targets
215
216 # Basic helpers built in scripts/
217 -PHONY += scripts_basic
218 -scripts_basic:
219 +PHONY += scripts_basic gcc-plugins
220 +scripts_basic: gcc-plugins
221 $(Q)$(MAKE) $(build)=scripts/basic
222 $(Q)rm -f .tmp_quiet_recordmcount
223
224 @@ -564,6 +565,55 @@ else
225 KBUILD_CFLAGS += -O2
226 endif
227
228 +ifndef DISABLE_PAX_PLUGINS
229 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
230 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
231 +ifndef CONFIG_UML
232 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
233 +endif
234 +endif
235 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
236 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
237 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
238 +endif
239 +ifdef CONFIG_KALLOCSTAT_PLUGIN
240 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
241 +endif
242 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
243 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
244 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
245 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
246 +endif
247 +ifdef CONFIG_CHECKER_PLUGIN
248 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
249 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
250 +endif
251 +endif
252 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
253 +ifdef CONFIG_PAX_SIZE_OVERFLOW
254 +SIZE_OVERFLOW_PLUGIN := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
255 +endif
256 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
257 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS) $(SIZE_OVERFLOW_PLUGIN)
258 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
259 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN SIZE_OVERFLOW_PLUGIN
260 +ifeq ($(KBUILD_EXTMOD),)
261 +gcc-plugins:
262 + $(Q)$(MAKE) $(build)=tools/gcc
263 +else
264 +gcc-plugins: ;
265 +endif
266 +else
267 +gcc-plugins:
268 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
269 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
270 +else
271 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
272 +endif
273 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
274 +endif
275 +endif
276 +
277 include $(srctree)/arch/$(SRCARCH)/Makefile
278
279 ifneq ($(CONFIG_FRAME_WARN),0)
280 @@ -708,7 +758,7 @@ export mod_strip_cmd
281
282
283 ifeq ($(KBUILD_EXTMOD),)
284 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
285 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
286
287 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
288 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
289 @@ -932,6 +982,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
290
291 # The actual objects are generated when descending,
292 # make sure no implicit rule kicks in
293 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
294 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
295 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
296
297 # Handle descending into subdirectories listed in $(vmlinux-dirs)
298 @@ -941,7 +993,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
299 # Error messages still appears in the original language
300
301 PHONY += $(vmlinux-dirs)
302 -$(vmlinux-dirs): prepare scripts
303 +$(vmlinux-dirs): gcc-plugins prepare scripts
304 $(Q)$(MAKE) $(build)=$@
305
306 # Store (new) KERNELRELASE string in include/config/kernel.release
307 @@ -985,6 +1037,7 @@ prepare0: archprepare FORCE
308 $(Q)$(MAKE) $(build)=.
309
310 # All the preparing..
311 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
312 prepare: prepare0
313
314 # Generate some files
315 @@ -1089,6 +1142,8 @@ all: modules
316 # using awk while concatenating to the final file.
317
318 PHONY += modules
319 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
320 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
321 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
322 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
323 @$(kecho) ' Building modules, stage 2.';
324 @@ -1104,7 +1159,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
325
326 # Target to prepare building external modules
327 PHONY += modules_prepare
328 -modules_prepare: prepare scripts
329 +modules_prepare: gcc-plugins prepare scripts
330
331 # Target to install modules
332 PHONY += modules_install
333 @@ -1201,6 +1256,7 @@ distclean: mrproper
334 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
335 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
336 -o -name '.*.rej' \
337 + -o -name '.*.rej' -o -name '*.so' \
338 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
339 -type f -print | xargs rm -f
340
341 @@ -1361,6 +1417,8 @@ PHONY += $(module-dirs) modules
342 $(module-dirs): crmodverdir $(objtree)/Module.symvers
343 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
344
345 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
346 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
347 modules: $(module-dirs)
348 @$(kecho) ' Building modules, stage 2.';
349 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
350 @@ -1487,17 +1545,21 @@ else
351 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
352 endif
353
354 -%.s: %.c prepare scripts FORCE
355 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
356 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
357 +%.s: %.c gcc-plugins prepare scripts FORCE
358 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
359 %.i: %.c prepare scripts FORCE
360 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
361 -%.o: %.c prepare scripts FORCE
362 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
363 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
364 +%.o: %.c gcc-plugins prepare scripts FORCE
365 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
366 %.lst: %.c prepare scripts FORCE
367 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
368 -%.s: %.S prepare scripts FORCE
369 +%.s: %.S gcc-plugins prepare scripts FORCE
370 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
371 -%.o: %.S prepare scripts FORCE
372 +%.o: %.S gcc-plugins prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374 %.symtypes: %.c prepare scripts FORCE
375 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
376 @@ -1507,11 +1569,15 @@ endif
377 $(cmd_crmodverdir)
378 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
379 $(build)=$(build-dir)
380 -%/: prepare scripts FORCE
381 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
382 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
383 +%/: gcc-plugins prepare scripts FORCE
384 $(cmd_crmodverdir)
385 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
386 $(build)=$(build-dir)
387 -%.ko: prepare scripts FORCE
388 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
389 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
390 +%.ko: gcc-plugins prepare scripts FORCE
391 $(cmd_crmodverdir)
392 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
393 $(build)=$(build-dir) $(@:.ko=.o)
394 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
395 index 640f909..48b6597 100644
396 --- a/arch/alpha/include/asm/atomic.h
397 +++ b/arch/alpha/include/asm/atomic.h
398 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
399 #define atomic_dec(v) atomic_sub(1,(v))
400 #define atomic64_dec(v) atomic64_sub(1,(v))
401
402 +#define atomic64_read_unchecked(v) atomic64_read(v)
403 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
404 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
405 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
406 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
407 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
408 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
409 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
410 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
411 +
412 #define smp_mb__before_atomic_dec() smp_mb()
413 #define smp_mb__after_atomic_dec() smp_mb()
414 #define smp_mb__before_atomic_inc() smp_mb()
415 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
416 index ad368a9..fbe0f25 100644
417 --- a/arch/alpha/include/asm/cache.h
418 +++ b/arch/alpha/include/asm/cache.h
419 @@ -4,19 +4,19 @@
420 #ifndef __ARCH_ALPHA_CACHE_H
421 #define __ARCH_ALPHA_CACHE_H
422
423 +#include <linux/const.h>
424
425 /* Bytes per L1 (data) cache line. */
426 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
427 -# define L1_CACHE_BYTES 64
428 # define L1_CACHE_SHIFT 6
429 #else
430 /* Both EV4 and EV5 are write-through, read-allocate,
431 direct-mapped, physical.
432 */
433 -# define L1_CACHE_BYTES 32
434 # define L1_CACHE_SHIFT 5
435 #endif
436
437 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
438 #define SMP_CACHE_BYTES L1_CACHE_BYTES
439
440 #endif
441 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
442 index da5449e..7418343 100644
443 --- a/arch/alpha/include/asm/elf.h
444 +++ b/arch/alpha/include/asm/elf.h
445 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
446
447 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
448
449 +#ifdef CONFIG_PAX_ASLR
450 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
451 +
452 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
453 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
454 +#endif
455 +
456 /* $0 is set by ld.so to a pointer to a function which might be
457 registered using atexit. This provides a mean for the dynamic
458 linker to call DT_FINI functions for shared libraries that have
459 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
460 index bc2a0da..8ad11ee 100644
461 --- a/arch/alpha/include/asm/pgalloc.h
462 +++ b/arch/alpha/include/asm/pgalloc.h
463 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
464 pgd_set(pgd, pmd);
465 }
466
467 +static inline void
468 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
469 +{
470 + pgd_populate(mm, pgd, pmd);
471 +}
472 +
473 extern pgd_t *pgd_alloc(struct mm_struct *mm);
474
475 static inline void
476 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
477 index de98a73..bd4f1f8 100644
478 --- a/arch/alpha/include/asm/pgtable.h
479 +++ b/arch/alpha/include/asm/pgtable.h
480 @@ -101,6 +101,17 @@ struct vm_area_struct;
481 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
482 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
483 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
484 +
485 +#ifdef CONFIG_PAX_PAGEEXEC
486 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
487 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
488 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
489 +#else
490 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
491 +# define PAGE_COPY_NOEXEC PAGE_COPY
492 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
493 +#endif
494 +
495 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
496
497 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
498 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
499 index 2fd00b7..cfd5069 100644
500 --- a/arch/alpha/kernel/module.c
501 +++ b/arch/alpha/kernel/module.c
502 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
503
504 /* The small sections were sorted to the end of the segment.
505 The following should definitely cover them. */
506 - gp = (u64)me->module_core + me->core_size - 0x8000;
507 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
508 got = sechdrs[me->arch.gotsecindex].sh_addr;
509
510 for (i = 0; i < n; i++) {
511 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
512 index 01e8715..be0e80f 100644
513 --- a/arch/alpha/kernel/osf_sys.c
514 +++ b/arch/alpha/kernel/osf_sys.c
515 @@ -1147,7 +1147,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
516 /* At this point: (!vma || addr < vma->vm_end). */
517 if (limit - len < addr)
518 return -ENOMEM;
519 - if (!vma || addr + len <= vma->vm_start)
520 + if (check_heap_stack_gap(vma, addr, len))
521 return addr;
522 addr = vma->vm_end;
523 vma = vma->vm_next;
524 @@ -1183,6 +1183,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
525 merely specific addresses, but regions of memory -- perhaps
526 this feature should be incorporated into all ports? */
527
528 +#ifdef CONFIG_PAX_RANDMMAP
529 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
530 +#endif
531 +
532 if (addr) {
533 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
534 if (addr != (unsigned long) -ENOMEM)
535 @@ -1190,8 +1194,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
536 }
537
538 /* Next, try allocating at TASK_UNMAPPED_BASE. */
539 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
540 - len, limit);
541 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
542 +
543 if (addr != (unsigned long) -ENOMEM)
544 return addr;
545
546 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
547 index fadd5f8..904e73a 100644
548 --- a/arch/alpha/mm/fault.c
549 +++ b/arch/alpha/mm/fault.c
550 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
551 __reload_thread(pcb);
552 }
553
554 +#ifdef CONFIG_PAX_PAGEEXEC
555 +/*
556 + * PaX: decide what to do with offenders (regs->pc = fault address)
557 + *
558 + * returns 1 when task should be killed
559 + * 2 when patched PLT trampoline was detected
560 + * 3 when unpatched PLT trampoline was detected
561 + */
562 +static int pax_handle_fetch_fault(struct pt_regs *regs)
563 +{
564 +
565 +#ifdef CONFIG_PAX_EMUPLT
566 + int err;
567 +
568 + do { /* PaX: patched PLT emulation #1 */
569 + unsigned int ldah, ldq, jmp;
570 +
571 + err = get_user(ldah, (unsigned int *)regs->pc);
572 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
573 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
574 +
575 + if (err)
576 + break;
577 +
578 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
579 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
580 + jmp == 0x6BFB0000U)
581 + {
582 + unsigned long r27, addr;
583 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
584 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
585 +
586 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
587 + err = get_user(r27, (unsigned long *)addr);
588 + if (err)
589 + break;
590 +
591 + regs->r27 = r27;
592 + regs->pc = r27;
593 + return 2;
594 + }
595 + } while (0);
596 +
597 + do { /* PaX: patched PLT emulation #2 */
598 + unsigned int ldah, lda, br;
599 +
600 + err = get_user(ldah, (unsigned int *)regs->pc);
601 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
602 + err |= get_user(br, (unsigned int *)(regs->pc+8));
603 +
604 + if (err)
605 + break;
606 +
607 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
608 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
609 + (br & 0xFFE00000U) == 0xC3E00000U)
610 + {
611 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
612 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
613 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
614 +
615 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
616 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
617 + return 2;
618 + }
619 + } while (0);
620 +
621 + do { /* PaX: unpatched PLT emulation */
622 + unsigned int br;
623 +
624 + err = get_user(br, (unsigned int *)regs->pc);
625 +
626 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
627 + unsigned int br2, ldq, nop, jmp;
628 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
629 +
630 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
631 + err = get_user(br2, (unsigned int *)addr);
632 + err |= get_user(ldq, (unsigned int *)(addr+4));
633 + err |= get_user(nop, (unsigned int *)(addr+8));
634 + err |= get_user(jmp, (unsigned int *)(addr+12));
635 + err |= get_user(resolver, (unsigned long *)(addr+16));
636 +
637 + if (err)
638 + break;
639 +
640 + if (br2 == 0xC3600000U &&
641 + ldq == 0xA77B000CU &&
642 + nop == 0x47FF041FU &&
643 + jmp == 0x6B7B0000U)
644 + {
645 + regs->r28 = regs->pc+4;
646 + regs->r27 = addr+16;
647 + regs->pc = resolver;
648 + return 3;
649 + }
650 + }
651 + } while (0);
652 +#endif
653 +
654 + return 1;
655 +}
656 +
657 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
658 +{
659 + unsigned long i;
660 +
661 + printk(KERN_ERR "PAX: bytes at PC: ");
662 + for (i = 0; i < 5; i++) {
663 + unsigned int c;
664 + if (get_user(c, (unsigned int *)pc+i))
665 + printk(KERN_CONT "???????? ");
666 + else
667 + printk(KERN_CONT "%08x ", c);
668 + }
669 + printk("\n");
670 +}
671 +#endif
672
673 /*
674 * This routine handles page faults. It determines the address,
675 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
676 good_area:
677 si_code = SEGV_ACCERR;
678 if (cause < 0) {
679 - if (!(vma->vm_flags & VM_EXEC))
680 + if (!(vma->vm_flags & VM_EXEC)) {
681 +
682 +#ifdef CONFIG_PAX_PAGEEXEC
683 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
684 + goto bad_area;
685 +
686 + up_read(&mm->mmap_sem);
687 + switch (pax_handle_fetch_fault(regs)) {
688 +
689 +#ifdef CONFIG_PAX_EMUPLT
690 + case 2:
691 + case 3:
692 + return;
693 +#endif
694 +
695 + }
696 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
697 + do_group_exit(SIGKILL);
698 +#else
699 goto bad_area;
700 +#endif
701 +
702 + }
703 } else if (!cause) {
704 /* Allow reads even for write-only mappings */
705 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
706 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
707 index 86976d0..8e07f84 100644
708 --- a/arch/arm/include/asm/atomic.h
709 +++ b/arch/arm/include/asm/atomic.h
710 @@ -15,6 +15,10 @@
711 #include <linux/types.h>
712 #include <asm/system.h>
713
714 +#ifdef CONFIG_GENERIC_ATOMIC64
715 +#include <asm-generic/atomic64.h>
716 +#endif
717 +
718 #define ATOMIC_INIT(i) { (i) }
719
720 #ifdef __KERNEL__
721 @@ -25,7 +29,15 @@
722 * atomic_set() is the clrex or dummy strex done on every exception return.
723 */
724 #define atomic_read(v) (*(volatile int *)&(v)->counter)
725 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
726 +{
727 + return v->counter;
728 +}
729 #define atomic_set(v,i) (((v)->counter) = (i))
730 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
731 +{
732 + v->counter = i;
733 +}
734
735 #if __LINUX_ARM_ARCH__ >= 6
736
737 @@ -40,6 +52,35 @@ static inline void atomic_add(int i, atomic_t *v)
738 int result;
739
740 __asm__ __volatile__("@ atomic_add\n"
741 +"1: ldrex %1, [%3]\n"
742 +" adds %0, %1, %4\n"
743 +
744 +#ifdef CONFIG_PAX_REFCOUNT
745 +" bvc 3f\n"
746 +"2: bkpt 0xf103\n"
747 +"3:\n"
748 +#endif
749 +
750 +" strex %1, %0, [%3]\n"
751 +" teq %1, #0\n"
752 +" bne 1b"
753 +
754 +#ifdef CONFIG_PAX_REFCOUNT
755 +"\n4:\n"
756 + _ASM_EXTABLE(2b, 4b)
757 +#endif
758 +
759 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
760 + : "r" (&v->counter), "Ir" (i)
761 + : "cc");
762 +}
763 +
764 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
765 +{
766 + unsigned long tmp;
767 + int result;
768 +
769 + __asm__ __volatile__("@ atomic_add_unchecked\n"
770 "1: ldrex %0, [%3]\n"
771 " add %0, %0, %4\n"
772 " strex %1, %0, [%3]\n"
773 @@ -58,6 +99,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
774 smp_mb();
775
776 __asm__ __volatile__("@ atomic_add_return\n"
777 +"1: ldrex %1, [%3]\n"
778 +" adds %0, %1, %4\n"
779 +
780 +#ifdef CONFIG_PAX_REFCOUNT
781 +" bvc 3f\n"
782 +" mov %0, %1\n"
783 +"2: bkpt 0xf103\n"
784 +"3:\n"
785 +#endif
786 +
787 +" strex %1, %0, [%3]\n"
788 +" teq %1, #0\n"
789 +" bne 1b"
790 +
791 +#ifdef CONFIG_PAX_REFCOUNT
792 +"\n4:\n"
793 + _ASM_EXTABLE(2b, 4b)
794 +#endif
795 +
796 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
797 + : "r" (&v->counter), "Ir" (i)
798 + : "cc");
799 +
800 + smp_mb();
801 +
802 + return result;
803 +}
804 +
805 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
806 +{
807 + unsigned long tmp;
808 + int result;
809 +
810 + smp_mb();
811 +
812 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
813 "1: ldrex %0, [%3]\n"
814 " add %0, %0, %4\n"
815 " strex %1, %0, [%3]\n"
816 @@ -78,6 +155,35 @@ static inline void atomic_sub(int i, atomic_t *v)
817 int result;
818
819 __asm__ __volatile__("@ atomic_sub\n"
820 +"1: ldrex %1, [%3]\n"
821 +" subs %0, %1, %4\n"
822 +
823 +#ifdef CONFIG_PAX_REFCOUNT
824 +" bvc 3f\n"
825 +"2: bkpt 0xf103\n"
826 +"3:\n"
827 +#endif
828 +
829 +" strex %1, %0, [%3]\n"
830 +" teq %1, #0\n"
831 +" bne 1b"
832 +
833 +#ifdef CONFIG_PAX_REFCOUNT
834 +"\n4:\n"
835 + _ASM_EXTABLE(2b, 4b)
836 +#endif
837 +
838 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
839 + : "r" (&v->counter), "Ir" (i)
840 + : "cc");
841 +}
842 +
843 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
844 +{
845 + unsigned long tmp;
846 + int result;
847 +
848 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
849 "1: ldrex %0, [%3]\n"
850 " sub %0, %0, %4\n"
851 " strex %1, %0, [%3]\n"
852 @@ -96,11 +202,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
853 smp_mb();
854
855 __asm__ __volatile__("@ atomic_sub_return\n"
856 -"1: ldrex %0, [%3]\n"
857 -" sub %0, %0, %4\n"
858 +"1: ldrex %1, [%3]\n"
859 +" sub %0, %1, %4\n"
860 +
861 +#ifdef CONFIG_PAX_REFCOUNT
862 +" bvc 3f\n"
863 +" mov %0, %1\n"
864 +"2: bkpt 0xf103\n"
865 +"3:\n"
866 +#endif
867 +
868 " strex %1, %0, [%3]\n"
869 " teq %1, #0\n"
870 " bne 1b"
871 +
872 +#ifdef CONFIG_PAX_REFCOUNT
873 +"\n4:\n"
874 + _ASM_EXTABLE(2b, 4b)
875 +#endif
876 +
877 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
878 : "r" (&v->counter), "Ir" (i)
879 : "cc");
880 @@ -132,6 +252,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
881 return oldval;
882 }
883
884 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
885 +{
886 + unsigned long oldval, res;
887 +
888 + smp_mb();
889 +
890 + do {
891 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
892 + "ldrex %1, [%3]\n"
893 + "mov %0, #0\n"
894 + "teq %1, %4\n"
895 + "strexeq %0, %5, [%3]\n"
896 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
897 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
898 + : "cc");
899 + } while (res);
900 +
901 + smp_mb();
902 +
903 + return oldval;
904 +}
905 +
906 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
907 {
908 unsigned long tmp, tmp2;
909 @@ -165,7 +307,9 @@ static inline int atomic_add_return(int i, atomic_t *v)
910
911 return val;
912 }
913 +#define atomic_add_return_unchecked(i, v) atomic_add_return(i, v)
914 #define atomic_add(i, v) (void) atomic_add_return(i, v)
915 +#define atomic_add_unchecked(i, v) (void) atomic_add_return_unchecked(i, v)
916
917 static inline int atomic_sub_return(int i, atomic_t *v)
918 {
919 @@ -179,7 +323,9 @@ static inline int atomic_sub_return(int i, atomic_t *v)
920
921 return val;
922 }
923 +#define atomic_sub_return_unchecked(i, v) atomic_sub_return(i, v)
924 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
925 +#define atomic_sub_unchecked(i, v) (void) atomic_sub_return_unchecked(i, v)
926
927 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
928 {
929 @@ -194,6 +340,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
930
931 return ret;
932 }
933 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg(v, o, n)
934
935 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
936 {
937 @@ -207,6 +354,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
938 #endif /* __LINUX_ARM_ARCH__ */
939
940 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
941 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
942 +{
943 + return xchg(&v->counter, new);
944 +}
945
946 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
947 {
948 @@ -219,11 +370,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
949 }
950
951 #define atomic_inc(v) atomic_add(1, v)
952 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
953 +{
954 + atomic_add_unchecked(1, v);
955 +}
956 #define atomic_dec(v) atomic_sub(1, v)
957 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
958 +{
959 + atomic_sub_unchecked(1, v);
960 +}
961
962 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
963 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
964 +{
965 + return atomic_add_return_unchecked(1, v) == 0;
966 +}
967 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
968 #define atomic_inc_return(v) (atomic_add_return(1, v))
969 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
970 +{
971 + return atomic_add_return_unchecked(1, v);
972 +}
973 #define atomic_dec_return(v) (atomic_sub_return(1, v))
974 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
975
976 @@ -239,6 +406,14 @@ typedef struct {
977 u64 __aligned(8) counter;
978 } atomic64_t;
979
980 +#ifdef CONFIG_PAX_REFCOUNT
981 +typedef struct {
982 + u64 __aligned(8) counter;
983 +} atomic64_unchecked_t;
984 +#else
985 +typedef atomic64_t atomic64_unchecked_t;
986 +#endif
987 +
988 #define ATOMIC64_INIT(i) { (i) }
989
990 static inline u64 atomic64_read(atomic64_t *v)
991 @@ -254,6 +429,19 @@ static inline u64 atomic64_read(atomic64_t *v)
992 return result;
993 }
994
995 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
996 +{
997 + u64 result;
998 +
999 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1000 +" ldrexd %0, %H0, [%1]"
1001 + : "=&r" (result)
1002 + : "r" (&v->counter), "Qo" (v->counter)
1003 + );
1004 +
1005 + return result;
1006 +}
1007 +
1008 static inline void atomic64_set(atomic64_t *v, u64 i)
1009 {
1010 u64 tmp;
1011 @@ -268,6 +456,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1012 : "cc");
1013 }
1014
1015 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1016 +{
1017 + u64 tmp;
1018 +
1019 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1020 +"1: ldrexd %0, %H0, [%2]\n"
1021 +" strexd %0, %3, %H3, [%2]\n"
1022 +" teq %0, #0\n"
1023 +" bne 1b"
1024 + : "=&r" (tmp), "=Qo" (v->counter)
1025 + : "r" (&v->counter), "r" (i)
1026 + : "cc");
1027 +}
1028 +
1029 static inline void atomic64_add(u64 i, atomic64_t *v)
1030 {
1031 u64 result;
1032 @@ -276,6 +478,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1033 __asm__ __volatile__("@ atomic64_add\n"
1034 "1: ldrexd %0, %H0, [%3]\n"
1035 " adds %0, %0, %4\n"
1036 +" adcs %H0, %H0, %H4\n"
1037 +
1038 +#ifdef CONFIG_PAX_REFCOUNT
1039 +" bvc 3f\n"
1040 +"2: bkpt 0xf103\n"
1041 +"3:\n"
1042 +#endif
1043 +
1044 +" strexd %1, %0, %H0, [%3]\n"
1045 +" teq %1, #0\n"
1046 +" bne 1b"
1047 +
1048 +#ifdef CONFIG_PAX_REFCOUNT
1049 +"\n4:\n"
1050 + _ASM_EXTABLE(2b, 4b)
1051 +#endif
1052 +
1053 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1054 + : "r" (&v->counter), "r" (i)
1055 + : "cc");
1056 +}
1057 +
1058 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1059 +{
1060 + u64 result;
1061 + unsigned long tmp;
1062 +
1063 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1064 +"1: ldrexd %0, %H0, [%3]\n"
1065 +" adds %0, %0, %4\n"
1066 " adc %H0, %H0, %H4\n"
1067 " strexd %1, %0, %H0, [%3]\n"
1068 " teq %1, #0\n"
1069 @@ -287,12 +519,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1070
1071 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1072 {
1073 - u64 result;
1074 - unsigned long tmp;
1075 + u64 result, tmp;
1076
1077 smp_mb();
1078
1079 __asm__ __volatile__("@ atomic64_add_return\n"
1080 +"1: ldrexd %1, %H1, [%3]\n"
1081 +" adds %0, %1, %4\n"
1082 +" adcs %H0, %H1, %H4\n"
1083 +
1084 +#ifdef CONFIG_PAX_REFCOUNT
1085 +" bvc 3f\n"
1086 +" mov %0, %1\n"
1087 +" mov %H0, %H1\n"
1088 +"2: bkpt 0xf103\n"
1089 +"3:\n"
1090 +#endif
1091 +
1092 +" strexd %1, %0, %H0, [%3]\n"
1093 +" teq %1, #0\n"
1094 +" bne 1b"
1095 +
1096 +#ifdef CONFIG_PAX_REFCOUNT
1097 +"\n4:\n"
1098 + _ASM_EXTABLE(2b, 4b)
1099 +#endif
1100 +
1101 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1102 + : "r" (&v->counter), "r" (i)
1103 + : "cc");
1104 +
1105 + smp_mb();
1106 +
1107 + return result;
1108 +}
1109 +
1110 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1111 +{
1112 + u64 result;
1113 + unsigned long tmp;
1114 +
1115 + smp_mb();
1116 +
1117 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1118 "1: ldrexd %0, %H0, [%3]\n"
1119 " adds %0, %0, %4\n"
1120 " adc %H0, %H0, %H4\n"
1121 @@ -316,6 +585,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1122 __asm__ __volatile__("@ atomic64_sub\n"
1123 "1: ldrexd %0, %H0, [%3]\n"
1124 " subs %0, %0, %4\n"
1125 +" sbcs %H0, %H0, %H4\n"
1126 +
1127 +#ifdef CONFIG_PAX_REFCOUNT
1128 +" bvc 3f\n"
1129 +"2: bkpt 0xf103\n"
1130 +"3:\n"
1131 +#endif
1132 +
1133 +" strexd %1, %0, %H0, [%3]\n"
1134 +" teq %1, #0\n"
1135 +" bne 1b"
1136 +
1137 +#ifdef CONFIG_PAX_REFCOUNT
1138 +"\n4:\n"
1139 + _ASM_EXTABLE(2b, 4b)
1140 +#endif
1141 +
1142 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1143 + : "r" (&v->counter), "r" (i)
1144 + : "cc");
1145 +}
1146 +
1147 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1148 +{
1149 + u64 result;
1150 + unsigned long tmp;
1151 +
1152 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1153 +"1: ldrexd %0, %H0, [%3]\n"
1154 +" subs %0, %0, %4\n"
1155 " sbc %H0, %H0, %H4\n"
1156 " strexd %1, %0, %H0, [%3]\n"
1157 " teq %1, #0\n"
1158 @@ -327,18 +626,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1159
1160 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1161 {
1162 - u64 result;
1163 - unsigned long tmp;
1164 + u64 result, tmp;
1165
1166 smp_mb();
1167
1168 __asm__ __volatile__("@ atomic64_sub_return\n"
1169 -"1: ldrexd %0, %H0, [%3]\n"
1170 -" subs %0, %0, %4\n"
1171 -" sbc %H0, %H0, %H4\n"
1172 +"1: ldrexd %1, %H1, [%3]\n"
1173 +" subs %0, %1, %4\n"
1174 +" sbc %H0, %H1, %H4\n"
1175 +
1176 +#ifdef CONFIG_PAX_REFCOUNT
1177 +" bvc 3f\n"
1178 +" mov %0, %1\n"
1179 +" mov %H0, %H1\n"
1180 +"2: bkpt 0xf103\n"
1181 +"3:\n"
1182 +#endif
1183 +
1184 " strexd %1, %0, %H0, [%3]\n"
1185 " teq %1, #0\n"
1186 " bne 1b"
1187 +
1188 +#ifdef CONFIG_PAX_REFCOUNT
1189 +"\n4:\n"
1190 + _ASM_EXTABLE(2b, 4b)
1191 +#endif
1192 +
1193 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1194 : "r" (&v->counter), "r" (i)
1195 : "cc");
1196 @@ -372,6 +685,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1197 return oldval;
1198 }
1199
1200 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1201 +{
1202 + u64 oldval;
1203 + unsigned long res;
1204 +
1205 + smp_mb();
1206 +
1207 + do {
1208 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1209 + "ldrexd %1, %H1, [%3]\n"
1210 + "mov %0, #0\n"
1211 + "teq %1, %4\n"
1212 + "teqeq %H1, %H4\n"
1213 + "strexdeq %0, %5, %H5, [%3]"
1214 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1215 + : "r" (&ptr->counter), "r" (old), "r" (new)
1216 + : "cc");
1217 + } while (res);
1218 +
1219 + smp_mb();
1220 +
1221 + return oldval;
1222 +}
1223 +
1224 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1225 {
1226 u64 result;
1227 @@ -395,21 +732,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1228
1229 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1230 {
1231 - u64 result;
1232 - unsigned long tmp;
1233 + u64 result, tmp;
1234
1235 smp_mb();
1236
1237 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1238 -"1: ldrexd %0, %H0, [%3]\n"
1239 -" subs %0, %0, #1\n"
1240 -" sbc %H0, %H0, #0\n"
1241 +"1: ldrexd %1, %H1, [%3]\n"
1242 +" subs %0, %1, #1\n"
1243 +" sbc %H0, %H1, #0\n"
1244 +
1245 +#ifdef CONFIG_PAX_REFCOUNT
1246 +" bvc 3f\n"
1247 +" mov %0, %1\n"
1248 +" mov %H0, %H1\n"
1249 +"2: bkpt 0xf103\n"
1250 +"3:\n"
1251 +#endif
1252 +
1253 " teq %H0, #0\n"
1254 -" bmi 2f\n"
1255 +" bmi 4f\n"
1256 " strexd %1, %0, %H0, [%3]\n"
1257 " teq %1, #0\n"
1258 " bne 1b\n"
1259 -"2:"
1260 +"4:\n"
1261 +
1262 +#ifdef CONFIG_PAX_REFCOUNT
1263 + _ASM_EXTABLE(2b, 4b)
1264 +#endif
1265 +
1266 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1267 : "r" (&v->counter)
1268 : "cc");
1269 @@ -432,13 +782,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1270 " teq %0, %5\n"
1271 " teqeq %H0, %H5\n"
1272 " moveq %1, #0\n"
1273 -" beq 2f\n"
1274 +" beq 4f\n"
1275 " adds %0, %0, %6\n"
1276 " adc %H0, %H0, %H6\n"
1277 +
1278 +#ifdef CONFIG_PAX_REFCOUNT
1279 +" bvc 3f\n"
1280 +"2: bkpt 0xf103\n"
1281 +"3:\n"
1282 +#endif
1283 +
1284 " strexd %2, %0, %H0, [%4]\n"
1285 " teq %2, #0\n"
1286 " bne 1b\n"
1287 -"2:"
1288 +"4:\n"
1289 +
1290 +#ifdef CONFIG_PAX_REFCOUNT
1291 + _ASM_EXTABLE(2b, 4b)
1292 +#endif
1293 +
1294 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1295 : "r" (&v->counter), "r" (u), "r" (a)
1296 : "cc");
1297 @@ -451,10 +813,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1298
1299 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1300 #define atomic64_inc(v) atomic64_add(1LL, (v))
1301 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1302 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1303 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1304 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1305 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1306 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1307 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1308 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1309 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1310 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1311 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1312 index 75fe66b..2255c86 100644
1313 --- a/arch/arm/include/asm/cache.h
1314 +++ b/arch/arm/include/asm/cache.h
1315 @@ -4,8 +4,10 @@
1316 #ifndef __ASMARM_CACHE_H
1317 #define __ASMARM_CACHE_H
1318
1319 +#include <linux/const.h>
1320 +
1321 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1322 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1323 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1324
1325 /*
1326 * Memory returned by kmalloc() may be used for DMA, so we must make
1327 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1328 index d5d8d5c..ad92c96 100644
1329 --- a/arch/arm/include/asm/cacheflush.h
1330 +++ b/arch/arm/include/asm/cacheflush.h
1331 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1332 void (*dma_unmap_area)(const void *, size_t, int);
1333
1334 void (*dma_flush_range)(const void *, const void *);
1335 -};
1336 +} __no_const;
1337
1338 /*
1339 * Select the calling method
1340 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1341 index 0e9ce8d..6ef1e03 100644
1342 --- a/arch/arm/include/asm/elf.h
1343 +++ b/arch/arm/include/asm/elf.h
1344 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1345 the loader. We need to make sure that it is out of the way of the program
1346 that it will "exec", and that there is sufficient room for the brk. */
1347
1348 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1349 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1350 +
1351 +#ifdef CONFIG_PAX_ASLR
1352 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1353 +
1354 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1355 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1356 +#endif
1357
1358 /* When the program starts, a1 contains a pointer to a function to be
1359 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1360 @@ -126,10 +133,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1361 extern void elf_set_personality(const struct elf32_hdr *);
1362 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1363
1364 -struct mm_struct;
1365 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1366 -#define arch_randomize_brk arch_randomize_brk
1367 -
1368 extern int vectors_user_mapping(void);
1369 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
1370 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
1371 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1372 index e51b1e8..32a3113 100644
1373 --- a/arch/arm/include/asm/kmap_types.h
1374 +++ b/arch/arm/include/asm/kmap_types.h
1375 @@ -21,6 +21,7 @@ enum km_type {
1376 KM_L1_CACHE,
1377 KM_L2_CACHE,
1378 KM_KDB,
1379 + KM_CLEARPAGE,
1380 KM_TYPE_NR
1381 };
1382
1383 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1384 index 53426c6..c7baff3 100644
1385 --- a/arch/arm/include/asm/outercache.h
1386 +++ b/arch/arm/include/asm/outercache.h
1387 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1388 #endif
1389 void (*set_debug)(unsigned long);
1390 void (*resume)(void);
1391 -};
1392 +} __no_const;
1393
1394 #ifdef CONFIG_OUTER_CACHE
1395
1396 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1397 index 97b440c..b7ff179 100644
1398 --- a/arch/arm/include/asm/page.h
1399 +++ b/arch/arm/include/asm/page.h
1400 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1401 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1402 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1403 unsigned long vaddr, struct vm_area_struct *vma);
1404 -};
1405 +} __no_const;
1406
1407 #ifdef MULTI_USER
1408 extern struct cpu_user_fns cpu_user;
1409 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1410 index 943504f..bf8d667 100644
1411 --- a/arch/arm/include/asm/pgalloc.h
1412 +++ b/arch/arm/include/asm/pgalloc.h
1413 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1414 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1415 }
1416
1417 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1418 +{
1419 + pud_populate(mm, pud, pmd);
1420 +}
1421 +
1422 #else /* !CONFIG_ARM_LPAE */
1423
1424 /*
1425 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1426 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1427 #define pmd_free(mm, pmd) do { } while (0)
1428 #define pud_populate(mm,pmd,pte) BUG()
1429 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1430
1431 #endif /* CONFIG_ARM_LPAE */
1432
1433 diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
1434 index e4c96cc..1145653 100644
1435 --- a/arch/arm/include/asm/system.h
1436 +++ b/arch/arm/include/asm/system.h
1437 @@ -98,6 +98,8 @@ void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int,
1438
1439 #define xchg(ptr,x) \
1440 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1441 +#define xchg_unchecked(ptr,x) \
1442 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1443
1444 extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
1445
1446 @@ -534,6 +536,13 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
1447
1448 #endif /* __LINUX_ARM_ARCH__ >= 6 */
1449
1450 +#define _ASM_EXTABLE(from, to) \
1451 +" .pushsection __ex_table,\"a\"\n"\
1452 +" .align 3\n" \
1453 +" .long " #from ", " #to"\n" \
1454 +" .popsection"
1455 +
1456 +
1457 #endif /* __ASSEMBLY__ */
1458
1459 #define arch_align_stack(x) (x)
1460 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1461 index 2958976..12ccac4 100644
1462 --- a/arch/arm/include/asm/uaccess.h
1463 +++ b/arch/arm/include/asm/uaccess.h
1464 @@ -22,6 +22,8 @@
1465 #define VERIFY_READ 0
1466 #define VERIFY_WRITE 1
1467
1468 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1469 +
1470 /*
1471 * The exception table consists of pairs of addresses: the first is the
1472 * address of an instruction that is allowed to fault, and the second is
1473 @@ -387,8 +389,23 @@ do { \
1474
1475
1476 #ifdef CONFIG_MMU
1477 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1478 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1479 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1480 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1481 +
1482 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1483 +{
1484 + if (!__builtin_constant_p(n))
1485 + check_object_size(to, n, false);
1486 + return ___copy_from_user(to, from, n);
1487 +}
1488 +
1489 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1490 +{
1491 + if (!__builtin_constant_p(n))
1492 + check_object_size(from, n, true);
1493 + return ___copy_to_user(to, from, n);
1494 +}
1495 +
1496 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1497 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1498 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1499 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1500
1501 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1502 {
1503 + if ((long)n < 0)
1504 + return n;
1505 +
1506 if (access_ok(VERIFY_READ, from, n))
1507 n = __copy_from_user(to, from, n);
1508 else /* security hole - plug it */
1509 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1510
1511 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1512 {
1513 + if ((long)n < 0)
1514 + return n;
1515 +
1516 if (access_ok(VERIFY_WRITE, to, n))
1517 n = __copy_to_user(to, from, n);
1518 return n;
1519 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1520 index 5b0bce6..becd81c 100644
1521 --- a/arch/arm/kernel/armksyms.c
1522 +++ b/arch/arm/kernel/armksyms.c
1523 @@ -95,8 +95,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1524 #ifdef CONFIG_MMU
1525 EXPORT_SYMBOL(copy_page);
1526
1527 -EXPORT_SYMBOL(__copy_from_user);
1528 -EXPORT_SYMBOL(__copy_to_user);
1529 +EXPORT_SYMBOL(___copy_from_user);
1530 +EXPORT_SYMBOL(___copy_to_user);
1531 EXPORT_SYMBOL(__clear_user);
1532
1533 EXPORT_SYMBOL(__get_user_1);
1534 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1535 index 971d65c..cc936fb 100644
1536 --- a/arch/arm/kernel/process.c
1537 +++ b/arch/arm/kernel/process.c
1538 @@ -28,7 +28,6 @@
1539 #include <linux/tick.h>
1540 #include <linux/utsname.h>
1541 #include <linux/uaccess.h>
1542 -#include <linux/random.h>
1543 #include <linux/hw_breakpoint.h>
1544 #include <linux/cpuidle.h>
1545
1546 @@ -273,9 +272,10 @@ void machine_power_off(void)
1547 machine_shutdown();
1548 if (pm_power_off)
1549 pm_power_off();
1550 + BUG();
1551 }
1552
1553 -void machine_restart(char *cmd)
1554 +__noreturn void machine_restart(char *cmd)
1555 {
1556 machine_shutdown();
1557
1558 @@ -517,12 +517,6 @@ unsigned long get_wchan(struct task_struct *p)
1559 return 0;
1560 }
1561
1562 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1563 -{
1564 - unsigned long range_end = mm->brk + 0x02000000;
1565 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1566 -}
1567 -
1568 #ifdef CONFIG_MMU
1569 /*
1570 * The vectors page is always readable from user space for the
1571 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1572 index a255c39..4a19b25 100644
1573 --- a/arch/arm/kernel/setup.c
1574 +++ b/arch/arm/kernel/setup.c
1575 @@ -109,13 +109,13 @@ struct processor processor __read_mostly;
1576 struct cpu_tlb_fns cpu_tlb __read_mostly;
1577 #endif
1578 #ifdef MULTI_USER
1579 -struct cpu_user_fns cpu_user __read_mostly;
1580 +struct cpu_user_fns cpu_user __read_only;
1581 #endif
1582 #ifdef MULTI_CACHE
1583 -struct cpu_cache_fns cpu_cache __read_mostly;
1584 +struct cpu_cache_fns cpu_cache __read_only;
1585 #endif
1586 #ifdef CONFIG_OUTER_CACHE
1587 -struct outer_cache_fns outer_cache __read_mostly;
1588 +struct outer_cache_fns outer_cache __read_only;
1589 EXPORT_SYMBOL(outer_cache);
1590 #endif
1591
1592 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1593 index f84dfe6..13e94f7 100644
1594 --- a/arch/arm/kernel/traps.c
1595 +++ b/arch/arm/kernel/traps.c
1596 @@ -259,6 +259,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1597
1598 static DEFINE_RAW_SPINLOCK(die_lock);
1599
1600 +extern void gr_handle_kernel_exploit(void);
1601 +
1602 /*
1603 * This function is protected against re-entrancy.
1604 */
1605 @@ -291,6 +293,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1606 panic("Fatal exception in interrupt");
1607 if (panic_on_oops)
1608 panic("Fatal exception");
1609 +
1610 + gr_handle_kernel_exploit();
1611 +
1612 if (ret != NOTIFY_STOP)
1613 do_exit(SIGSEGV);
1614 }
1615 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1616 index 66a477a..bee61d3 100644
1617 --- a/arch/arm/lib/copy_from_user.S
1618 +++ b/arch/arm/lib/copy_from_user.S
1619 @@ -16,7 +16,7 @@
1620 /*
1621 * Prototype:
1622 *
1623 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1624 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1625 *
1626 * Purpose:
1627 *
1628 @@ -84,11 +84,11 @@
1629
1630 .text
1631
1632 -ENTRY(__copy_from_user)
1633 +ENTRY(___copy_from_user)
1634
1635 #include "copy_template.S"
1636
1637 -ENDPROC(__copy_from_user)
1638 +ENDPROC(___copy_from_user)
1639
1640 .pushsection .fixup,"ax"
1641 .align 0
1642 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1643 index 6ee2f67..d1cce76 100644
1644 --- a/arch/arm/lib/copy_page.S
1645 +++ b/arch/arm/lib/copy_page.S
1646 @@ -10,6 +10,7 @@
1647 * ASM optimised string functions
1648 */
1649 #include <linux/linkage.h>
1650 +#include <linux/const.h>
1651 #include <asm/assembler.h>
1652 #include <asm/asm-offsets.h>
1653 #include <asm/cache.h>
1654 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1655 index d066df6..df28194 100644
1656 --- a/arch/arm/lib/copy_to_user.S
1657 +++ b/arch/arm/lib/copy_to_user.S
1658 @@ -16,7 +16,7 @@
1659 /*
1660 * Prototype:
1661 *
1662 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1663 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1664 *
1665 * Purpose:
1666 *
1667 @@ -88,11 +88,11 @@
1668 .text
1669
1670 ENTRY(__copy_to_user_std)
1671 -WEAK(__copy_to_user)
1672 +WEAK(___copy_to_user)
1673
1674 #include "copy_template.S"
1675
1676 -ENDPROC(__copy_to_user)
1677 +ENDPROC(___copy_to_user)
1678 ENDPROC(__copy_to_user_std)
1679
1680 .pushsection .fixup,"ax"
1681 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1682 index 5c908b1..e712687 100644
1683 --- a/arch/arm/lib/uaccess.S
1684 +++ b/arch/arm/lib/uaccess.S
1685 @@ -20,7 +20,7 @@
1686
1687 #define PAGE_SHIFT 12
1688
1689 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1690 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1691 * Purpose : copy a block to user memory from kernel memory
1692 * Params : to - user memory
1693 * : from - kernel memory
1694 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1695 sub r2, r2, ip
1696 b .Lc2u_dest_aligned
1697
1698 -ENTRY(__copy_to_user)
1699 +ENTRY(___copy_to_user)
1700 stmfd sp!, {r2, r4 - r7, lr}
1701 cmp r2, #4
1702 blt .Lc2u_not_enough
1703 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1704 ldrgtb r3, [r1], #0
1705 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1706 b .Lc2u_finished
1707 -ENDPROC(__copy_to_user)
1708 +ENDPROC(___copy_to_user)
1709
1710 .pushsection .fixup,"ax"
1711 .align 0
1712 9001: ldmfd sp!, {r0, r4 - r7, pc}
1713 .popsection
1714
1715 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1716 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1717 * Purpose : copy a block from user memory to kernel memory
1718 * Params : to - kernel memory
1719 * : from - user memory
1720 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1721 sub r2, r2, ip
1722 b .Lcfu_dest_aligned
1723
1724 -ENTRY(__copy_from_user)
1725 +ENTRY(___copy_from_user)
1726 stmfd sp!, {r0, r2, r4 - r7, lr}
1727 cmp r2, #4
1728 blt .Lcfu_not_enough
1729 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1730 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1731 strgtb r3, [r0], #1
1732 b .Lcfu_finished
1733 -ENDPROC(__copy_from_user)
1734 +ENDPROC(___copy_from_user)
1735
1736 .pushsection .fixup,"ax"
1737 .align 0
1738 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1739 index 025f742..8432b08 100644
1740 --- a/arch/arm/lib/uaccess_with_memcpy.c
1741 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1742 @@ -104,7 +104,7 @@ out:
1743 }
1744
1745 unsigned long
1746 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1747 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1748 {
1749 /*
1750 * This test is stubbed out of the main function above to keep
1751 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1752 index 6722627..8f97548c 100644
1753 --- a/arch/arm/mach-omap2/board-n8x0.c
1754 +++ b/arch/arm/mach-omap2/board-n8x0.c
1755 @@ -597,7 +597,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1756 }
1757 #endif
1758
1759 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1760 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1761 .late_init = n8x0_menelaus_late_init,
1762 };
1763
1764 diff --git a/arch/arm/mach-ux500/mbox-db5500.c b/arch/arm/mach-ux500/mbox-db5500.c
1765 index 2b2d51c..0127490 100644
1766 --- a/arch/arm/mach-ux500/mbox-db5500.c
1767 +++ b/arch/arm/mach-ux500/mbox-db5500.c
1768 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct device *dev,
1769 return sprintf(buf, "0x%X\n", mbox_value);
1770 }
1771
1772 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1773 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
1774
1775 static int mbox_show(struct seq_file *s, void *data)
1776 {
1777 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1778 index bb7eac3..3bade16 100644
1779 --- a/arch/arm/mm/fault.c
1780 +++ b/arch/arm/mm/fault.c
1781 @@ -172,6 +172,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1782 }
1783 #endif
1784
1785 +#ifdef CONFIG_PAX_PAGEEXEC
1786 + if (fsr & FSR_LNX_PF) {
1787 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1788 + do_group_exit(SIGKILL);
1789 + }
1790 +#endif
1791 +
1792 tsk->thread.address = addr;
1793 tsk->thread.error_code = fsr;
1794 tsk->thread.trap_no = 14;
1795 @@ -393,6 +400,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1796 }
1797 #endif /* CONFIG_MMU */
1798
1799 +#ifdef CONFIG_PAX_PAGEEXEC
1800 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1801 +{
1802 + long i;
1803 +
1804 + printk(KERN_ERR "PAX: bytes at PC: ");
1805 + for (i = 0; i < 20; i++) {
1806 + unsigned char c;
1807 + if (get_user(c, (__force unsigned char __user *)pc+i))
1808 + printk(KERN_CONT "?? ");
1809 + else
1810 + printk(KERN_CONT "%02x ", c);
1811 + }
1812 + printk("\n");
1813 +
1814 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1815 + for (i = -1; i < 20; i++) {
1816 + unsigned long c;
1817 + if (get_user(c, (__force unsigned long __user *)sp+i))
1818 + printk(KERN_CONT "???????? ");
1819 + else
1820 + printk(KERN_CONT "%08lx ", c);
1821 + }
1822 + printk("\n");
1823 +}
1824 +#endif
1825 +
1826 /*
1827 * First Level Translation Fault Handler
1828 *
1829 @@ -573,6 +607,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1830 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1831 struct siginfo info;
1832
1833 +#ifdef CONFIG_PAX_REFCOUNT
1834 + if (fsr_fs(ifsr) == 2) {
1835 + unsigned int bkpt;
1836 +
1837 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1838 + current->thread.error_code = ifsr;
1839 + current->thread.trap_no = 0;
1840 + pax_report_refcount_overflow(regs);
1841 + fixup_exception(regs);
1842 + return;
1843 + }
1844 + }
1845 +#endif
1846 +
1847 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1848 return;
1849
1850 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1851 index ce8cb19..3ec539d 100644
1852 --- a/arch/arm/mm/mmap.c
1853 +++ b/arch/arm/mm/mmap.c
1854 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1855 if (len > TASK_SIZE)
1856 return -ENOMEM;
1857
1858 +#ifdef CONFIG_PAX_RANDMMAP
1859 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1860 +#endif
1861 +
1862 if (addr) {
1863 if (do_align)
1864 addr = COLOUR_ALIGN(addr, pgoff);
1865 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1866 addr = PAGE_ALIGN(addr);
1867
1868 vma = find_vma(mm, addr);
1869 - if (TASK_SIZE - len >= addr &&
1870 - (!vma || addr + len <= vma->vm_start))
1871 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1872 return addr;
1873 }
1874 if (len > mm->cached_hole_size) {
1875 - start_addr = addr = mm->free_area_cache;
1876 + start_addr = addr = mm->free_area_cache;
1877 } else {
1878 - start_addr = addr = mm->mmap_base;
1879 - mm->cached_hole_size = 0;
1880 + start_addr = addr = mm->mmap_base;
1881 + mm->cached_hole_size = 0;
1882 }
1883
1884 full_search:
1885 @@ -124,14 +127,14 @@ full_search:
1886 * Start a new search - just in case we missed
1887 * some holes.
1888 */
1889 - if (start_addr != TASK_UNMAPPED_BASE) {
1890 - start_addr = addr = TASK_UNMAPPED_BASE;
1891 + if (start_addr != mm->mmap_base) {
1892 + start_addr = addr = mm->mmap_base;
1893 mm->cached_hole_size = 0;
1894 goto full_search;
1895 }
1896 return -ENOMEM;
1897 }
1898 - if (!vma || addr + len <= vma->vm_start) {
1899 + if (check_heap_stack_gap(vma, addr, len)) {
1900 /*
1901 * Remember the place where we stopped the search:
1902 */
1903 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
1904
1905 if (mmap_is_legacy()) {
1906 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
1907 +
1908 +#ifdef CONFIG_PAX_RANDMMAP
1909 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1910 + mm->mmap_base += mm->delta_mmap;
1911 +#endif
1912 +
1913 mm->get_unmapped_area = arch_get_unmapped_area;
1914 mm->unmap_area = arch_unmap_area;
1915 } else {
1916 mm->mmap_base = mmap_base(random_factor);
1917 +
1918 +#ifdef CONFIG_PAX_RANDMMAP
1919 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1920 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
1921 +#endif
1922 +
1923 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
1924 mm->unmap_area = arch_unmap_area_topdown;
1925 }
1926 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
1927 index 71a6827..e7fbc23 100644
1928 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
1929 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
1930 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
1931 int (*started)(unsigned ch);
1932 int (*flush)(unsigned ch);
1933 int (*stop)(unsigned ch);
1934 -};
1935 +} __no_const;
1936
1937 extern void *samsung_dmadev_get_ops(void);
1938 extern void *s3c_dma_get_ops(void);
1939 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
1940 index 5f28cae..3d23723 100644
1941 --- a/arch/arm/plat-samsung/include/plat/ehci.h
1942 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
1943 @@ -14,7 +14,7 @@
1944 struct s5p_ehci_platdata {
1945 int (*phy_init)(struct platform_device *pdev, int type);
1946 int (*phy_exit)(struct platform_device *pdev, int type);
1947 -};
1948 +} __no_const;
1949
1950 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
1951
1952 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
1953 index c3a58a1..78fbf54 100644
1954 --- a/arch/avr32/include/asm/cache.h
1955 +++ b/arch/avr32/include/asm/cache.h
1956 @@ -1,8 +1,10 @@
1957 #ifndef __ASM_AVR32_CACHE_H
1958 #define __ASM_AVR32_CACHE_H
1959
1960 +#include <linux/const.h>
1961 +
1962 #define L1_CACHE_SHIFT 5
1963 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1964 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1965
1966 /*
1967 * Memory returned by kmalloc() may be used for DMA, so we must make
1968 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1969 index 3b3159b..425ea94 100644
1970 --- a/arch/avr32/include/asm/elf.h
1971 +++ b/arch/avr32/include/asm/elf.h
1972 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1973 the loader. We need to make sure that it is out of the way of the program
1974 that it will "exec", and that there is sufficient room for the brk. */
1975
1976 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1977 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1978
1979 +#ifdef CONFIG_PAX_ASLR
1980 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1981 +
1982 +#define PAX_DELTA_MMAP_LEN 15
1983 +#define PAX_DELTA_STACK_LEN 15
1984 +#endif
1985
1986 /* This yields a mask that user programs can use to figure out what
1987 instruction set this CPU supports. This could be done in user space,
1988 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1989 index b7f5c68..556135c 100644
1990 --- a/arch/avr32/include/asm/kmap_types.h
1991 +++ b/arch/avr32/include/asm/kmap_types.h
1992 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1993 D(11) KM_IRQ1,
1994 D(12) KM_SOFTIRQ0,
1995 D(13) KM_SOFTIRQ1,
1996 -D(14) KM_TYPE_NR
1997 +D(14) KM_CLEARPAGE,
1998 +D(15) KM_TYPE_NR
1999 };
2000
2001 #undef D
2002 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2003 index f7040a1..db9f300 100644
2004 --- a/arch/avr32/mm/fault.c
2005 +++ b/arch/avr32/mm/fault.c
2006 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2007
2008 int exception_trace = 1;
2009
2010 +#ifdef CONFIG_PAX_PAGEEXEC
2011 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2012 +{
2013 + unsigned long i;
2014 +
2015 + printk(KERN_ERR "PAX: bytes at PC: ");
2016 + for (i = 0; i < 20; i++) {
2017 + unsigned char c;
2018 + if (get_user(c, (unsigned char *)pc+i))
2019 + printk(KERN_CONT "???????? ");
2020 + else
2021 + printk(KERN_CONT "%02x ", c);
2022 + }
2023 + printk("\n");
2024 +}
2025 +#endif
2026 +
2027 /*
2028 * This routine handles page faults. It determines the address and the
2029 * problem, and then passes it off to one of the appropriate routines.
2030 @@ -156,6 +173,16 @@ bad_area:
2031 up_read(&mm->mmap_sem);
2032
2033 if (user_mode(regs)) {
2034 +
2035 +#ifdef CONFIG_PAX_PAGEEXEC
2036 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2037 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2038 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2039 + do_group_exit(SIGKILL);
2040 + }
2041 + }
2042 +#endif
2043 +
2044 if (exception_trace && printk_ratelimit())
2045 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2046 "sp %08lx ecr %lu\n",
2047 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2048 index 568885a..f8008df 100644
2049 --- a/arch/blackfin/include/asm/cache.h
2050 +++ b/arch/blackfin/include/asm/cache.h
2051 @@ -7,6 +7,7 @@
2052 #ifndef __ARCH_BLACKFIN_CACHE_H
2053 #define __ARCH_BLACKFIN_CACHE_H
2054
2055 +#include <linux/const.h>
2056 #include <linux/linkage.h> /* for asmlinkage */
2057
2058 /*
2059 @@ -14,7 +15,7 @@
2060 * Blackfin loads 32 bytes for cache
2061 */
2062 #define L1_CACHE_SHIFT 5
2063 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2064 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2065 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2066
2067 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2068 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2069 index aea2718..3639a60 100644
2070 --- a/arch/cris/include/arch-v10/arch/cache.h
2071 +++ b/arch/cris/include/arch-v10/arch/cache.h
2072 @@ -1,8 +1,9 @@
2073 #ifndef _ASM_ARCH_CACHE_H
2074 #define _ASM_ARCH_CACHE_H
2075
2076 +#include <linux/const.h>
2077 /* Etrax 100LX have 32-byte cache-lines. */
2078 -#define L1_CACHE_BYTES 32
2079 #define L1_CACHE_SHIFT 5
2080 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2081
2082 #endif /* _ASM_ARCH_CACHE_H */
2083 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2084 index 1de779f..336fad3 100644
2085 --- a/arch/cris/include/arch-v32/arch/cache.h
2086 +++ b/arch/cris/include/arch-v32/arch/cache.h
2087 @@ -1,11 +1,12 @@
2088 #ifndef _ASM_CRIS_ARCH_CACHE_H
2089 #define _ASM_CRIS_ARCH_CACHE_H
2090
2091 +#include <linux/const.h>
2092 #include <arch/hwregs/dma.h>
2093
2094 /* A cache-line is 32 bytes. */
2095 -#define L1_CACHE_BYTES 32
2096 #define L1_CACHE_SHIFT 5
2097 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2098
2099 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2100
2101 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2102 index 0d8a7d6..d0c9ff5 100644
2103 --- a/arch/frv/include/asm/atomic.h
2104 +++ b/arch/frv/include/asm/atomic.h
2105 @@ -241,6 +241,16 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
2106 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2107 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2108
2109 +#define atomic64_read_unchecked(v) atomic64_read(v)
2110 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2111 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2112 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2113 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2114 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2115 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2116 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2117 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2118 +
2119 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2120 {
2121 int c, old;
2122 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2123 index 2797163..c2a401d 100644
2124 --- a/arch/frv/include/asm/cache.h
2125 +++ b/arch/frv/include/asm/cache.h
2126 @@ -12,10 +12,11 @@
2127 #ifndef __ASM_CACHE_H
2128 #define __ASM_CACHE_H
2129
2130 +#include <linux/const.h>
2131
2132 /* bytes per L1 cache line */
2133 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2134 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2135 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2136
2137 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2138 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2139 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2140 index f8e16b2..c73ff79 100644
2141 --- a/arch/frv/include/asm/kmap_types.h
2142 +++ b/arch/frv/include/asm/kmap_types.h
2143 @@ -23,6 +23,7 @@ enum km_type {
2144 KM_IRQ1,
2145 KM_SOFTIRQ0,
2146 KM_SOFTIRQ1,
2147 + KM_CLEARPAGE,
2148 KM_TYPE_NR
2149 };
2150
2151 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2152 index 385fd30..6c3d97e 100644
2153 --- a/arch/frv/mm/elf-fdpic.c
2154 +++ b/arch/frv/mm/elf-fdpic.c
2155 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2156 if (addr) {
2157 addr = PAGE_ALIGN(addr);
2158 vma = find_vma(current->mm, addr);
2159 - if (TASK_SIZE - len >= addr &&
2160 - (!vma || addr + len <= vma->vm_start))
2161 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2162 goto success;
2163 }
2164
2165 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2166 for (; vma; vma = vma->vm_next) {
2167 if (addr > limit)
2168 break;
2169 - if (addr + len <= vma->vm_start)
2170 + if (check_heap_stack_gap(vma, addr, len))
2171 goto success;
2172 addr = vma->vm_end;
2173 }
2174 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2175 for (; vma; vma = vma->vm_next) {
2176 if (addr > limit)
2177 break;
2178 - if (addr + len <= vma->vm_start)
2179 + if (check_heap_stack_gap(vma, addr, len))
2180 goto success;
2181 addr = vma->vm_end;
2182 }
2183 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2184 index c635028..6d9445a 100644
2185 --- a/arch/h8300/include/asm/cache.h
2186 +++ b/arch/h8300/include/asm/cache.h
2187 @@ -1,8 +1,10 @@
2188 #ifndef __ARCH_H8300_CACHE_H
2189 #define __ARCH_H8300_CACHE_H
2190
2191 +#include <linux/const.h>
2192 +
2193 /* bytes per L1 cache line */
2194 -#define L1_CACHE_BYTES 4
2195 +#define L1_CACHE_BYTES _AC(4,UL)
2196
2197 /* m68k-elf-gcc 2.95.2 doesn't like these */
2198
2199 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2200 index 0f01de2..d37d309 100644
2201 --- a/arch/hexagon/include/asm/cache.h
2202 +++ b/arch/hexagon/include/asm/cache.h
2203 @@ -21,9 +21,11 @@
2204 #ifndef __ASM_CACHE_H
2205 #define __ASM_CACHE_H
2206
2207 +#include <linux/const.h>
2208 +
2209 /* Bytes per L1 cache line */
2210 -#define L1_CACHE_SHIFT (5)
2211 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2212 +#define L1_CACHE_SHIFT 5
2213 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2214
2215 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2216 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2217 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2218 index 3fad89e..3047da5 100644
2219 --- a/arch/ia64/include/asm/atomic.h
2220 +++ b/arch/ia64/include/asm/atomic.h
2221 @@ -209,6 +209,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2222 #define atomic64_inc(v) atomic64_add(1, (v))
2223 #define atomic64_dec(v) atomic64_sub(1, (v))
2224
2225 +#define atomic64_read_unchecked(v) atomic64_read(v)
2226 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2227 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2228 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2229 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2230 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2231 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2232 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2233 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2234 +
2235 /* Atomic operations are already serializing */
2236 #define smp_mb__before_atomic_dec() barrier()
2237 #define smp_mb__after_atomic_dec() barrier()
2238 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2239 index 988254a..e1ee885 100644
2240 --- a/arch/ia64/include/asm/cache.h
2241 +++ b/arch/ia64/include/asm/cache.h
2242 @@ -1,6 +1,7 @@
2243 #ifndef _ASM_IA64_CACHE_H
2244 #define _ASM_IA64_CACHE_H
2245
2246 +#include <linux/const.h>
2247
2248 /*
2249 * Copyright (C) 1998-2000 Hewlett-Packard Co
2250 @@ -9,7 +10,7 @@
2251
2252 /* Bytes per L1 (data) cache line. */
2253 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2254 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2255 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2256
2257 #ifdef CONFIG_SMP
2258 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2259 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2260 index b5298eb..67c6e62 100644
2261 --- a/arch/ia64/include/asm/elf.h
2262 +++ b/arch/ia64/include/asm/elf.h
2263 @@ -42,6 +42,13 @@
2264 */
2265 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2266
2267 +#ifdef CONFIG_PAX_ASLR
2268 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2269 +
2270 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2271 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2272 +#endif
2273 +
2274 #define PT_IA_64_UNWIND 0x70000001
2275
2276 /* IA-64 relocations: */
2277 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2278 index 96a8d92..617a1cf 100644
2279 --- a/arch/ia64/include/asm/pgalloc.h
2280 +++ b/arch/ia64/include/asm/pgalloc.h
2281 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2282 pgd_val(*pgd_entry) = __pa(pud);
2283 }
2284
2285 +static inline void
2286 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2287 +{
2288 + pgd_populate(mm, pgd_entry, pud);
2289 +}
2290 +
2291 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2292 {
2293 return quicklist_alloc(0, GFP_KERNEL, NULL);
2294 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2295 pud_val(*pud_entry) = __pa(pmd);
2296 }
2297
2298 +static inline void
2299 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2300 +{
2301 + pud_populate(mm, pud_entry, pmd);
2302 +}
2303 +
2304 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2305 {
2306 return quicklist_alloc(0, GFP_KERNEL, NULL);
2307 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2308 index 1a97af3..7529d31 100644
2309 --- a/arch/ia64/include/asm/pgtable.h
2310 +++ b/arch/ia64/include/asm/pgtable.h
2311 @@ -12,7 +12,7 @@
2312 * David Mosberger-Tang <davidm@hpl.hp.com>
2313 */
2314
2315 -
2316 +#include <linux/const.h>
2317 #include <asm/mman.h>
2318 #include <asm/page.h>
2319 #include <asm/processor.h>
2320 @@ -143,6 +143,17 @@
2321 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2322 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2323 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2324 +
2325 +#ifdef CONFIG_PAX_PAGEEXEC
2326 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2327 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2328 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2329 +#else
2330 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2331 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2332 +# define PAGE_COPY_NOEXEC PAGE_COPY
2333 +#endif
2334 +
2335 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2336 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2337 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2338 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2339 index b77768d..e0795eb 100644
2340 --- a/arch/ia64/include/asm/spinlock.h
2341 +++ b/arch/ia64/include/asm/spinlock.h
2342 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2343 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2344
2345 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2346 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2347 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2348 }
2349
2350 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2351 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2352 index 449c8c0..432a3d2 100644
2353 --- a/arch/ia64/include/asm/uaccess.h
2354 +++ b/arch/ia64/include/asm/uaccess.h
2355 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2356 const void *__cu_from = (from); \
2357 long __cu_len = (n); \
2358 \
2359 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2360 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2361 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2362 __cu_len; \
2363 })
2364 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2365 long __cu_len = (n); \
2366 \
2367 __chk_user_ptr(__cu_from); \
2368 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2369 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2370 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2371 __cu_len; \
2372 })
2373 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2374 index 24603be..948052d 100644
2375 --- a/arch/ia64/kernel/module.c
2376 +++ b/arch/ia64/kernel/module.c
2377 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2378 void
2379 module_free (struct module *mod, void *module_region)
2380 {
2381 - if (mod && mod->arch.init_unw_table &&
2382 - module_region == mod->module_init) {
2383 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2384 unw_remove_unwind_table(mod->arch.init_unw_table);
2385 mod->arch.init_unw_table = NULL;
2386 }
2387 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2388 }
2389
2390 static inline int
2391 +in_init_rx (const struct module *mod, uint64_t addr)
2392 +{
2393 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2394 +}
2395 +
2396 +static inline int
2397 +in_init_rw (const struct module *mod, uint64_t addr)
2398 +{
2399 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2400 +}
2401 +
2402 +static inline int
2403 in_init (const struct module *mod, uint64_t addr)
2404 {
2405 - return addr - (uint64_t) mod->module_init < mod->init_size;
2406 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2407 +}
2408 +
2409 +static inline int
2410 +in_core_rx (const struct module *mod, uint64_t addr)
2411 +{
2412 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2413 +}
2414 +
2415 +static inline int
2416 +in_core_rw (const struct module *mod, uint64_t addr)
2417 +{
2418 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2419 }
2420
2421 static inline int
2422 in_core (const struct module *mod, uint64_t addr)
2423 {
2424 - return addr - (uint64_t) mod->module_core < mod->core_size;
2425 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2426 }
2427
2428 static inline int
2429 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2430 break;
2431
2432 case RV_BDREL:
2433 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2434 + if (in_init_rx(mod, val))
2435 + val -= (uint64_t) mod->module_init_rx;
2436 + else if (in_init_rw(mod, val))
2437 + val -= (uint64_t) mod->module_init_rw;
2438 + else if (in_core_rx(mod, val))
2439 + val -= (uint64_t) mod->module_core_rx;
2440 + else if (in_core_rw(mod, val))
2441 + val -= (uint64_t) mod->module_core_rw;
2442 break;
2443
2444 case RV_LTV:
2445 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2446 * addresses have been selected...
2447 */
2448 uint64_t gp;
2449 - if (mod->core_size > MAX_LTOFF)
2450 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2451 /*
2452 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2453 * at the end of the module.
2454 */
2455 - gp = mod->core_size - MAX_LTOFF / 2;
2456 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2457 else
2458 - gp = mod->core_size / 2;
2459 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2460 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2461 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2462 mod->arch.gp = gp;
2463 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2464 }
2465 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2466 index 609d500..7dde2a8 100644
2467 --- a/arch/ia64/kernel/sys_ia64.c
2468 +++ b/arch/ia64/kernel/sys_ia64.c
2469 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2470 if (REGION_NUMBER(addr) == RGN_HPAGE)
2471 addr = 0;
2472 #endif
2473 +
2474 +#ifdef CONFIG_PAX_RANDMMAP
2475 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2476 + addr = mm->free_area_cache;
2477 + else
2478 +#endif
2479 +
2480 if (!addr)
2481 addr = mm->free_area_cache;
2482
2483 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2484 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2485 /* At this point: (!vma || addr < vma->vm_end). */
2486 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2487 - if (start_addr != TASK_UNMAPPED_BASE) {
2488 + if (start_addr != mm->mmap_base) {
2489 /* Start a new search --- just in case we missed some holes. */
2490 - addr = TASK_UNMAPPED_BASE;
2491 + addr = mm->mmap_base;
2492 goto full_search;
2493 }
2494 return -ENOMEM;
2495 }
2496 - if (!vma || addr + len <= vma->vm_start) {
2497 + if (check_heap_stack_gap(vma, addr, len)) {
2498 /* Remember the address where we stopped this search: */
2499 mm->free_area_cache = addr + len;
2500 return addr;
2501 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2502 index 53c0ba0..2accdde 100644
2503 --- a/arch/ia64/kernel/vmlinux.lds.S
2504 +++ b/arch/ia64/kernel/vmlinux.lds.S
2505 @@ -199,7 +199,7 @@ SECTIONS {
2506 /* Per-cpu data: */
2507 . = ALIGN(PERCPU_PAGE_SIZE);
2508 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2509 - __phys_per_cpu_start = __per_cpu_load;
2510 + __phys_per_cpu_start = per_cpu_load;
2511 /*
2512 * ensure percpu data fits
2513 * into percpu page size
2514 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2515 index 20b3593..1ce77f0 100644
2516 --- a/arch/ia64/mm/fault.c
2517 +++ b/arch/ia64/mm/fault.c
2518 @@ -73,6 +73,23 @@ mapped_kernel_page_is_present (unsigned long address)
2519 return pte_present(pte);
2520 }
2521
2522 +#ifdef CONFIG_PAX_PAGEEXEC
2523 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2524 +{
2525 + unsigned long i;
2526 +
2527 + printk(KERN_ERR "PAX: bytes at PC: ");
2528 + for (i = 0; i < 8; i++) {
2529 + unsigned int c;
2530 + if (get_user(c, (unsigned int *)pc+i))
2531 + printk(KERN_CONT "???????? ");
2532 + else
2533 + printk(KERN_CONT "%08x ", c);
2534 + }
2535 + printk("\n");
2536 +}
2537 +#endif
2538 +
2539 void __kprobes
2540 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2541 {
2542 @@ -146,9 +163,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2543 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2544 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2545
2546 - if ((vma->vm_flags & mask) != mask)
2547 + if ((vma->vm_flags & mask) != mask) {
2548 +
2549 +#ifdef CONFIG_PAX_PAGEEXEC
2550 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2551 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2552 + goto bad_area;
2553 +
2554 + up_read(&mm->mmap_sem);
2555 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2556 + do_group_exit(SIGKILL);
2557 + }
2558 +#endif
2559 +
2560 goto bad_area;
2561
2562 + }
2563 +
2564 /*
2565 * If for any reason at all we couldn't handle the fault, make
2566 * sure we exit gracefully rather than endlessly redo the
2567 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2568 index 5ca674b..e0e1b70 100644
2569 --- a/arch/ia64/mm/hugetlbpage.c
2570 +++ b/arch/ia64/mm/hugetlbpage.c
2571 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2572 /* At this point: (!vmm || addr < vmm->vm_end). */
2573 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2574 return -ENOMEM;
2575 - if (!vmm || (addr + len) <= vmm->vm_start)
2576 + if (check_heap_stack_gap(vmm, addr, len))
2577 return addr;
2578 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2579 }
2580 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2581 index 13df239d..cb52116 100644
2582 --- a/arch/ia64/mm/init.c
2583 +++ b/arch/ia64/mm/init.c
2584 @@ -121,6 +121,19 @@ ia64_init_addr_space (void)
2585 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2586 vma->vm_end = vma->vm_start + PAGE_SIZE;
2587 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2588 +
2589 +#ifdef CONFIG_PAX_PAGEEXEC
2590 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2591 + vma->vm_flags &= ~VM_EXEC;
2592 +
2593 +#ifdef CONFIG_PAX_MPROTECT
2594 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2595 + vma->vm_flags &= ~VM_MAYEXEC;
2596 +#endif
2597 +
2598 + }
2599 +#endif
2600 +
2601 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2602 down_write(&current->mm->mmap_sem);
2603 if (insert_vm_struct(current->mm, vma)) {
2604 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2605 index 40b3ee9..8c2c112 100644
2606 --- a/arch/m32r/include/asm/cache.h
2607 +++ b/arch/m32r/include/asm/cache.h
2608 @@ -1,8 +1,10 @@
2609 #ifndef _ASM_M32R_CACHE_H
2610 #define _ASM_M32R_CACHE_H
2611
2612 +#include <linux/const.h>
2613 +
2614 /* L1 cache line size */
2615 #define L1_CACHE_SHIFT 4
2616 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2617 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2618
2619 #endif /* _ASM_M32R_CACHE_H */
2620 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2621 index 82abd15..d95ae5d 100644
2622 --- a/arch/m32r/lib/usercopy.c
2623 +++ b/arch/m32r/lib/usercopy.c
2624 @@ -14,6 +14,9 @@
2625 unsigned long
2626 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2627 {
2628 + if ((long)n < 0)
2629 + return n;
2630 +
2631 prefetch(from);
2632 if (access_ok(VERIFY_WRITE, to, n))
2633 __copy_user(to,from,n);
2634 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2635 unsigned long
2636 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2637 {
2638 + if ((long)n < 0)
2639 + return n;
2640 +
2641 prefetchw(to);
2642 if (access_ok(VERIFY_READ, from, n))
2643 __copy_user_zeroing(to,from,n);
2644 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2645 index 0395c51..5f26031 100644
2646 --- a/arch/m68k/include/asm/cache.h
2647 +++ b/arch/m68k/include/asm/cache.h
2648 @@ -4,9 +4,11 @@
2649 #ifndef __ARCH_M68K_CACHE_H
2650 #define __ARCH_M68K_CACHE_H
2651
2652 +#include <linux/const.h>
2653 +
2654 /* bytes per L1 cache line */
2655 #define L1_CACHE_SHIFT 4
2656 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2657 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2658
2659 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2660
2661 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2662 index 4efe96a..60e8699 100644
2663 --- a/arch/microblaze/include/asm/cache.h
2664 +++ b/arch/microblaze/include/asm/cache.h
2665 @@ -13,11 +13,12 @@
2666 #ifndef _ASM_MICROBLAZE_CACHE_H
2667 #define _ASM_MICROBLAZE_CACHE_H
2668
2669 +#include <linux/const.h>
2670 #include <asm/registers.h>
2671
2672 #define L1_CACHE_SHIFT 5
2673 /* word-granular cache in microblaze */
2674 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2675 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2676
2677 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2678
2679 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2680 index 1d93f81..67794d0 100644
2681 --- a/arch/mips/include/asm/atomic.h
2682 +++ b/arch/mips/include/asm/atomic.h
2683 @@ -21,6 +21,10 @@
2684 #include <asm/war.h>
2685 #include <asm/system.h>
2686
2687 +#ifdef CONFIG_GENERIC_ATOMIC64
2688 +#include <asm-generic/atomic64.h>
2689 +#endif
2690 +
2691 #define ATOMIC_INIT(i) { (i) }
2692
2693 /*
2694 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2695 */
2696 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2697
2698 +#define atomic64_read_unchecked(v) atomic64_read(v)
2699 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2700 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2701 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2702 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2703 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2704 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2705 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2706 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2707 +
2708 #endif /* CONFIG_64BIT */
2709
2710 /*
2711 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2712 index b4db69f..8f3b093 100644
2713 --- a/arch/mips/include/asm/cache.h
2714 +++ b/arch/mips/include/asm/cache.h
2715 @@ -9,10 +9,11 @@
2716 #ifndef _ASM_CACHE_H
2717 #define _ASM_CACHE_H
2718
2719 +#include <linux/const.h>
2720 #include <kmalloc.h>
2721
2722 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2723 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2724 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2725
2726 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2727 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2728 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2729 index 455c0ac..ad65fbe 100644
2730 --- a/arch/mips/include/asm/elf.h
2731 +++ b/arch/mips/include/asm/elf.h
2732 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2733 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2734 #endif
2735
2736 +#ifdef CONFIG_PAX_ASLR
2737 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2738 +
2739 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2740 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2741 +#endif
2742 +
2743 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2744 struct linux_binprm;
2745 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2746 int uses_interp);
2747
2748 -struct mm_struct;
2749 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2750 -#define arch_randomize_brk arch_randomize_brk
2751 -
2752 #endif /* _ASM_ELF_H */
2753 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2754 index da9bd7d..91aa7ab 100644
2755 --- a/arch/mips/include/asm/page.h
2756 +++ b/arch/mips/include/asm/page.h
2757 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2758 #ifdef CONFIG_CPU_MIPS32
2759 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2760 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2761 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2762 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2763 #else
2764 typedef struct { unsigned long long pte; } pte_t;
2765 #define pte_val(x) ((x).pte)
2766 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2767 index 881d18b..cea38bc 100644
2768 --- a/arch/mips/include/asm/pgalloc.h
2769 +++ b/arch/mips/include/asm/pgalloc.h
2770 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2771 {
2772 set_pud(pud, __pud((unsigned long)pmd));
2773 }
2774 +
2775 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2776 +{
2777 + pud_populate(mm, pud, pmd);
2778 +}
2779 #endif
2780
2781 /*
2782 diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
2783 index 6018c80..7c37203 100644
2784 --- a/arch/mips/include/asm/system.h
2785 +++ b/arch/mips/include/asm/system.h
2786 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
2787 */
2788 #define __ARCH_WANT_UNLOCKED_CTXSW
2789
2790 -extern unsigned long arch_align_stack(unsigned long sp);
2791 +#define arch_align_stack(x) ((x) & ~0xfUL)
2792
2793 #endif /* _ASM_SYSTEM_H */
2794 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2795 index 9fdd8bc..4bd7f1a 100644
2796 --- a/arch/mips/kernel/binfmt_elfn32.c
2797 +++ b/arch/mips/kernel/binfmt_elfn32.c
2798 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2799 #undef ELF_ET_DYN_BASE
2800 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2801
2802 +#ifdef CONFIG_PAX_ASLR
2803 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2804 +
2805 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2806 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2807 +#endif
2808 +
2809 #include <asm/processor.h>
2810 #include <linux/module.h>
2811 #include <linux/elfcore.h>
2812 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2813 index ff44823..97f8906 100644
2814 --- a/arch/mips/kernel/binfmt_elfo32.c
2815 +++ b/arch/mips/kernel/binfmt_elfo32.c
2816 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2817 #undef ELF_ET_DYN_BASE
2818 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2819
2820 +#ifdef CONFIG_PAX_ASLR
2821 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2822 +
2823 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2824 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2825 +#endif
2826 +
2827 #include <asm/processor.h>
2828
2829 /*
2830 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2831 index 7955409..ceaea7c 100644
2832 --- a/arch/mips/kernel/process.c
2833 +++ b/arch/mips/kernel/process.c
2834 @@ -483,15 +483,3 @@ unsigned long get_wchan(struct task_struct *task)
2835 out:
2836 return pc;
2837 }
2838 -
2839 -/*
2840 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2841 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2842 - */
2843 -unsigned long arch_align_stack(unsigned long sp)
2844 -{
2845 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2846 - sp -= get_random_int() & ~PAGE_MASK;
2847 -
2848 - return sp & ALMASK;
2849 -}
2850 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2851 index 69ebd58..e4bff83 100644
2852 --- a/arch/mips/mm/fault.c
2853 +++ b/arch/mips/mm/fault.c
2854 @@ -28,6 +28,23 @@
2855 #include <asm/highmem.h> /* For VMALLOC_END */
2856 #include <linux/kdebug.h>
2857
2858 +#ifdef CONFIG_PAX_PAGEEXEC
2859 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2860 +{
2861 + unsigned long i;
2862 +
2863 + printk(KERN_ERR "PAX: bytes at PC: ");
2864 + for (i = 0; i < 5; i++) {
2865 + unsigned int c;
2866 + if (get_user(c, (unsigned int *)pc+i))
2867 + printk(KERN_CONT "???????? ");
2868 + else
2869 + printk(KERN_CONT "%08x ", c);
2870 + }
2871 + printk("\n");
2872 +}
2873 +#endif
2874 +
2875 /*
2876 * This routine handles page faults. It determines the address,
2877 * and the problem, and then passes it off to one of the appropriate
2878 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
2879 index 302d779..7d35bf8 100644
2880 --- a/arch/mips/mm/mmap.c
2881 +++ b/arch/mips/mm/mmap.c
2882 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2883 do_color_align = 1;
2884
2885 /* requesting a specific address */
2886 +
2887 +#ifdef CONFIG_PAX_RANDMMAP
2888 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2889 +#endif
2890 +
2891 if (addr) {
2892 if (do_color_align)
2893 addr = COLOUR_ALIGN(addr, pgoff);
2894 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2895 addr = PAGE_ALIGN(addr);
2896
2897 vma = find_vma(mm, addr);
2898 - if (TASK_SIZE - len >= addr &&
2899 - (!vma || addr + len <= vma->vm_start))
2900 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
2901 return addr;
2902 }
2903
2904 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2905 /* At this point: (!vma || addr < vma->vm_end). */
2906 if (TASK_SIZE - len < addr)
2907 return -ENOMEM;
2908 - if (!vma || addr + len <= vma->vm_start)
2909 + if (check_heap_stack_gap(vmm, addr, len))
2910 return addr;
2911 addr = vma->vm_end;
2912 if (do_color_align)
2913 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2914 /* make sure it can fit in the remaining address space */
2915 if (likely(addr > len)) {
2916 vma = find_vma(mm, addr - len);
2917 - if (!vma || addr <= vma->vm_start) {
2918 + if (check_heap_stack_gap(vmm, addr - len, len))
2919 /* cache the address as a hint for next time */
2920 return mm->free_area_cache = addr - len;
2921 }
2922 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
2923 * return with success:
2924 */
2925 vma = find_vma(mm, addr);
2926 - if (likely(!vma || addr + len <= vma->vm_start)) {
2927 + if (check_heap_stack_gap(vmm, addr, len)) {
2928 /* cache the address as a hint for next time */
2929 return mm->free_area_cache = addr;
2930 }
2931 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2932 mm->unmap_area = arch_unmap_area_topdown;
2933 }
2934 }
2935 -
2936 -static inline unsigned long brk_rnd(void)
2937 -{
2938 - unsigned long rnd = get_random_int();
2939 -
2940 - rnd = rnd << PAGE_SHIFT;
2941 - /* 8MB for 32bit, 256MB for 64bit */
2942 - if (TASK_IS_32BIT_ADDR)
2943 - rnd = rnd & 0x7ffffful;
2944 - else
2945 - rnd = rnd & 0xffffffful;
2946 -
2947 - return rnd;
2948 -}
2949 -
2950 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2951 -{
2952 - unsigned long base = mm->brk;
2953 - unsigned long ret;
2954 -
2955 - ret = PAGE_ALIGN(base + brk_rnd());
2956 -
2957 - if (ret < mm->brk)
2958 - return mm->brk;
2959 -
2960 - return ret;
2961 -}
2962 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2963 index 967d144..db12197 100644
2964 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
2965 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
2966 @@ -11,12 +11,14 @@
2967 #ifndef _ASM_PROC_CACHE_H
2968 #define _ASM_PROC_CACHE_H
2969
2970 +#include <linux/const.h>
2971 +
2972 /* L1 cache */
2973
2974 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2975 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
2976 -#define L1_CACHE_BYTES 16 /* bytes per entry */
2977 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
2978 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
2979 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
2980
2981 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
2982 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2983 index bcb5df2..84fabd2 100644
2984 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2985 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
2986 @@ -16,13 +16,15 @@
2987 #ifndef _ASM_PROC_CACHE_H
2988 #define _ASM_PROC_CACHE_H
2989
2990 +#include <linux/const.h>
2991 +
2992 /*
2993 * L1 cache
2994 */
2995 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
2996 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
2997 -#define L1_CACHE_BYTES 32 /* bytes per entry */
2998 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
2999 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3000 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3001
3002 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3003 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3004 index 4ce7a01..449202a 100644
3005 --- a/arch/openrisc/include/asm/cache.h
3006 +++ b/arch/openrisc/include/asm/cache.h
3007 @@ -19,11 +19,13 @@
3008 #ifndef __ASM_OPENRISC_CACHE_H
3009 #define __ASM_OPENRISC_CACHE_H
3010
3011 +#include <linux/const.h>
3012 +
3013 /* FIXME: How can we replace these with values from the CPU...
3014 * they shouldn't be hard-coded!
3015 */
3016
3017 -#define L1_CACHE_BYTES 16
3018 #define L1_CACHE_SHIFT 4
3019 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3020
3021 #endif /* __ASM_OPENRISC_CACHE_H */
3022 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3023 index 4054b31..a10c105 100644
3024 --- a/arch/parisc/include/asm/atomic.h
3025 +++ b/arch/parisc/include/asm/atomic.h
3026 @@ -335,6 +335,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3027
3028 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3029
3030 +#define atomic64_read_unchecked(v) atomic64_read(v)
3031 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3032 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3033 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3034 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3035 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3036 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3037 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3038 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3039 +
3040 #endif /* !CONFIG_64BIT */
3041
3042
3043 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3044 index 47f11c7..3420df2 100644
3045 --- a/arch/parisc/include/asm/cache.h
3046 +++ b/arch/parisc/include/asm/cache.h
3047 @@ -5,6 +5,7 @@
3048 #ifndef __ARCH_PARISC_CACHE_H
3049 #define __ARCH_PARISC_CACHE_H
3050
3051 +#include <linux/const.h>
3052
3053 /*
3054 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3055 @@ -15,13 +16,13 @@
3056 * just ruin performance.
3057 */
3058 #ifdef CONFIG_PA20
3059 -#define L1_CACHE_BYTES 64
3060 #define L1_CACHE_SHIFT 6
3061 #else
3062 -#define L1_CACHE_BYTES 32
3063 #define L1_CACHE_SHIFT 5
3064 #endif
3065
3066 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3067 +
3068 #ifndef __ASSEMBLY__
3069
3070 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3071 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3072 index 19f6cb1..6c78cf2 100644
3073 --- a/arch/parisc/include/asm/elf.h
3074 +++ b/arch/parisc/include/asm/elf.h
3075 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3076
3077 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3078
3079 +#ifdef CONFIG_PAX_ASLR
3080 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3081 +
3082 +#define PAX_DELTA_MMAP_LEN 16
3083 +#define PAX_DELTA_STACK_LEN 16
3084 +#endif
3085 +
3086 /* This yields a mask that user programs can use to figure out what
3087 instruction set this CPU supports. This could be done in user space,
3088 but it's not easy, and we've already done it here. */
3089 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3090 index fc987a1..6e068ef 100644
3091 --- a/arch/parisc/include/asm/pgalloc.h
3092 +++ b/arch/parisc/include/asm/pgalloc.h
3093 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3094 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3095 }
3096
3097 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3098 +{
3099 + pgd_populate(mm, pgd, pmd);
3100 +}
3101 +
3102 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3103 {
3104 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3105 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3106 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3107 #define pmd_free(mm, x) do { } while (0)
3108 #define pgd_populate(mm, pmd, pte) BUG()
3109 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3110
3111 #endif
3112
3113 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3114 index 22dadeb..f6c2be4 100644
3115 --- a/arch/parisc/include/asm/pgtable.h
3116 +++ b/arch/parisc/include/asm/pgtable.h
3117 @@ -210,6 +210,17 @@ struct vm_area_struct;
3118 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3119 #define PAGE_COPY PAGE_EXECREAD
3120 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3121 +
3122 +#ifdef CONFIG_PAX_PAGEEXEC
3123 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3124 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3125 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3126 +#else
3127 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3128 +# define PAGE_COPY_NOEXEC PAGE_COPY
3129 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3130 +#endif
3131 +
3132 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3133 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3134 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3135 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3136 index 5e34ccf..672bc9c 100644
3137 --- a/arch/parisc/kernel/module.c
3138 +++ b/arch/parisc/kernel/module.c
3139 @@ -98,16 +98,38 @@
3140
3141 /* three functions to determine where in the module core
3142 * or init pieces the location is */
3143 +static inline int in_init_rx(struct module *me, void *loc)
3144 +{
3145 + return (loc >= me->module_init_rx &&
3146 + loc < (me->module_init_rx + me->init_size_rx));
3147 +}
3148 +
3149 +static inline int in_init_rw(struct module *me, void *loc)
3150 +{
3151 + return (loc >= me->module_init_rw &&
3152 + loc < (me->module_init_rw + me->init_size_rw));
3153 +}
3154 +
3155 static inline int in_init(struct module *me, void *loc)
3156 {
3157 - return (loc >= me->module_init &&
3158 - loc <= (me->module_init + me->init_size));
3159 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3160 +}
3161 +
3162 +static inline int in_core_rx(struct module *me, void *loc)
3163 +{
3164 + return (loc >= me->module_core_rx &&
3165 + loc < (me->module_core_rx + me->core_size_rx));
3166 +}
3167 +
3168 +static inline int in_core_rw(struct module *me, void *loc)
3169 +{
3170 + return (loc >= me->module_core_rw &&
3171 + loc < (me->module_core_rw + me->core_size_rw));
3172 }
3173
3174 static inline int in_core(struct module *me, void *loc)
3175 {
3176 - return (loc >= me->module_core &&
3177 - loc <= (me->module_core + me->core_size));
3178 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3179 }
3180
3181 static inline int in_local(struct module *me, void *loc)
3182 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3183 }
3184
3185 /* align things a bit */
3186 - me->core_size = ALIGN(me->core_size, 16);
3187 - me->arch.got_offset = me->core_size;
3188 - me->core_size += gots * sizeof(struct got_entry);
3189 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3190 + me->arch.got_offset = me->core_size_rw;
3191 + me->core_size_rw += gots * sizeof(struct got_entry);
3192
3193 - me->core_size = ALIGN(me->core_size, 16);
3194 - me->arch.fdesc_offset = me->core_size;
3195 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3196 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3197 + me->arch.fdesc_offset = me->core_size_rw;
3198 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3199
3200 me->arch.got_max = gots;
3201 me->arch.fdesc_max = fdescs;
3202 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3203
3204 BUG_ON(value == 0);
3205
3206 - got = me->module_core + me->arch.got_offset;
3207 + got = me->module_core_rw + me->arch.got_offset;
3208 for (i = 0; got[i].addr; i++)
3209 if (got[i].addr == value)
3210 goto out;
3211 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3212 #ifdef CONFIG_64BIT
3213 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3214 {
3215 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3216 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3217
3218 if (!value) {
3219 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3220 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3221
3222 /* Create new one */
3223 fdesc->addr = value;
3224 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3225 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3226 return (Elf_Addr)fdesc;
3227 }
3228 #endif /* CONFIG_64BIT */
3229 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3230
3231 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3232 end = table + sechdrs[me->arch.unwind_section].sh_size;
3233 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3234 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3235
3236 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3237 me->arch.unwind_section, table, end, gp);
3238 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3239 index c9b9322..02d8940 100644
3240 --- a/arch/parisc/kernel/sys_parisc.c
3241 +++ b/arch/parisc/kernel/sys_parisc.c
3242 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3243 /* At this point: (!vma || addr < vma->vm_end). */
3244 if (TASK_SIZE - len < addr)
3245 return -ENOMEM;
3246 - if (!vma || addr + len <= vma->vm_start)
3247 + if (check_heap_stack_gap(vma, addr, len))
3248 return addr;
3249 addr = vma->vm_end;
3250 }
3251 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3252 /* At this point: (!vma || addr < vma->vm_end). */
3253 if (TASK_SIZE - len < addr)
3254 return -ENOMEM;
3255 - if (!vma || addr + len <= vma->vm_start)
3256 + if (check_heap_stack_gap(vma, addr, len))
3257 return addr;
3258 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3259 if (addr < vma->vm_end) /* handle wraparound */
3260 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3261 if (flags & MAP_FIXED)
3262 return addr;
3263 if (!addr)
3264 - addr = TASK_UNMAPPED_BASE;
3265 + addr = current->mm->mmap_base;
3266
3267 if (filp) {
3268 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3269 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3270 index f19e660..414fe24 100644
3271 --- a/arch/parisc/kernel/traps.c
3272 +++ b/arch/parisc/kernel/traps.c
3273 @@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3274
3275 down_read(&current->mm->mmap_sem);
3276 vma = find_vma(current->mm,regs->iaoq[0]);
3277 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3278 - && (vma->vm_flags & VM_EXEC)) {
3279 -
3280 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3281 fault_address = regs->iaoq[0];
3282 fault_space = regs->iasq[0];
3283
3284 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3285 index 18162ce..94de376 100644
3286 --- a/arch/parisc/mm/fault.c
3287 +++ b/arch/parisc/mm/fault.c
3288 @@ -15,6 +15,7 @@
3289 #include <linux/sched.h>
3290 #include <linux/interrupt.h>
3291 #include <linux/module.h>
3292 +#include <linux/unistd.h>
3293
3294 #include <asm/uaccess.h>
3295 #include <asm/traps.h>
3296 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3297 static unsigned long
3298 parisc_acctyp(unsigned long code, unsigned int inst)
3299 {
3300 - if (code == 6 || code == 16)
3301 + if (code == 6 || code == 7 || code == 16)
3302 return VM_EXEC;
3303
3304 switch (inst & 0xf0000000) {
3305 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3306 }
3307 #endif
3308
3309 +#ifdef CONFIG_PAX_PAGEEXEC
3310 +/*
3311 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3312 + *
3313 + * returns 1 when task should be killed
3314 + * 2 when rt_sigreturn trampoline was detected
3315 + * 3 when unpatched PLT trampoline was detected
3316 + */
3317 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3318 +{
3319 +
3320 +#ifdef CONFIG_PAX_EMUPLT
3321 + int err;
3322 +
3323 + do { /* PaX: unpatched PLT emulation */
3324 + unsigned int bl, depwi;
3325 +
3326 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3327 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3328 +
3329 + if (err)
3330 + break;
3331 +
3332 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3333 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3334 +
3335 + err = get_user(ldw, (unsigned int *)addr);
3336 + err |= get_user(bv, (unsigned int *)(addr+4));
3337 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3338 +
3339 + if (err)
3340 + break;
3341 +
3342 + if (ldw == 0x0E801096U &&
3343 + bv == 0xEAC0C000U &&
3344 + ldw2 == 0x0E881095U)
3345 + {
3346 + unsigned int resolver, map;
3347 +
3348 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3349 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3350 + if (err)
3351 + break;
3352 +
3353 + regs->gr[20] = instruction_pointer(regs)+8;
3354 + regs->gr[21] = map;
3355 + regs->gr[22] = resolver;
3356 + regs->iaoq[0] = resolver | 3UL;
3357 + regs->iaoq[1] = regs->iaoq[0] + 4;
3358 + return 3;
3359 + }
3360 + }
3361 + } while (0);
3362 +#endif
3363 +
3364 +#ifdef CONFIG_PAX_EMUTRAMP
3365 +
3366 +#ifndef CONFIG_PAX_EMUSIGRT
3367 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3368 + return 1;
3369 +#endif
3370 +
3371 + do { /* PaX: rt_sigreturn emulation */
3372 + unsigned int ldi1, ldi2, bel, nop;
3373 +
3374 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3375 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3376 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3377 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3378 +
3379 + if (err)
3380 + break;
3381 +
3382 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3383 + ldi2 == 0x3414015AU &&
3384 + bel == 0xE4008200U &&
3385 + nop == 0x08000240U)
3386 + {
3387 + regs->gr[25] = (ldi1 & 2) >> 1;
3388 + regs->gr[20] = __NR_rt_sigreturn;
3389 + regs->gr[31] = regs->iaoq[1] + 16;
3390 + regs->sr[0] = regs->iasq[1];
3391 + regs->iaoq[0] = 0x100UL;
3392 + regs->iaoq[1] = regs->iaoq[0] + 4;
3393 + regs->iasq[0] = regs->sr[2];
3394 + regs->iasq[1] = regs->sr[2];
3395 + return 2;
3396 + }
3397 + } while (0);
3398 +#endif
3399 +
3400 + return 1;
3401 +}
3402 +
3403 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3404 +{
3405 + unsigned long i;
3406 +
3407 + printk(KERN_ERR "PAX: bytes at PC: ");
3408 + for (i = 0; i < 5; i++) {
3409 + unsigned int c;
3410 + if (get_user(c, (unsigned int *)pc+i))
3411 + printk(KERN_CONT "???????? ");
3412 + else
3413 + printk(KERN_CONT "%08x ", c);
3414 + }
3415 + printk("\n");
3416 +}
3417 +#endif
3418 +
3419 int fixup_exception(struct pt_regs *regs)
3420 {
3421 const struct exception_table_entry *fix;
3422 @@ -192,8 +303,33 @@ good_area:
3423
3424 acc_type = parisc_acctyp(code,regs->iir);
3425
3426 - if ((vma->vm_flags & acc_type) != acc_type)
3427 + if ((vma->vm_flags & acc_type) != acc_type) {
3428 +
3429 +#ifdef CONFIG_PAX_PAGEEXEC
3430 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3431 + (address & ~3UL) == instruction_pointer(regs))
3432 + {
3433 + up_read(&mm->mmap_sem);
3434 + switch (pax_handle_fetch_fault(regs)) {
3435 +
3436 +#ifdef CONFIG_PAX_EMUPLT
3437 + case 3:
3438 + return;
3439 +#endif
3440 +
3441 +#ifdef CONFIG_PAX_EMUTRAMP
3442 + case 2:
3443 + return;
3444 +#endif
3445 +
3446 + }
3447 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3448 + do_group_exit(SIGKILL);
3449 + }
3450 +#endif
3451 +
3452 goto bad_area;
3453 + }
3454
3455 /*
3456 * If for any reason at all we couldn't handle the fault, make
3457 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3458 index 02e41b5..ec6e26c 100644
3459 --- a/arch/powerpc/include/asm/atomic.h
3460 +++ b/arch/powerpc/include/asm/atomic.h
3461 @@ -469,6 +469,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3462
3463 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3464
3465 +#define atomic64_read_unchecked(v) atomic64_read(v)
3466 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3467 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3468 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3469 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3470 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3471 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3472 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3473 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3474 +
3475 #endif /* __powerpc64__ */
3476
3477 #endif /* __KERNEL__ */
3478 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3479 index 4b50941..5605819 100644
3480 --- a/arch/powerpc/include/asm/cache.h
3481 +++ b/arch/powerpc/include/asm/cache.h
3482 @@ -3,6 +3,7 @@
3483
3484 #ifdef __KERNEL__
3485
3486 +#include <linux/const.h>
3487
3488 /* bytes per L1 cache line */
3489 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3490 @@ -22,7 +23,7 @@
3491 #define L1_CACHE_SHIFT 7
3492 #endif
3493
3494 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3495 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3496
3497 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3498
3499 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3500 index 3bf9cca..e7457d0 100644
3501 --- a/arch/powerpc/include/asm/elf.h
3502 +++ b/arch/powerpc/include/asm/elf.h
3503 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3504 the loader. We need to make sure that it is out of the way of the program
3505 that it will "exec", and that there is sufficient room for the brk. */
3506
3507 -extern unsigned long randomize_et_dyn(unsigned long base);
3508 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3509 +#define ELF_ET_DYN_BASE (0x20000000)
3510 +
3511 +#ifdef CONFIG_PAX_ASLR
3512 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3513 +
3514 +#ifdef __powerpc64__
3515 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3516 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3517 +#else
3518 +#define PAX_DELTA_MMAP_LEN 15
3519 +#define PAX_DELTA_STACK_LEN 15
3520 +#endif
3521 +#endif
3522
3523 /*
3524 * Our registers are always unsigned longs, whether we're a 32 bit
3525 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3526 (0x7ff >> (PAGE_SHIFT - 12)) : \
3527 (0x3ffff >> (PAGE_SHIFT - 12)))
3528
3529 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3530 -#define arch_randomize_brk arch_randomize_brk
3531 -
3532 #endif /* __KERNEL__ */
3533
3534 /*
3535 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3536 index bca8fdc..61e9580 100644
3537 --- a/arch/powerpc/include/asm/kmap_types.h
3538 +++ b/arch/powerpc/include/asm/kmap_types.h
3539 @@ -27,6 +27,7 @@ enum km_type {
3540 KM_PPC_SYNC_PAGE,
3541 KM_PPC_SYNC_ICACHE,
3542 KM_KDB,
3543 + KM_CLEARPAGE,
3544 KM_TYPE_NR
3545 };
3546
3547 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3548 index d4a7f64..451de1c 100644
3549 --- a/arch/powerpc/include/asm/mman.h
3550 +++ b/arch/powerpc/include/asm/mman.h
3551 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3552 }
3553 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3554
3555 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3556 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3557 {
3558 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3559 }
3560 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3561 index f072e97..b436dee 100644
3562 --- a/arch/powerpc/include/asm/page.h
3563 +++ b/arch/powerpc/include/asm/page.h
3564 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3565 * and needs to be executable. This means the whole heap ends
3566 * up being executable.
3567 */
3568 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3569 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3570 +#define VM_DATA_DEFAULT_FLAGS32 \
3571 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3572 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3573
3574 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3575 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3576 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3577 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3578 #endif
3579
3580 +#define ktla_ktva(addr) (addr)
3581 +#define ktva_ktla(addr) (addr)
3582 +
3583 /*
3584 * Use the top bit of the higher-level page table entries to indicate whether
3585 * the entries we point to contain hugepages. This works because we know that
3586 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3587 index fed85e6..da5c71b 100644
3588 --- a/arch/powerpc/include/asm/page_64.h
3589 +++ b/arch/powerpc/include/asm/page_64.h
3590 @@ -146,15 +146,18 @@ do { \
3591 * stack by default, so in the absence of a PT_GNU_STACK program header
3592 * we turn execute permission off.
3593 */
3594 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3595 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3596 +#define VM_STACK_DEFAULT_FLAGS32 \
3597 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3598 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3599
3600 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3601 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3602
3603 +#ifndef CONFIG_PAX_PAGEEXEC
3604 #define VM_STACK_DEFAULT_FLAGS \
3605 (is_32bit_task() ? \
3606 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3607 +#endif
3608
3609 #include <asm-generic/getorder.h>
3610
3611 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3612 index 292725c..f87ae14 100644
3613 --- a/arch/powerpc/include/asm/pgalloc-64.h
3614 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3615 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3616 #ifndef CONFIG_PPC_64K_PAGES
3617
3618 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3619 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3620
3621 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3622 {
3623 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3624 pud_set(pud, (unsigned long)pmd);
3625 }
3626
3627 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3628 +{
3629 + pud_populate(mm, pud, pmd);
3630 +}
3631 +
3632 #define pmd_populate(mm, pmd, pte_page) \
3633 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3634 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3635 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3636 #else /* CONFIG_PPC_64K_PAGES */
3637
3638 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3639 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3640
3641 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3642 pte_t *pte)
3643 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3644 index 2e0e411..7899c68 100644
3645 --- a/arch/powerpc/include/asm/pgtable.h
3646 +++ b/arch/powerpc/include/asm/pgtable.h
3647 @@ -2,6 +2,7 @@
3648 #define _ASM_POWERPC_PGTABLE_H
3649 #ifdef __KERNEL__
3650
3651 +#include <linux/const.h>
3652 #ifndef __ASSEMBLY__
3653 #include <asm/processor.h> /* For TASK_SIZE */
3654 #include <asm/mmu.h>
3655 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3656 index 4aad413..85d86bf 100644
3657 --- a/arch/powerpc/include/asm/pte-hash32.h
3658 +++ b/arch/powerpc/include/asm/pte-hash32.h
3659 @@ -21,6 +21,7 @@
3660 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3661 #define _PAGE_USER 0x004 /* usermode access allowed */
3662 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3663 +#define _PAGE_EXEC _PAGE_GUARDED
3664 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3665 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3666 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3667 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3668 index 7fdc2c0..e47a9b02d3 100644
3669 --- a/arch/powerpc/include/asm/reg.h
3670 +++ b/arch/powerpc/include/asm/reg.h
3671 @@ -212,6 +212,7 @@
3672 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3673 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3674 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3675 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3676 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3677 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3678 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3679 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
3680 index c377457..3c69fbc 100644
3681 --- a/arch/powerpc/include/asm/system.h
3682 +++ b/arch/powerpc/include/asm/system.h
3683 @@ -539,7 +539,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
3684 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
3685 #endif
3686
3687 -extern unsigned long arch_align_stack(unsigned long sp);
3688 +#define arch_align_stack(x) ((x) & ~0xfUL)
3689
3690 /* Used in very early kernel initialization. */
3691 extern unsigned long reloc_offset(void);
3692 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3693 index bd0fb84..a42a14b 100644
3694 --- a/arch/powerpc/include/asm/uaccess.h
3695 +++ b/arch/powerpc/include/asm/uaccess.h
3696 @@ -13,6 +13,8 @@
3697 #define VERIFY_READ 0
3698 #define VERIFY_WRITE 1
3699
3700 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3701 +
3702 /*
3703 * The fs value determines whether argument validity checking should be
3704 * performed or not. If get_fs() == USER_DS, checking is performed, with
3705 @@ -327,52 +329,6 @@ do { \
3706 extern unsigned long __copy_tofrom_user(void __user *to,
3707 const void __user *from, unsigned long size);
3708
3709 -#ifndef __powerpc64__
3710 -
3711 -static inline unsigned long copy_from_user(void *to,
3712 - const void __user *from, unsigned long n)
3713 -{
3714 - unsigned long over;
3715 -
3716 - if (access_ok(VERIFY_READ, from, n))
3717 - return __copy_tofrom_user((__force void __user *)to, from, n);
3718 - if ((unsigned long)from < TASK_SIZE) {
3719 - over = (unsigned long)from + n - TASK_SIZE;
3720 - return __copy_tofrom_user((__force void __user *)to, from,
3721 - n - over) + over;
3722 - }
3723 - return n;
3724 -}
3725 -
3726 -static inline unsigned long copy_to_user(void __user *to,
3727 - const void *from, unsigned long n)
3728 -{
3729 - unsigned long over;
3730 -
3731 - if (access_ok(VERIFY_WRITE, to, n))
3732 - return __copy_tofrom_user(to, (__force void __user *)from, n);
3733 - if ((unsigned long)to < TASK_SIZE) {
3734 - over = (unsigned long)to + n - TASK_SIZE;
3735 - return __copy_tofrom_user(to, (__force void __user *)from,
3736 - n - over) + over;
3737 - }
3738 - return n;
3739 -}
3740 -
3741 -#else /* __powerpc64__ */
3742 -
3743 -#define __copy_in_user(to, from, size) \
3744 - __copy_tofrom_user((to), (from), (size))
3745 -
3746 -extern unsigned long copy_from_user(void *to, const void __user *from,
3747 - unsigned long n);
3748 -extern unsigned long copy_to_user(void __user *to, const void *from,
3749 - unsigned long n);
3750 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
3751 - unsigned long n);
3752 -
3753 -#endif /* __powerpc64__ */
3754 -
3755 static inline unsigned long __copy_from_user_inatomic(void *to,
3756 const void __user *from, unsigned long n)
3757 {
3758 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
3759 if (ret == 0)
3760 return 0;
3761 }
3762 +
3763 + if (!__builtin_constant_p(n))
3764 + check_object_size(to, n, false);
3765 +
3766 return __copy_tofrom_user((__force void __user *)to, from, n);
3767 }
3768
3769 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
3770 if (ret == 0)
3771 return 0;
3772 }
3773 +
3774 + if (!__builtin_constant_p(n))
3775 + check_object_size(from, n, true);
3776 +
3777 return __copy_tofrom_user(to, (__force const void __user *)from, n);
3778 }
3779
3780 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
3781 return __copy_to_user_inatomic(to, from, size);
3782 }
3783
3784 +#ifndef __powerpc64__
3785 +
3786 +static inline unsigned long __must_check copy_from_user(void *to,
3787 + const void __user *from, unsigned long n)
3788 +{
3789 + unsigned long over;
3790 +
3791 + if ((long)n < 0)
3792 + return n;
3793 +
3794 + if (access_ok(VERIFY_READ, from, n)) {
3795 + if (!__builtin_constant_p(n))
3796 + check_object_size(to, n, false);
3797 + return __copy_tofrom_user((__force void __user *)to, from, n);
3798 + }
3799 + if ((unsigned long)from < TASK_SIZE) {
3800 + over = (unsigned long)from + n - TASK_SIZE;
3801 + if (!__builtin_constant_p(n - over))
3802 + check_object_size(to, n - over, false);
3803 + return __copy_tofrom_user((__force void __user *)to, from,
3804 + n - over) + over;
3805 + }
3806 + return n;
3807 +}
3808 +
3809 +static inline unsigned long __must_check copy_to_user(void __user *to,
3810 + const void *from, unsigned long n)
3811 +{
3812 + unsigned long over;
3813 +
3814 + if ((long)n < 0)
3815 + return n;
3816 +
3817 + if (access_ok(VERIFY_WRITE, to, n)) {
3818 + if (!__builtin_constant_p(n))
3819 + check_object_size(from, n, true);
3820 + return __copy_tofrom_user(to, (__force void __user *)from, n);
3821 + }
3822 + if ((unsigned long)to < TASK_SIZE) {
3823 + over = (unsigned long)to + n - TASK_SIZE;
3824 + if (!__builtin_constant_p(n))
3825 + check_object_size(from, n - over, true);
3826 + return __copy_tofrom_user(to, (__force void __user *)from,
3827 + n - over) + over;
3828 + }
3829 + return n;
3830 +}
3831 +
3832 +#else /* __powerpc64__ */
3833 +
3834 +#define __copy_in_user(to, from, size) \
3835 + __copy_tofrom_user((to), (from), (size))
3836 +
3837 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
3838 +{
3839 + if ((long)n < 0 || n > INT_MAX)
3840 + return n;
3841 +
3842 + if (!__builtin_constant_p(n))
3843 + check_object_size(to, n, false);
3844 +
3845 + if (likely(access_ok(VERIFY_READ, from, n)))
3846 + n = __copy_from_user(to, from, n);
3847 + else
3848 + memset(to, 0, n);
3849 + return n;
3850 +}
3851 +
3852 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
3853 +{
3854 + if ((long)n < 0 || n > INT_MAX)
3855 + return n;
3856 +
3857 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
3858 + if (!__builtin_constant_p(n))
3859 + check_object_size(from, n, true);
3860 + n = __copy_to_user(to, from, n);
3861 + }
3862 + return n;
3863 +}
3864 +
3865 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
3866 + unsigned long n);
3867 +
3868 +#endif /* __powerpc64__ */
3869 +
3870 extern unsigned long __clear_user(void __user *addr, unsigned long size);
3871
3872 static inline unsigned long clear_user(void __user *addr, unsigned long size)
3873 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3874 index 429983c..7af363b 100644
3875 --- a/arch/powerpc/kernel/exceptions-64e.S
3876 +++ b/arch/powerpc/kernel/exceptions-64e.S
3877 @@ -587,6 +587,7 @@ storage_fault_common:
3878 std r14,_DAR(r1)
3879 std r15,_DSISR(r1)
3880 addi r3,r1,STACK_FRAME_OVERHEAD
3881 + bl .save_nvgprs
3882 mr r4,r14
3883 mr r5,r15
3884 ld r14,PACA_EXGEN+EX_R14(r13)
3885 @@ -596,8 +597,7 @@ storage_fault_common:
3886 cmpdi r3,0
3887 bne- 1f
3888 b .ret_from_except_lite
3889 -1: bl .save_nvgprs
3890 - mr r5,r3
3891 +1: mr r5,r3
3892 addi r3,r1,STACK_FRAME_OVERHEAD
3893 ld r4,_DAR(r1)
3894 bl .bad_page_fault
3895 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3896 index 15c5a4f..22a4000 100644
3897 --- a/arch/powerpc/kernel/exceptions-64s.S
3898 +++ b/arch/powerpc/kernel/exceptions-64s.S
3899 @@ -1004,10 +1004,10 @@ handle_page_fault:
3900 11: ld r4,_DAR(r1)
3901 ld r5,_DSISR(r1)
3902 addi r3,r1,STACK_FRAME_OVERHEAD
3903 + bl .save_nvgprs
3904 bl .do_page_fault
3905 cmpdi r3,0
3906 beq+ 13f
3907 - bl .save_nvgprs
3908 mr r5,r3
3909 addi r3,r1,STACK_FRAME_OVERHEAD
3910 lwz r4,_DAR(r1)
3911 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
3912 index 01e2877..a1ba360 100644
3913 --- a/arch/powerpc/kernel/irq.c
3914 +++ b/arch/powerpc/kernel/irq.c
3915 @@ -560,9 +560,6 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
3916 host->ops = ops;
3917 host->of_node = of_node_get(of_node);
3918
3919 - if (host->ops->match == NULL)
3920 - host->ops->match = default_irq_host_match;
3921 -
3922 raw_spin_lock_irqsave(&irq_big_lock, flags);
3923
3924 /* If it's a legacy controller, check for duplicates and
3925 @@ -635,7 +632,12 @@ struct irq_host *irq_find_host(struct device_node *node)
3926 */
3927 raw_spin_lock_irqsave(&irq_big_lock, flags);
3928 list_for_each_entry(h, &irq_hosts, link)
3929 - if (h->ops->match(h, node)) {
3930 + if (h->ops->match) {
3931 + if (h->ops->match(h, node)) {
3932 + found = h;
3933 + break;
3934 + }
3935 + } else if (default_irq_host_match(h, node)) {
3936 found = h;
3937 break;
3938 }
3939 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3940 index 0b6d796..d760ddb 100644
3941 --- a/arch/powerpc/kernel/module_32.c
3942 +++ b/arch/powerpc/kernel/module_32.c
3943 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3944 me->arch.core_plt_section = i;
3945 }
3946 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3947 - printk("Module doesn't contain .plt or .init.plt sections.\n");
3948 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3949 return -ENOEXEC;
3950 }
3951
3952 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
3953
3954 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3955 /* Init, or core PLT? */
3956 - if (location >= mod->module_core
3957 - && location < mod->module_core + mod->core_size)
3958 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3959 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3960 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3961 - else
3962 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3963 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3964 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3965 + else {
3966 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3967 + return ~0UL;
3968 + }
3969
3970 /* Find this entry, or if that fails, the next avail. entry */
3971 while (entry->jump[0]) {
3972 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3973 index d817ab0..b23b18e 100644
3974 --- a/arch/powerpc/kernel/process.c
3975 +++ b/arch/powerpc/kernel/process.c
3976 @@ -676,8 +676,8 @@ void show_regs(struct pt_regs * regs)
3977 * Lookup NIP late so we have the best change of getting the
3978 * above info out without failing
3979 */
3980 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3981 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3982 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3983 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3984 #endif
3985 show_stack(current, (unsigned long *) regs->gpr[1]);
3986 if (!user_mode(regs))
3987 @@ -1181,10 +1181,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3988 newsp = stack[0];
3989 ip = stack[STACK_FRAME_LR_SAVE];
3990 if (!firstframe || ip != lr) {
3991 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3992 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3993 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3994 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3995 - printk(" (%pS)",
3996 + printk(" (%pA)",
3997 (void *)current->ret_stack[curr_frame].ret);
3998 curr_frame--;
3999 }
4000 @@ -1204,7 +1204,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4001 struct pt_regs *regs = (struct pt_regs *)
4002 (sp + STACK_FRAME_OVERHEAD);
4003 lr = regs->link;
4004 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4005 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4006 regs->trap, (void *)regs->nip, (void *)lr);
4007 firstframe = 1;
4008 }
4009 @@ -1279,58 +1279,3 @@ void thread_info_cache_init(void)
4010 }
4011
4012 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4013 -
4014 -unsigned long arch_align_stack(unsigned long sp)
4015 -{
4016 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4017 - sp -= get_random_int() & ~PAGE_MASK;
4018 - return sp & ~0xf;
4019 -}
4020 -
4021 -static inline unsigned long brk_rnd(void)
4022 -{
4023 - unsigned long rnd = 0;
4024 -
4025 - /* 8MB for 32bit, 1GB for 64bit */
4026 - if (is_32bit_task())
4027 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4028 - else
4029 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4030 -
4031 - return rnd << PAGE_SHIFT;
4032 -}
4033 -
4034 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4035 -{
4036 - unsigned long base = mm->brk;
4037 - unsigned long ret;
4038 -
4039 -#ifdef CONFIG_PPC_STD_MMU_64
4040 - /*
4041 - * If we are using 1TB segments and we are allowed to randomise
4042 - * the heap, we can put it above 1TB so it is backed by a 1TB
4043 - * segment. Otherwise the heap will be in the bottom 1TB
4044 - * which always uses 256MB segments and this may result in a
4045 - * performance penalty.
4046 - */
4047 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4048 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4049 -#endif
4050 -
4051 - ret = PAGE_ALIGN(base + brk_rnd());
4052 -
4053 - if (ret < mm->brk)
4054 - return mm->brk;
4055 -
4056 - return ret;
4057 -}
4058 -
4059 -unsigned long randomize_et_dyn(unsigned long base)
4060 -{
4061 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4062 -
4063 - if (ret < base)
4064 - return base;
4065 -
4066 - return ret;
4067 -}
4068 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4069 index 836a5a1..27289a3 100644
4070 --- a/arch/powerpc/kernel/signal_32.c
4071 +++ b/arch/powerpc/kernel/signal_32.c
4072 @@ -859,7 +859,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4073 /* Save user registers on the stack */
4074 frame = &rt_sf->uc.uc_mcontext;
4075 addr = frame;
4076 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4077 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4078 if (save_user_regs(regs, frame, 0, 1))
4079 goto badframe;
4080 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4081 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4082 index a50b5ec..547078a 100644
4083 --- a/arch/powerpc/kernel/signal_64.c
4084 +++ b/arch/powerpc/kernel/signal_64.c
4085 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4086 current->thread.fpscr.val = 0;
4087
4088 /* Set up to return from userspace. */
4089 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4090 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4091 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4092 } else {
4093 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4094 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4095 index c091527..5592625 100644
4096 --- a/arch/powerpc/kernel/traps.c
4097 +++ b/arch/powerpc/kernel/traps.c
4098 @@ -131,6 +131,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4099 return flags;
4100 }
4101
4102 +extern void gr_handle_kernel_exploit(void);
4103 +
4104 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4105 int signr)
4106 {
4107 @@ -178,6 +180,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4108 panic("Fatal exception in interrupt");
4109 if (panic_on_oops)
4110 panic("Fatal exception");
4111 +
4112 + gr_handle_kernel_exploit();
4113 +
4114 do_exit(signr);
4115 }
4116
4117 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4118 index 7d14bb6..1305601 100644
4119 --- a/arch/powerpc/kernel/vdso.c
4120 +++ b/arch/powerpc/kernel/vdso.c
4121 @@ -35,6 +35,7 @@
4122 #include <asm/firmware.h>
4123 #include <asm/vdso.h>
4124 #include <asm/vdso_datapage.h>
4125 +#include <asm/mman.h>
4126
4127 #include "setup.h"
4128
4129 @@ -219,7 +220,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4130 vdso_base = VDSO32_MBASE;
4131 #endif
4132
4133 - current->mm->context.vdso_base = 0;
4134 + current->mm->context.vdso_base = ~0UL;
4135
4136 /* vDSO has a problem and was disabled, just don't "enable" it for the
4137 * process
4138 @@ -239,7 +240,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4139 vdso_base = get_unmapped_area(NULL, vdso_base,
4140 (vdso_pages << PAGE_SHIFT) +
4141 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4142 - 0, 0);
4143 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4144 if (IS_ERR_VALUE(vdso_base)) {
4145 rc = vdso_base;
4146 goto fail_mmapsem;
4147 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4148 index 5eea6f3..5d10396 100644
4149 --- a/arch/powerpc/lib/usercopy_64.c
4150 +++ b/arch/powerpc/lib/usercopy_64.c
4151 @@ -9,22 +9,6 @@
4152 #include <linux/module.h>
4153 #include <asm/uaccess.h>
4154
4155 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4156 -{
4157 - if (likely(access_ok(VERIFY_READ, from, n)))
4158 - n = __copy_from_user(to, from, n);
4159 - else
4160 - memset(to, 0, n);
4161 - return n;
4162 -}
4163 -
4164 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4165 -{
4166 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4167 - n = __copy_to_user(to, from, n);
4168 - return n;
4169 -}
4170 -
4171 unsigned long copy_in_user(void __user *to, const void __user *from,
4172 unsigned long n)
4173 {
4174 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4175 return n;
4176 }
4177
4178 -EXPORT_SYMBOL(copy_from_user);
4179 -EXPORT_SYMBOL(copy_to_user);
4180 EXPORT_SYMBOL(copy_in_user);
4181
4182 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4183 index 2f0d1b0..36fb5cc 100644
4184 --- a/arch/powerpc/mm/fault.c
4185 +++ b/arch/powerpc/mm/fault.c
4186 @@ -32,6 +32,10 @@
4187 #include <linux/perf_event.h>
4188 #include <linux/magic.h>
4189 #include <linux/ratelimit.h>
4190 +#include <linux/slab.h>
4191 +#include <linux/pagemap.h>
4192 +#include <linux/compiler.h>
4193 +#include <linux/unistd.h>
4194
4195 #include <asm/firmware.h>
4196 #include <asm/page.h>
4197 @@ -43,6 +47,7 @@
4198 #include <asm/tlbflush.h>
4199 #include <asm/siginfo.h>
4200 #include <mm/mmu_decl.h>
4201 +#include <asm/ptrace.h>
4202
4203 #include "icswx.h"
4204
4205 @@ -68,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4206 }
4207 #endif
4208
4209 +#ifdef CONFIG_PAX_PAGEEXEC
4210 +/*
4211 + * PaX: decide what to do with offenders (regs->nip = fault address)
4212 + *
4213 + * returns 1 when task should be killed
4214 + */
4215 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4216 +{
4217 + return 1;
4218 +}
4219 +
4220 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4221 +{
4222 + unsigned long i;
4223 +
4224 + printk(KERN_ERR "PAX: bytes at PC: ");
4225 + for (i = 0; i < 5; i++) {
4226 + unsigned int c;
4227 + if (get_user(c, (unsigned int __user *)pc+i))
4228 + printk(KERN_CONT "???????? ");
4229 + else
4230 + printk(KERN_CONT "%08x ", c);
4231 + }
4232 + printk("\n");
4233 +}
4234 +#endif
4235 +
4236 /*
4237 * Check whether the instruction at regs->nip is a store using
4238 * an update addressing form which will update r1.
4239 @@ -138,7 +170,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4240 * indicate errors in DSISR but can validly be set in SRR1.
4241 */
4242 if (trap == 0x400)
4243 - error_code &= 0x48200000;
4244 + error_code &= 0x58200000;
4245 else
4246 is_write = error_code & DSISR_ISSTORE;
4247 #else
4248 @@ -276,7 +308,7 @@ good_area:
4249 * "undefined". Of those that can be set, this is the only
4250 * one which seems bad.
4251 */
4252 - if (error_code & 0x10000000)
4253 + if (error_code & DSISR_GUARDED)
4254 /* Guarded storage error. */
4255 goto bad_area;
4256 #endif /* CONFIG_8xx */
4257 @@ -291,7 +323,7 @@ good_area:
4258 * processors use the same I/D cache coherency mechanism
4259 * as embedded.
4260 */
4261 - if (error_code & DSISR_PROTFAULT)
4262 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4263 goto bad_area;
4264 #endif /* CONFIG_PPC_STD_MMU */
4265
4266 @@ -360,6 +392,23 @@ bad_area:
4267 bad_area_nosemaphore:
4268 /* User mode accesses cause a SIGSEGV */
4269 if (user_mode(regs)) {
4270 +
4271 +#ifdef CONFIG_PAX_PAGEEXEC
4272 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4273 +#ifdef CONFIG_PPC_STD_MMU
4274 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4275 +#else
4276 + if (is_exec && regs->nip == address) {
4277 +#endif
4278 + switch (pax_handle_fetch_fault(regs)) {
4279 + }
4280 +
4281 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4282 + do_group_exit(SIGKILL);
4283 + }
4284 + }
4285 +#endif
4286 +
4287 _exception(SIGSEGV, regs, code, address);
4288 return 0;
4289 }
4290 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4291 index 67a42ed..1c7210c 100644
4292 --- a/arch/powerpc/mm/mmap_64.c
4293 +++ b/arch/powerpc/mm/mmap_64.c
4294 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4295 */
4296 if (mmap_is_legacy()) {
4297 mm->mmap_base = TASK_UNMAPPED_BASE;
4298 +
4299 +#ifdef CONFIG_PAX_RANDMMAP
4300 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4301 + mm->mmap_base += mm->delta_mmap;
4302 +#endif
4303 +
4304 mm->get_unmapped_area = arch_get_unmapped_area;
4305 mm->unmap_area = arch_unmap_area;
4306 } else {
4307 mm->mmap_base = mmap_base();
4308 +
4309 +#ifdef CONFIG_PAX_RANDMMAP
4310 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4311 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4312 +#endif
4313 +
4314 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4315 mm->unmap_area = arch_unmap_area_topdown;
4316 }
4317 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4318 index 73709f7..6b90313 100644
4319 --- a/arch/powerpc/mm/slice.c
4320 +++ b/arch/powerpc/mm/slice.c
4321 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4322 if ((mm->task_size - len) < addr)
4323 return 0;
4324 vma = find_vma(mm, addr);
4325 - return (!vma || (addr + len) <= vma->vm_start);
4326 + return check_heap_stack_gap(vma, addr, len);
4327 }
4328
4329 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4330 @@ -256,7 +256,7 @@ full_search:
4331 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4332 continue;
4333 }
4334 - if (!vma || addr + len <= vma->vm_start) {
4335 + if (check_heap_stack_gap(vma, addr, len)) {
4336 /*
4337 * Remember the place where we stopped the search:
4338 */
4339 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4340 }
4341 }
4342
4343 - addr = mm->mmap_base;
4344 - while (addr > len) {
4345 + if (mm->mmap_base < len)
4346 + addr = -ENOMEM;
4347 + else
4348 + addr = mm->mmap_base - len;
4349 +
4350 + while (!IS_ERR_VALUE(addr)) {
4351 /* Go down by chunk size */
4352 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4353 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4354
4355 /* Check for hit with different page size */
4356 mask = slice_range_to_mask(addr, len);
4357 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4358 * return with success:
4359 */
4360 vma = find_vma(mm, addr);
4361 - if (!vma || (addr + len) <= vma->vm_start) {
4362 + if (check_heap_stack_gap(vma, addr, len)) {
4363 /* remember the address as a hint for next time */
4364 if (use_cache)
4365 mm->free_area_cache = addr;
4366 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4367 mm->cached_hole_size = vma->vm_start - addr;
4368
4369 /* try just below the current vma->vm_start */
4370 - addr = vma->vm_start;
4371 + addr = skip_heap_stack_gap(vma, len);
4372 }
4373
4374 /*
4375 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4376 if (fixed && addr > (mm->task_size - len))
4377 return -EINVAL;
4378
4379 +#ifdef CONFIG_PAX_RANDMMAP
4380 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4381 + addr = 0;
4382 +#endif
4383 +
4384 /* If hint, make sure it matches our alignment restrictions */
4385 if (!fixed && addr) {
4386 addr = _ALIGN_UP(addr, 1ul << pshift);
4387 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4388 index 8517d2a..d2738d4 100644
4389 --- a/arch/s390/include/asm/atomic.h
4390 +++ b/arch/s390/include/asm/atomic.h
4391 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4392 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4393 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4394
4395 +#define atomic64_read_unchecked(v) atomic64_read(v)
4396 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4397 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4398 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4399 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4400 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4401 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4402 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4403 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4404 +
4405 #define smp_mb__before_atomic_dec() smp_mb()
4406 #define smp_mb__after_atomic_dec() smp_mb()
4407 #define smp_mb__before_atomic_inc() smp_mb()
4408 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4409 index 2a30d5a..5e5586f 100644
4410 --- a/arch/s390/include/asm/cache.h
4411 +++ b/arch/s390/include/asm/cache.h
4412 @@ -11,8 +11,10 @@
4413 #ifndef __ARCH_S390_CACHE_H
4414 #define __ARCH_S390_CACHE_H
4415
4416 -#define L1_CACHE_BYTES 256
4417 +#include <linux/const.h>
4418 +
4419 #define L1_CACHE_SHIFT 8
4420 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4421 #define NET_SKB_PAD 32
4422
4423 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4424 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4425 index 547f1a6..0b22b53 100644
4426 --- a/arch/s390/include/asm/elf.h
4427 +++ b/arch/s390/include/asm/elf.h
4428 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
4429 the loader. We need to make sure that it is out of the way of the program
4430 that it will "exec", and that there is sufficient room for the brk. */
4431
4432 -extern unsigned long randomize_et_dyn(unsigned long base);
4433 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4434 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4435 +
4436 +#ifdef CONFIG_PAX_ASLR
4437 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4438 +
4439 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4440 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4441 +#endif
4442
4443 /* This yields a mask that user programs can use to figure out what
4444 instruction set this CPU supports. */
4445 @@ -211,7 +217,4 @@ struct linux_binprm;
4446 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4447 int arch_setup_additional_pages(struct linux_binprm *, int);
4448
4449 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4450 -#define arch_randomize_brk arch_randomize_brk
4451 -
4452 #endif
4453 diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
4454 index d73cc6b..1a296ad 100644
4455 --- a/arch/s390/include/asm/system.h
4456 +++ b/arch/s390/include/asm/system.h
4457 @@ -260,7 +260,7 @@ extern void (*_machine_restart)(char *command);
4458 extern void (*_machine_halt)(void);
4459 extern void (*_machine_power_off)(void);
4460
4461 -extern unsigned long arch_align_stack(unsigned long sp);
4462 +#define arch_align_stack(x) ((x) & ~0xfUL)
4463
4464 static inline int tprot(unsigned long addr)
4465 {
4466 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4467 index 2b23885..e136e31 100644
4468 --- a/arch/s390/include/asm/uaccess.h
4469 +++ b/arch/s390/include/asm/uaccess.h
4470 @@ -235,6 +235,10 @@ static inline unsigned long __must_check
4471 copy_to_user(void __user *to, const void *from, unsigned long n)
4472 {
4473 might_fault();
4474 +
4475 + if ((long)n < 0)
4476 + return n;
4477 +
4478 if (access_ok(VERIFY_WRITE, to, n))
4479 n = __copy_to_user(to, from, n);
4480 return n;
4481 @@ -260,6 +264,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4482 static inline unsigned long __must_check
4483 __copy_from_user(void *to, const void __user *from, unsigned long n)
4484 {
4485 + if ((long)n < 0)
4486 + return n;
4487 +
4488 if (__builtin_constant_p(n) && (n <= 256))
4489 return uaccess.copy_from_user_small(n, from, to);
4490 else
4491 @@ -294,6 +301,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
4492 unsigned int sz = __compiletime_object_size(to);
4493
4494 might_fault();
4495 +
4496 + if ((long)n < 0)
4497 + return n;
4498 +
4499 if (unlikely(sz != -1 && sz < n)) {
4500 copy_from_user_overflow();
4501 return n;
4502 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4503 index dfcb343..eda788a 100644
4504 --- a/arch/s390/kernel/module.c
4505 +++ b/arch/s390/kernel/module.c
4506 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4507
4508 /* Increase core size by size of got & plt and set start
4509 offsets for got and plt. */
4510 - me->core_size = ALIGN(me->core_size, 4);
4511 - me->arch.got_offset = me->core_size;
4512 - me->core_size += me->arch.got_size;
4513 - me->arch.plt_offset = me->core_size;
4514 - me->core_size += me->arch.plt_size;
4515 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4516 + me->arch.got_offset = me->core_size_rw;
4517 + me->core_size_rw += me->arch.got_size;
4518 + me->arch.plt_offset = me->core_size_rx;
4519 + me->core_size_rx += me->arch.plt_size;
4520 return 0;
4521 }
4522
4523 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4524 if (info->got_initialized == 0) {
4525 Elf_Addr *gotent;
4526
4527 - gotent = me->module_core + me->arch.got_offset +
4528 + gotent = me->module_core_rw + me->arch.got_offset +
4529 info->got_offset;
4530 *gotent = val;
4531 info->got_initialized = 1;
4532 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4533 else if (r_type == R_390_GOTENT ||
4534 r_type == R_390_GOTPLTENT)
4535 *(unsigned int *) loc =
4536 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4537 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4538 else if (r_type == R_390_GOT64 ||
4539 r_type == R_390_GOTPLT64)
4540 *(unsigned long *) loc = val;
4541 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4542 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4543 if (info->plt_initialized == 0) {
4544 unsigned int *ip;
4545 - ip = me->module_core + me->arch.plt_offset +
4546 + ip = me->module_core_rx + me->arch.plt_offset +
4547 info->plt_offset;
4548 #ifndef CONFIG_64BIT
4549 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4550 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4551 val - loc + 0xffffUL < 0x1ffffeUL) ||
4552 (r_type == R_390_PLT32DBL &&
4553 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4554 - val = (Elf_Addr) me->module_core +
4555 + val = (Elf_Addr) me->module_core_rx +
4556 me->arch.plt_offset +
4557 info->plt_offset;
4558 val += rela->r_addend - loc;
4559 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4560 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4561 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4562 val = val + rela->r_addend -
4563 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4564 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4565 if (r_type == R_390_GOTOFF16)
4566 *(unsigned short *) loc = val;
4567 else if (r_type == R_390_GOTOFF32)
4568 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4569 break;
4570 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4571 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4572 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4573 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4574 rela->r_addend - loc;
4575 if (r_type == R_390_GOTPC)
4576 *(unsigned int *) loc = val;
4577 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4578 index e795933..b32563c 100644
4579 --- a/arch/s390/kernel/process.c
4580 +++ b/arch/s390/kernel/process.c
4581 @@ -323,39 +323,3 @@ unsigned long get_wchan(struct task_struct *p)
4582 }
4583 return 0;
4584 }
4585 -
4586 -unsigned long arch_align_stack(unsigned long sp)
4587 -{
4588 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4589 - sp -= get_random_int() & ~PAGE_MASK;
4590 - return sp & ~0xf;
4591 -}
4592 -
4593 -static inline unsigned long brk_rnd(void)
4594 -{
4595 - /* 8MB for 32bit, 1GB for 64bit */
4596 - if (is_32bit_task())
4597 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4598 - else
4599 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4600 -}
4601 -
4602 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4603 -{
4604 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4605 -
4606 - if (ret < mm->brk)
4607 - return mm->brk;
4608 - return ret;
4609 -}
4610 -
4611 -unsigned long randomize_et_dyn(unsigned long base)
4612 -{
4613 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4614 -
4615 - if (!(current->flags & PF_RANDOMIZE))
4616 - return base;
4617 - if (ret < base)
4618 - return base;
4619 - return ret;
4620 -}
4621 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4622 index a0155c0..34cc491 100644
4623 --- a/arch/s390/mm/mmap.c
4624 +++ b/arch/s390/mm/mmap.c
4625 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4626 */
4627 if (mmap_is_legacy()) {
4628 mm->mmap_base = TASK_UNMAPPED_BASE;
4629 +
4630 +#ifdef CONFIG_PAX_RANDMMAP
4631 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4632 + mm->mmap_base += mm->delta_mmap;
4633 +#endif
4634 +
4635 mm->get_unmapped_area = arch_get_unmapped_area;
4636 mm->unmap_area = arch_unmap_area;
4637 } else {
4638 mm->mmap_base = mmap_base();
4639 +
4640 +#ifdef CONFIG_PAX_RANDMMAP
4641 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4642 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4643 +#endif
4644 +
4645 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4646 mm->unmap_area = arch_unmap_area_topdown;
4647 }
4648 @@ -167,10 +179,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4649 */
4650 if (mmap_is_legacy()) {
4651 mm->mmap_base = TASK_UNMAPPED_BASE;
4652 +
4653 +#ifdef CONFIG_PAX_RANDMMAP
4654 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4655 + mm->mmap_base += mm->delta_mmap;
4656 +#endif
4657 +
4658 mm->get_unmapped_area = s390_get_unmapped_area;
4659 mm->unmap_area = arch_unmap_area;
4660 } else {
4661 mm->mmap_base = mmap_base();
4662 +
4663 +#ifdef CONFIG_PAX_RANDMMAP
4664 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4665 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4666 +#endif
4667 +
4668 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4669 mm->unmap_area = arch_unmap_area_topdown;
4670 }
4671 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4672 index ae3d59f..f65f075 100644
4673 --- a/arch/score/include/asm/cache.h
4674 +++ b/arch/score/include/asm/cache.h
4675 @@ -1,7 +1,9 @@
4676 #ifndef _ASM_SCORE_CACHE_H
4677 #define _ASM_SCORE_CACHE_H
4678
4679 +#include <linux/const.h>
4680 +
4681 #define L1_CACHE_SHIFT 4
4682 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4683 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4684
4685 #endif /* _ASM_SCORE_CACHE_H */
4686 diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4687 index 589d5c7..669e274 100644
4688 --- a/arch/score/include/asm/system.h
4689 +++ b/arch/score/include/asm/system.h
4690 @@ -17,7 +17,7 @@ do { \
4691 #define finish_arch_switch(prev) do {} while (0)
4692
4693 typedef void (*vi_handler_t)(void);
4694 -extern unsigned long arch_align_stack(unsigned long sp);
4695 +#define arch_align_stack(x) (x)
4696
4697 #define mb() barrier()
4698 #define rmb() barrier()
4699 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4700 index 25d0803..d6c8e36 100644
4701 --- a/arch/score/kernel/process.c
4702 +++ b/arch/score/kernel/process.c
4703 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4704
4705 return task_pt_regs(task)->cp0_epc;
4706 }
4707 -
4708 -unsigned long arch_align_stack(unsigned long sp)
4709 -{
4710 - return sp;
4711 -}
4712 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4713 index ef9e555..331bd29 100644
4714 --- a/arch/sh/include/asm/cache.h
4715 +++ b/arch/sh/include/asm/cache.h
4716 @@ -9,10 +9,11 @@
4717 #define __ASM_SH_CACHE_H
4718 #ifdef __KERNEL__
4719
4720 +#include <linux/const.h>
4721 #include <linux/init.h>
4722 #include <cpu/cache.h>
4723
4724 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4725 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4726
4727 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4728
4729 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4730 index afeb710..d1d1289 100644
4731 --- a/arch/sh/mm/mmap.c
4732 +++ b/arch/sh/mm/mmap.c
4733 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4734 addr = PAGE_ALIGN(addr);
4735
4736 vma = find_vma(mm, addr);
4737 - if (TASK_SIZE - len >= addr &&
4738 - (!vma || addr + len <= vma->vm_start))
4739 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4740 return addr;
4741 }
4742
4743 @@ -106,7 +105,7 @@ full_search:
4744 }
4745 return -ENOMEM;
4746 }
4747 - if (likely(!vma || addr + len <= vma->vm_start)) {
4748 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4749 /*
4750 * Remember the place where we stopped the search:
4751 */
4752 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4753 addr = PAGE_ALIGN(addr);
4754
4755 vma = find_vma(mm, addr);
4756 - if (TASK_SIZE - len >= addr &&
4757 - (!vma || addr + len <= vma->vm_start))
4758 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4759 return addr;
4760 }
4761
4762 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4763 /* make sure it can fit in the remaining address space */
4764 if (likely(addr > len)) {
4765 vma = find_vma(mm, addr-len);
4766 - if (!vma || addr <= vma->vm_start) {
4767 + if (check_heap_stack_gap(vma, addr - len, len)) {
4768 /* remember the address as a hint for next time */
4769 return (mm->free_area_cache = addr-len);
4770 }
4771 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4772 if (unlikely(mm->mmap_base < len))
4773 goto bottomup;
4774
4775 - addr = mm->mmap_base-len;
4776 - if (do_colour_align)
4777 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4778 + addr = mm->mmap_base - len;
4779
4780 do {
4781 + if (do_colour_align)
4782 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4783 /*
4784 * Lookup failure means no vma is above this address,
4785 * else if new region fits below vma->vm_start,
4786 * return with success:
4787 */
4788 vma = find_vma(mm, addr);
4789 - if (likely(!vma || addr+len <= vma->vm_start)) {
4790 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4791 /* remember the address as a hint for next time */
4792 return (mm->free_area_cache = addr);
4793 }
4794 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4795 mm->cached_hole_size = vma->vm_start - addr;
4796
4797 /* try just below the current vma->vm_start */
4798 - addr = vma->vm_start-len;
4799 - if (do_colour_align)
4800 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4801 - } while (likely(len < vma->vm_start));
4802 + addr = skip_heap_stack_gap(vma, len);
4803 + } while (!IS_ERR_VALUE(addr));
4804
4805 bottomup:
4806 /*
4807 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4808 index eddcfb3..b117d90 100644
4809 --- a/arch/sparc/Makefile
4810 +++ b/arch/sparc/Makefile
4811 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4812 # Export what is needed by arch/sparc/boot/Makefile
4813 export VMLINUX_INIT VMLINUX_MAIN
4814 VMLINUX_INIT := $(head-y) $(init-y)
4815 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4816 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4817 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4818 VMLINUX_MAIN += $(drivers-y) $(net-y)
4819
4820 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4821 index 9f421df..b81fc12 100644
4822 --- a/arch/sparc/include/asm/atomic_64.h
4823 +++ b/arch/sparc/include/asm/atomic_64.h
4824 @@ -14,18 +14,40 @@
4825 #define ATOMIC64_INIT(i) { (i) }
4826
4827 #define atomic_read(v) (*(volatile int *)&(v)->counter)
4828 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4829 +{
4830 + return v->counter;
4831 +}
4832 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
4833 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4834 +{
4835 + return v->counter;
4836 +}
4837
4838 #define atomic_set(v, i) (((v)->counter) = i)
4839 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4840 +{
4841 + v->counter = i;
4842 +}
4843 #define atomic64_set(v, i) (((v)->counter) = i)
4844 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4845 +{
4846 + v->counter = i;
4847 +}
4848
4849 extern void atomic_add(int, atomic_t *);
4850 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4851 extern void atomic64_add(long, atomic64_t *);
4852 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4853 extern void atomic_sub(int, atomic_t *);
4854 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4855 extern void atomic64_sub(long, atomic64_t *);
4856 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4857
4858 extern int atomic_add_ret(int, atomic_t *);
4859 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4860 extern long atomic64_add_ret(long, atomic64_t *);
4861 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4862 extern int atomic_sub_ret(int, atomic_t *);
4863 extern long atomic64_sub_ret(long, atomic64_t *);
4864
4865 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4866 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4867
4868 #define atomic_inc_return(v) atomic_add_ret(1, v)
4869 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4870 +{
4871 + return atomic_add_ret_unchecked(1, v);
4872 +}
4873 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4874 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4875 +{
4876 + return atomic64_add_ret_unchecked(1, v);
4877 +}
4878
4879 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4880 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4881
4882 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4883 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4884 +{
4885 + return atomic_add_ret_unchecked(i, v);
4886 +}
4887 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4888 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4889 +{
4890 + return atomic64_add_ret_unchecked(i, v);
4891 +}
4892
4893 /*
4894 * atomic_inc_and_test - increment and test
4895 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4896 * other cases.
4897 */
4898 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4899 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4900 +{
4901 + return atomic_inc_return_unchecked(v) == 0;
4902 +}
4903 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4904
4905 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4906 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4907 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4908
4909 #define atomic_inc(v) atomic_add(1, v)
4910 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4911 +{
4912 + atomic_add_unchecked(1, v);
4913 +}
4914 #define atomic64_inc(v) atomic64_add(1, v)
4915 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4916 +{
4917 + atomic64_add_unchecked(1, v);
4918 +}
4919
4920 #define atomic_dec(v) atomic_sub(1, v)
4921 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4922 +{
4923 + atomic_sub_unchecked(1, v);
4924 +}
4925 #define atomic64_dec(v) atomic64_sub(1, v)
4926 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4927 +{
4928 + atomic64_sub_unchecked(1, v);
4929 +}
4930
4931 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4932 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4933
4934 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4935 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4936 +{
4937 + return cmpxchg(&v->counter, old, new);
4938 +}
4939 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4940 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4941 +{
4942 + return xchg(&v->counter, new);
4943 +}
4944
4945 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4946 {
4947 - int c, old;
4948 + int c, old, new;
4949 c = atomic_read(v);
4950 for (;;) {
4951 - if (unlikely(c == (u)))
4952 + if (unlikely(c == u))
4953 break;
4954 - old = atomic_cmpxchg((v), c, c + (a));
4955 +
4956 + asm volatile("addcc %2, %0, %0\n"
4957 +
4958 +#ifdef CONFIG_PAX_REFCOUNT
4959 + "tvs %%icc, 6\n"
4960 +#endif
4961 +
4962 + : "=r" (new)
4963 + : "0" (c), "ir" (a)
4964 + : "cc");
4965 +
4966 + old = atomic_cmpxchg(v, c, new);
4967 if (likely(old == c))
4968 break;
4969 c = old;
4970 @@ -89,20 +166,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
4971 #define atomic64_cmpxchg(v, o, n) \
4972 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4973 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4974 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4975 +{
4976 + return xchg(&v->counter, new);
4977 +}
4978
4979 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4980 {
4981 - long c, old;
4982 + long c, old, new;
4983 c = atomic64_read(v);
4984 for (;;) {
4985 - if (unlikely(c == (u)))
4986 + if (unlikely(c == u))
4987 break;
4988 - old = atomic64_cmpxchg((v), c, c + (a));
4989 +
4990 + asm volatile("addcc %2, %0, %0\n"
4991 +
4992 +#ifdef CONFIG_PAX_REFCOUNT
4993 + "tvs %%xcc, 6\n"
4994 +#endif
4995 +
4996 + : "=r" (new)
4997 + : "0" (c), "ir" (a)
4998 + : "cc");
4999 +
5000 + old = atomic64_cmpxchg(v, c, new);
5001 if (likely(old == c))
5002 break;
5003 c = old;
5004 }
5005 - return c != (u);
5006 + return c != u;
5007 }
5008
5009 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5010 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5011 index 69358b5..9d0d492 100644
5012 --- a/arch/sparc/include/asm/cache.h
5013 +++ b/arch/sparc/include/asm/cache.h
5014 @@ -7,10 +7,12 @@
5015 #ifndef _SPARC_CACHE_H
5016 #define _SPARC_CACHE_H
5017
5018 +#include <linux/const.h>
5019 +
5020 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5021
5022 #define L1_CACHE_SHIFT 5
5023 -#define L1_CACHE_BYTES 32
5024 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5025
5026 #ifdef CONFIG_SPARC32
5027 #define SMP_CACHE_BYTES_SHIFT 5
5028 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5029 index 4269ca6..e3da77f 100644
5030 --- a/arch/sparc/include/asm/elf_32.h
5031 +++ b/arch/sparc/include/asm/elf_32.h
5032 @@ -114,6 +114,13 @@ typedef struct {
5033
5034 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5035
5036 +#ifdef CONFIG_PAX_ASLR
5037 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5038 +
5039 +#define PAX_DELTA_MMAP_LEN 16
5040 +#define PAX_DELTA_STACK_LEN 16
5041 +#endif
5042 +
5043 /* This yields a mask that user programs can use to figure out what
5044 instruction set this cpu supports. This can NOT be done in userspace
5045 on Sparc. */
5046 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5047 index 7df8b7f..4946269 100644
5048 --- a/arch/sparc/include/asm/elf_64.h
5049 +++ b/arch/sparc/include/asm/elf_64.h
5050 @@ -180,6 +180,13 @@ typedef struct {
5051 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5052 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5053
5054 +#ifdef CONFIG_PAX_ASLR
5055 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5056 +
5057 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5058 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5059 +#endif
5060 +
5061 extern unsigned long sparc64_elf_hwcap;
5062 #define ELF_HWCAP sparc64_elf_hwcap
5063
5064 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5065 index ca2b344..c6084f89 100644
5066 --- a/arch/sparc/include/asm/pgalloc_32.h
5067 +++ b/arch/sparc/include/asm/pgalloc_32.h
5068 @@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5069 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5070 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5071 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5072 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5073
5074 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5075 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5076 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5077 index 40b2d7a..22a665b 100644
5078 --- a/arch/sparc/include/asm/pgalloc_64.h
5079 +++ b/arch/sparc/include/asm/pgalloc_64.h
5080 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5081 }
5082
5083 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5084 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5085
5086 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5087 {
5088 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5089 index a790cc6..091ed94 100644
5090 --- a/arch/sparc/include/asm/pgtable_32.h
5091 +++ b/arch/sparc/include/asm/pgtable_32.h
5092 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5093 BTFIXUPDEF_INT(page_none)
5094 BTFIXUPDEF_INT(page_copy)
5095 BTFIXUPDEF_INT(page_readonly)
5096 +
5097 +#ifdef CONFIG_PAX_PAGEEXEC
5098 +BTFIXUPDEF_INT(page_shared_noexec)
5099 +BTFIXUPDEF_INT(page_copy_noexec)
5100 +BTFIXUPDEF_INT(page_readonly_noexec)
5101 +#endif
5102 +
5103 BTFIXUPDEF_INT(page_kernel)
5104
5105 #define PMD_SHIFT SUN4C_PMD_SHIFT
5106 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5107 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5108 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5109
5110 +#ifdef CONFIG_PAX_PAGEEXEC
5111 +extern pgprot_t PAGE_SHARED_NOEXEC;
5112 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5113 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5114 +#else
5115 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5116 +# define PAGE_COPY_NOEXEC PAGE_COPY
5117 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5118 +#endif
5119 +
5120 extern unsigned long page_kernel;
5121
5122 #ifdef MODULE
5123 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5124 index f6ae2b2..b03ffc7 100644
5125 --- a/arch/sparc/include/asm/pgtsrmmu.h
5126 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5127 @@ -115,6 +115,13 @@
5128 SRMMU_EXEC | SRMMU_REF)
5129 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5130 SRMMU_EXEC | SRMMU_REF)
5131 +
5132 +#ifdef CONFIG_PAX_PAGEEXEC
5133 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5134 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5135 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5136 +#endif
5137 +
5138 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5139 SRMMU_DIRTY | SRMMU_REF)
5140
5141 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5142 index 9689176..63c18ea 100644
5143 --- a/arch/sparc/include/asm/spinlock_64.h
5144 +++ b/arch/sparc/include/asm/spinlock_64.h
5145 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5146
5147 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5148
5149 -static void inline arch_read_lock(arch_rwlock_t *lock)
5150 +static inline void arch_read_lock(arch_rwlock_t *lock)
5151 {
5152 unsigned long tmp1, tmp2;
5153
5154 __asm__ __volatile__ (
5155 "1: ldsw [%2], %0\n"
5156 " brlz,pn %0, 2f\n"
5157 -"4: add %0, 1, %1\n"
5158 +"4: addcc %0, 1, %1\n"
5159 +
5160 +#ifdef CONFIG_PAX_REFCOUNT
5161 +" tvs %%icc, 6\n"
5162 +#endif
5163 +
5164 " cas [%2], %0, %1\n"
5165 " cmp %0, %1\n"
5166 " bne,pn %%icc, 1b\n"
5167 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5168 " .previous"
5169 : "=&r" (tmp1), "=&r" (tmp2)
5170 : "r" (lock)
5171 - : "memory");
5172 + : "memory", "cc");
5173 }
5174
5175 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5176 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5177 {
5178 int tmp1, tmp2;
5179
5180 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5181 "1: ldsw [%2], %0\n"
5182 " brlz,a,pn %0, 2f\n"
5183 " mov 0, %0\n"
5184 -" add %0, 1, %1\n"
5185 +" addcc %0, 1, %1\n"
5186 +
5187 +#ifdef CONFIG_PAX_REFCOUNT
5188 +" tvs %%icc, 6\n"
5189 +#endif
5190 +
5191 " cas [%2], %0, %1\n"
5192 " cmp %0, %1\n"
5193 " bne,pn %%icc, 1b\n"
5194 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5195 return tmp1;
5196 }
5197
5198 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5199 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5200 {
5201 unsigned long tmp1, tmp2;
5202
5203 __asm__ __volatile__(
5204 "1: lduw [%2], %0\n"
5205 -" sub %0, 1, %1\n"
5206 +" subcc %0, 1, %1\n"
5207 +
5208 +#ifdef CONFIG_PAX_REFCOUNT
5209 +" tvs %%icc, 6\n"
5210 +#endif
5211 +
5212 " cas [%2], %0, %1\n"
5213 " cmp %0, %1\n"
5214 " bne,pn %%xcc, 1b\n"
5215 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5216 : "memory");
5217 }
5218
5219 -static void inline arch_write_lock(arch_rwlock_t *lock)
5220 +static inline void arch_write_lock(arch_rwlock_t *lock)
5221 {
5222 unsigned long mask, tmp1, tmp2;
5223
5224 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5225 : "memory");
5226 }
5227
5228 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5229 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5230 {
5231 __asm__ __volatile__(
5232 " stw %%g0, [%0]"
5233 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5234 : "memory");
5235 }
5236
5237 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5238 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5239 {
5240 unsigned long mask, tmp1, tmp2, result;
5241
5242 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5243 index c2a1080..21ed218 100644
5244 --- a/arch/sparc/include/asm/thread_info_32.h
5245 +++ b/arch/sparc/include/asm/thread_info_32.h
5246 @@ -50,6 +50,8 @@ struct thread_info {
5247 unsigned long w_saved;
5248
5249 struct restart_block restart_block;
5250 +
5251 + unsigned long lowest_stack;
5252 };
5253
5254 /*
5255 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5256 index 01d057f..0a02f7e 100644
5257 --- a/arch/sparc/include/asm/thread_info_64.h
5258 +++ b/arch/sparc/include/asm/thread_info_64.h
5259 @@ -63,6 +63,8 @@ struct thread_info {
5260 struct pt_regs *kern_una_regs;
5261 unsigned int kern_una_insn;
5262
5263 + unsigned long lowest_stack;
5264 +
5265 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5266 };
5267
5268 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5269 index e88fbe5..96b0ce5 100644
5270 --- a/arch/sparc/include/asm/uaccess.h
5271 +++ b/arch/sparc/include/asm/uaccess.h
5272 @@ -1,5 +1,13 @@
5273 #ifndef ___ASM_SPARC_UACCESS_H
5274 #define ___ASM_SPARC_UACCESS_H
5275 +
5276 +#ifdef __KERNEL__
5277 +#ifndef __ASSEMBLY__
5278 +#include <linux/types.h>
5279 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5280 +#endif
5281 +#endif
5282 +
5283 #if defined(__sparc__) && defined(__arch64__)
5284 #include <asm/uaccess_64.h>
5285 #else
5286 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5287 index 8303ac4..07f333d 100644
5288 --- a/arch/sparc/include/asm/uaccess_32.h
5289 +++ b/arch/sparc/include/asm/uaccess_32.h
5290 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5291
5292 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5293 {
5294 - if (n && __access_ok((unsigned long) to, n))
5295 + if ((long)n < 0)
5296 + return n;
5297 +
5298 + if (n && __access_ok((unsigned long) to, n)) {
5299 + if (!__builtin_constant_p(n))
5300 + check_object_size(from, n, true);
5301 return __copy_user(to, (__force void __user *) from, n);
5302 - else
5303 + } else
5304 return n;
5305 }
5306
5307 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5308 {
5309 + if ((long)n < 0)
5310 + return n;
5311 +
5312 + if (!__builtin_constant_p(n))
5313 + check_object_size(from, n, true);
5314 +
5315 return __copy_user(to, (__force void __user *) from, n);
5316 }
5317
5318 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5319 {
5320 - if (n && __access_ok((unsigned long) from, n))
5321 + if ((long)n < 0)
5322 + return n;
5323 +
5324 + if (n && __access_ok((unsigned long) from, n)) {
5325 + if (!__builtin_constant_p(n))
5326 + check_object_size(to, n, false);
5327 return __copy_user((__force void __user *) to, from, n);
5328 - else
5329 + } else
5330 return n;
5331 }
5332
5333 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5334 {
5335 + if ((long)n < 0)
5336 + return n;
5337 +
5338 return __copy_user((__force void __user *) to, from, n);
5339 }
5340
5341 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5342 index 3e1449f..5293a0e 100644
5343 --- a/arch/sparc/include/asm/uaccess_64.h
5344 +++ b/arch/sparc/include/asm/uaccess_64.h
5345 @@ -10,6 +10,7 @@
5346 #include <linux/compiler.h>
5347 #include <linux/string.h>
5348 #include <linux/thread_info.h>
5349 +#include <linux/kernel.h>
5350 #include <asm/asi.h>
5351 #include <asm/system.h>
5352 #include <asm/spitfire.h>
5353 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5354 static inline unsigned long __must_check
5355 copy_from_user(void *to, const void __user *from, unsigned long size)
5356 {
5357 - unsigned long ret = ___copy_from_user(to, from, size);
5358 + unsigned long ret;
5359
5360 + if ((long)size < 0 || size > INT_MAX)
5361 + return size;
5362 +
5363 + if (!__builtin_constant_p(size))
5364 + check_object_size(to, size, false);
5365 +
5366 + ret = ___copy_from_user(to, from, size);
5367 if (unlikely(ret))
5368 ret = copy_from_user_fixup(to, from, size);
5369
5370 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5371 static inline unsigned long __must_check
5372 copy_to_user(void __user *to, const void *from, unsigned long size)
5373 {
5374 - unsigned long ret = ___copy_to_user(to, from, size);
5375 + unsigned long ret;
5376
5377 + if ((long)size < 0 || size > INT_MAX)
5378 + return size;
5379 +
5380 + if (!__builtin_constant_p(size))
5381 + check_object_size(from, size, true);
5382 +
5383 + ret = ___copy_to_user(to, from, size);
5384 if (unlikely(ret))
5385 ret = copy_to_user_fixup(to, from, size);
5386 return ret;
5387 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5388 index cb85458..e063f17 100644
5389 --- a/arch/sparc/kernel/Makefile
5390 +++ b/arch/sparc/kernel/Makefile
5391 @@ -3,7 +3,7 @@
5392 #
5393
5394 asflags-y := -ansi
5395 -ccflags-y := -Werror
5396 +#ccflags-y := -Werror
5397
5398 extra-y := head_$(BITS).o
5399 extra-y += init_task.o
5400 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5401 index f793742..4d880af 100644
5402 --- a/arch/sparc/kernel/process_32.c
5403 +++ b/arch/sparc/kernel/process_32.c
5404 @@ -204,7 +204,7 @@ void __show_backtrace(unsigned long fp)
5405 rw->ins[4], rw->ins[5],
5406 rw->ins[6],
5407 rw->ins[7]);
5408 - printk("%pS\n", (void *) rw->ins[7]);
5409 + printk("%pA\n", (void *) rw->ins[7]);
5410 rw = (struct reg_window32 *) rw->ins[6];
5411 }
5412 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5413 @@ -271,14 +271,14 @@ void show_regs(struct pt_regs *r)
5414
5415 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5416 r->psr, r->pc, r->npc, r->y, print_tainted());
5417 - printk("PC: <%pS>\n", (void *) r->pc);
5418 + printk("PC: <%pA>\n", (void *) r->pc);
5419 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5420 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5421 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5422 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5423 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5424 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5425 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5426 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5427
5428 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5429 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5430 @@ -313,7 +313,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5431 rw = (struct reg_window32 *) fp;
5432 pc = rw->ins[7];
5433 printk("[%08lx : ", pc);
5434 - printk("%pS ] ", (void *) pc);
5435 + printk("%pA ] ", (void *) pc);
5436 fp = rw->ins[6];
5437 } while (++count < 16);
5438 printk("\n");
5439 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5440 index 39d8b05..d1a7d90 100644
5441 --- a/arch/sparc/kernel/process_64.c
5442 +++ b/arch/sparc/kernel/process_64.c
5443 @@ -182,14 +182,14 @@ static void show_regwindow(struct pt_regs *regs)
5444 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5445 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5446 if (regs->tstate & TSTATE_PRIV)
5447 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5448 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5449 }
5450
5451 void show_regs(struct pt_regs *regs)
5452 {
5453 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5454 regs->tpc, regs->tnpc, regs->y, print_tainted());
5455 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5456 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5457 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5458 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5459 regs->u_regs[3]);
5460 @@ -202,7 +202,7 @@ void show_regs(struct pt_regs *regs)
5461 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5462 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5463 regs->u_regs[15]);
5464 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5465 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5466 show_regwindow(regs);
5467 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5468 }
5469 @@ -287,7 +287,7 @@ void arch_trigger_all_cpu_backtrace(void)
5470 ((tp && tp->task) ? tp->task->pid : -1));
5471
5472 if (gp->tstate & TSTATE_PRIV) {
5473 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5474 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5475 (void *) gp->tpc,
5476 (void *) gp->o7,
5477 (void *) gp->i7,
5478 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5479 index 42b282f..28ce9f2 100644
5480 --- a/arch/sparc/kernel/sys_sparc_32.c
5481 +++ b/arch/sparc/kernel/sys_sparc_32.c
5482 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5483 if (ARCH_SUN4C && len > 0x20000000)
5484 return -ENOMEM;
5485 if (!addr)
5486 - addr = TASK_UNMAPPED_BASE;
5487 + addr = current->mm->mmap_base;
5488
5489 if (flags & MAP_SHARED)
5490 addr = COLOUR_ALIGN(addr);
5491 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5492 }
5493 if (TASK_SIZE - PAGE_SIZE - len < addr)
5494 return -ENOMEM;
5495 - if (!vmm || addr + len <= vmm->vm_start)
5496 + if (check_heap_stack_gap(vmm, addr, len))
5497 return addr;
5498 addr = vmm->vm_end;
5499 if (flags & MAP_SHARED)
5500 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5501 index 232df99..cee1f9c 100644
5502 --- a/arch/sparc/kernel/sys_sparc_64.c
5503 +++ b/arch/sparc/kernel/sys_sparc_64.c
5504 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5505 /* We do not accept a shared mapping if it would violate
5506 * cache aliasing constraints.
5507 */
5508 - if ((flags & MAP_SHARED) &&
5509 + if ((filp || (flags & MAP_SHARED)) &&
5510 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5511 return -EINVAL;
5512 return addr;
5513 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5514 if (filp || (flags & MAP_SHARED))
5515 do_color_align = 1;
5516
5517 +#ifdef CONFIG_PAX_RANDMMAP
5518 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5519 +#endif
5520 +
5521 if (addr) {
5522 if (do_color_align)
5523 addr = COLOUR_ALIGN(addr, pgoff);
5524 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5525 addr = PAGE_ALIGN(addr);
5526
5527 vma = find_vma(mm, addr);
5528 - if (task_size - len >= addr &&
5529 - (!vma || addr + len <= vma->vm_start))
5530 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5531 return addr;
5532 }
5533
5534 if (len > mm->cached_hole_size) {
5535 - start_addr = addr = mm->free_area_cache;
5536 + start_addr = addr = mm->free_area_cache;
5537 } else {
5538 - start_addr = addr = TASK_UNMAPPED_BASE;
5539 + start_addr = addr = mm->mmap_base;
5540 mm->cached_hole_size = 0;
5541 }
5542
5543 @@ -174,14 +177,14 @@ full_search:
5544 vma = find_vma(mm, VA_EXCLUDE_END);
5545 }
5546 if (unlikely(task_size < addr)) {
5547 - if (start_addr != TASK_UNMAPPED_BASE) {
5548 - start_addr = addr = TASK_UNMAPPED_BASE;
5549 + if (start_addr != mm->mmap_base) {
5550 + start_addr = addr = mm->mmap_base;
5551 mm->cached_hole_size = 0;
5552 goto full_search;
5553 }
5554 return -ENOMEM;
5555 }
5556 - if (likely(!vma || addr + len <= vma->vm_start)) {
5557 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5558 /*
5559 * Remember the place where we stopped the search:
5560 */
5561 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5562 /* We do not accept a shared mapping if it would violate
5563 * cache aliasing constraints.
5564 */
5565 - if ((flags & MAP_SHARED) &&
5566 + if ((filp || (flags & MAP_SHARED)) &&
5567 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5568 return -EINVAL;
5569 return addr;
5570 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5571 addr = PAGE_ALIGN(addr);
5572
5573 vma = find_vma(mm, addr);
5574 - if (task_size - len >= addr &&
5575 - (!vma || addr + len <= vma->vm_start))
5576 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5577 return addr;
5578 }
5579
5580 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5581 /* make sure it can fit in the remaining address space */
5582 if (likely(addr > len)) {
5583 vma = find_vma(mm, addr-len);
5584 - if (!vma || addr <= vma->vm_start) {
5585 + if (check_heap_stack_gap(vma, addr - len, len)) {
5586 /* remember the address as a hint for next time */
5587 return (mm->free_area_cache = addr-len);
5588 }
5589 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5590 if (unlikely(mm->mmap_base < len))
5591 goto bottomup;
5592
5593 - addr = mm->mmap_base-len;
5594 - if (do_color_align)
5595 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5596 + addr = mm->mmap_base - len;
5597
5598 do {
5599 + if (do_color_align)
5600 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5601 /*
5602 * Lookup failure means no vma is above this address,
5603 * else if new region fits below vma->vm_start,
5604 * return with success:
5605 */
5606 vma = find_vma(mm, addr);
5607 - if (likely(!vma || addr+len <= vma->vm_start)) {
5608 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5609 /* remember the address as a hint for next time */
5610 return (mm->free_area_cache = addr);
5611 }
5612 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5613 mm->cached_hole_size = vma->vm_start - addr;
5614
5615 /* try just below the current vma->vm_start */
5616 - addr = vma->vm_start-len;
5617 - if (do_color_align)
5618 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5619 - } while (likely(len < vma->vm_start));
5620 + addr = skip_heap_stack_gap(vma, len);
5621 + } while (!IS_ERR_VALUE(addr));
5622
5623 bottomup:
5624 /*
5625 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5626 gap == RLIM_INFINITY ||
5627 sysctl_legacy_va_layout) {
5628 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5629 +
5630 +#ifdef CONFIG_PAX_RANDMMAP
5631 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5632 + mm->mmap_base += mm->delta_mmap;
5633 +#endif
5634 +
5635 mm->get_unmapped_area = arch_get_unmapped_area;
5636 mm->unmap_area = arch_unmap_area;
5637 } else {
5638 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5639 gap = (task_size / 6 * 5);
5640
5641 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5642 +
5643 +#ifdef CONFIG_PAX_RANDMMAP
5644 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5645 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5646 +#endif
5647 +
5648 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5649 mm->unmap_area = arch_unmap_area_topdown;
5650 }
5651 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5652 index 591f20c..0f1b925 100644
5653 --- a/arch/sparc/kernel/traps_32.c
5654 +++ b/arch/sparc/kernel/traps_32.c
5655 @@ -45,6 +45,8 @@ static void instruction_dump(unsigned long *pc)
5656 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5657 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5658
5659 +extern void gr_handle_kernel_exploit(void);
5660 +
5661 void die_if_kernel(char *str, struct pt_regs *regs)
5662 {
5663 static int die_counter;
5664 @@ -77,15 +79,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5665 count++ < 30 &&
5666 (((unsigned long) rw) >= PAGE_OFFSET) &&
5667 !(((unsigned long) rw) & 0x7)) {
5668 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
5669 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
5670 (void *) rw->ins[7]);
5671 rw = (struct reg_window32 *)rw->ins[6];
5672 }
5673 }
5674 printk("Instruction DUMP:");
5675 instruction_dump ((unsigned long *) regs->pc);
5676 - if(regs->psr & PSR_PS)
5677 + if(regs->psr & PSR_PS) {
5678 + gr_handle_kernel_exploit();
5679 do_exit(SIGKILL);
5680 + }
5681 do_exit(SIGSEGV);
5682 }
5683
5684 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5685 index 0cbdaa4..438e4c9 100644
5686 --- a/arch/sparc/kernel/traps_64.c
5687 +++ b/arch/sparc/kernel/traps_64.c
5688 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5689 i + 1,
5690 p->trapstack[i].tstate, p->trapstack[i].tpc,
5691 p->trapstack[i].tnpc, p->trapstack[i].tt);
5692 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5693 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5694 }
5695 }
5696
5697 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5698
5699 lvl -= 0x100;
5700 if (regs->tstate & TSTATE_PRIV) {
5701 +
5702 +#ifdef CONFIG_PAX_REFCOUNT
5703 + if (lvl == 6)
5704 + pax_report_refcount_overflow(regs);
5705 +#endif
5706 +
5707 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5708 die_if_kernel(buffer, regs);
5709 }
5710 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5711 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5712 {
5713 char buffer[32];
5714 -
5715 +
5716 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5717 0, lvl, SIGTRAP) == NOTIFY_STOP)
5718 return;
5719
5720 +#ifdef CONFIG_PAX_REFCOUNT
5721 + if (lvl == 6)
5722 + pax_report_refcount_overflow(regs);
5723 +#endif
5724 +
5725 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5726
5727 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5728 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5729 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5730 printk("%s" "ERROR(%d): ",
5731 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5732 - printk("TPC<%pS>\n", (void *) regs->tpc);
5733 + printk("TPC<%pA>\n", (void *) regs->tpc);
5734 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5735 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5736 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5737 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5738 smp_processor_id(),
5739 (type & 0x1) ? 'I' : 'D',
5740 regs->tpc);
5741 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5742 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5743 panic("Irrecoverable Cheetah+ parity error.");
5744 }
5745
5746 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5747 smp_processor_id(),
5748 (type & 0x1) ? 'I' : 'D',
5749 regs->tpc);
5750 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5751 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5752 }
5753
5754 struct sun4v_error_entry {
5755 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5756
5757 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5758 regs->tpc, tl);
5759 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5760 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5761 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5762 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5763 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5764 (void *) regs->u_regs[UREG_I7]);
5765 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5766 "pte[%lx] error[%lx]\n",
5767 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5768
5769 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5770 regs->tpc, tl);
5771 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5772 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5773 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5774 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5775 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5776 (void *) regs->u_regs[UREG_I7]);
5777 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5778 "pte[%lx] error[%lx]\n",
5779 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5780 fp = (unsigned long)sf->fp + STACK_BIAS;
5781 }
5782
5783 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5784 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5785 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5786 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
5787 int index = tsk->curr_ret_stack;
5788 if (tsk->ret_stack && index >= graph) {
5789 pc = tsk->ret_stack[index - graph].ret;
5790 - printk(" [%016lx] %pS\n", pc, (void *) pc);
5791 + printk(" [%016lx] %pA\n", pc, (void *) pc);
5792 graph++;
5793 }
5794 }
5795 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5796 return (struct reg_window *) (fp + STACK_BIAS);
5797 }
5798
5799 +extern void gr_handle_kernel_exploit(void);
5800 +
5801 void die_if_kernel(char *str, struct pt_regs *regs)
5802 {
5803 static int die_counter;
5804 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5805 while (rw &&
5806 count++ < 30 &&
5807 kstack_valid(tp, (unsigned long) rw)) {
5808 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
5809 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
5810 (void *) rw->ins[7]);
5811
5812 rw = kernel_stack_up(rw);
5813 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5814 }
5815 user_instruction_dump ((unsigned int __user *) regs->tpc);
5816 }
5817 - if (regs->tstate & TSTATE_PRIV)
5818 + if (regs->tstate & TSTATE_PRIV) {
5819 + gr_handle_kernel_exploit();
5820 do_exit(SIGKILL);
5821 + }
5822 do_exit(SIGSEGV);
5823 }
5824 EXPORT_SYMBOL(die_if_kernel);
5825 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5826 index 76e4ac1..78f8bb1 100644
5827 --- a/arch/sparc/kernel/unaligned_64.c
5828 +++ b/arch/sparc/kernel/unaligned_64.c
5829 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
5830 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
5831
5832 if (__ratelimit(&ratelimit)) {
5833 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
5834 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
5835 regs->tpc, (void *) regs->tpc);
5836 }
5837 }
5838 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5839 index a3fc437..fea9957 100644
5840 --- a/arch/sparc/lib/Makefile
5841 +++ b/arch/sparc/lib/Makefile
5842 @@ -2,7 +2,7 @@
5843 #
5844
5845 asflags-y := -ansi -DST_DIV0=0x02
5846 -ccflags-y := -Werror
5847 +#ccflags-y := -Werror
5848
5849 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5850 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5851 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5852 index 59186e0..f747d7a 100644
5853 --- a/arch/sparc/lib/atomic_64.S
5854 +++ b/arch/sparc/lib/atomic_64.S
5855 @@ -18,7 +18,12 @@
5856 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5857 BACKOFF_SETUP(%o2)
5858 1: lduw [%o1], %g1
5859 - add %g1, %o0, %g7
5860 + addcc %g1, %o0, %g7
5861 +
5862 +#ifdef CONFIG_PAX_REFCOUNT
5863 + tvs %icc, 6
5864 +#endif
5865 +
5866 cas [%o1], %g1, %g7
5867 cmp %g1, %g7
5868 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5869 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5870 2: BACKOFF_SPIN(%o2, %o3, 1b)
5871 .size atomic_add, .-atomic_add
5872
5873 + .globl atomic_add_unchecked
5874 + .type atomic_add_unchecked,#function
5875 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5876 + BACKOFF_SETUP(%o2)
5877 +1: lduw [%o1], %g1
5878 + add %g1, %o0, %g7
5879 + cas [%o1], %g1, %g7
5880 + cmp %g1, %g7
5881 + bne,pn %icc, 2f
5882 + nop
5883 + retl
5884 + nop
5885 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5886 + .size atomic_add_unchecked, .-atomic_add_unchecked
5887 +
5888 .globl atomic_sub
5889 .type atomic_sub,#function
5890 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5891 BACKOFF_SETUP(%o2)
5892 1: lduw [%o1], %g1
5893 - sub %g1, %o0, %g7
5894 + subcc %g1, %o0, %g7
5895 +
5896 +#ifdef CONFIG_PAX_REFCOUNT
5897 + tvs %icc, 6
5898 +#endif
5899 +
5900 cas [%o1], %g1, %g7
5901 cmp %g1, %g7
5902 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5903 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5904 2: BACKOFF_SPIN(%o2, %o3, 1b)
5905 .size atomic_sub, .-atomic_sub
5906
5907 + .globl atomic_sub_unchecked
5908 + .type atomic_sub_unchecked,#function
5909 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5910 + BACKOFF_SETUP(%o2)
5911 +1: lduw [%o1], %g1
5912 + sub %g1, %o0, %g7
5913 + cas [%o1], %g1, %g7
5914 + cmp %g1, %g7
5915 + bne,pn %icc, 2f
5916 + nop
5917 + retl
5918 + nop
5919 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5920 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
5921 +
5922 .globl atomic_add_ret
5923 .type atomic_add_ret,#function
5924 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5925 BACKOFF_SETUP(%o2)
5926 1: lduw [%o1], %g1
5927 - add %g1, %o0, %g7
5928 + addcc %g1, %o0, %g7
5929 +
5930 +#ifdef CONFIG_PAX_REFCOUNT
5931 + tvs %icc, 6
5932 +#endif
5933 +
5934 cas [%o1], %g1, %g7
5935 cmp %g1, %g7
5936 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5937 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5938 2: BACKOFF_SPIN(%o2, %o3, 1b)
5939 .size atomic_add_ret, .-atomic_add_ret
5940
5941 + .globl atomic_add_ret_unchecked
5942 + .type atomic_add_ret_unchecked,#function
5943 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5944 + BACKOFF_SETUP(%o2)
5945 +1: lduw [%o1], %g1
5946 + addcc %g1, %o0, %g7
5947 + cas [%o1], %g1, %g7
5948 + cmp %g1, %g7
5949 + bne,pn %icc, 2f
5950 + add %g7, %o0, %g7
5951 + sra %g7, 0, %o0
5952 + retl
5953 + nop
5954 +2: BACKOFF_SPIN(%o2, %o3, 1b)
5955 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5956 +
5957 .globl atomic_sub_ret
5958 .type atomic_sub_ret,#function
5959 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5960 BACKOFF_SETUP(%o2)
5961 1: lduw [%o1], %g1
5962 - sub %g1, %o0, %g7
5963 + subcc %g1, %o0, %g7
5964 +
5965 +#ifdef CONFIG_PAX_REFCOUNT
5966 + tvs %icc, 6
5967 +#endif
5968 +
5969 cas [%o1], %g1, %g7
5970 cmp %g1, %g7
5971 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
5972 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5973 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5974 BACKOFF_SETUP(%o2)
5975 1: ldx [%o1], %g1
5976 - add %g1, %o0, %g7
5977 + addcc %g1, %o0, %g7
5978 +
5979 +#ifdef CONFIG_PAX_REFCOUNT
5980 + tvs %xcc, 6
5981 +#endif
5982 +
5983 casx [%o1], %g1, %g7
5984 cmp %g1, %g7
5985 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
5986 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5987 2: BACKOFF_SPIN(%o2, %o3, 1b)
5988 .size atomic64_add, .-atomic64_add
5989
5990 + .globl atomic64_add_unchecked
5991 + .type atomic64_add_unchecked,#function
5992 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5993 + BACKOFF_SETUP(%o2)
5994 +1: ldx [%o1], %g1
5995 + addcc %g1, %o0, %g7
5996 + casx [%o1], %g1, %g7
5997 + cmp %g1, %g7
5998 + bne,pn %xcc, 2f
5999 + nop
6000 + retl
6001 + nop
6002 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6003 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6004 +
6005 .globl atomic64_sub
6006 .type atomic64_sub,#function
6007 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6008 BACKOFF_SETUP(%o2)
6009 1: ldx [%o1], %g1
6010 - sub %g1, %o0, %g7
6011 + subcc %g1, %o0, %g7
6012 +
6013 +#ifdef CONFIG_PAX_REFCOUNT
6014 + tvs %xcc, 6
6015 +#endif
6016 +
6017 casx [%o1], %g1, %g7
6018 cmp %g1, %g7
6019 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6020 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6021 2: BACKOFF_SPIN(%o2, %o3, 1b)
6022 .size atomic64_sub, .-atomic64_sub
6023
6024 + .globl atomic64_sub_unchecked
6025 + .type atomic64_sub_unchecked,#function
6026 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6027 + BACKOFF_SETUP(%o2)
6028 +1: ldx [%o1], %g1
6029 + subcc %g1, %o0, %g7
6030 + casx [%o1], %g1, %g7
6031 + cmp %g1, %g7
6032 + bne,pn %xcc, 2f
6033 + nop
6034 + retl
6035 + nop
6036 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6037 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6038 +
6039 .globl atomic64_add_ret
6040 .type atomic64_add_ret,#function
6041 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6042 BACKOFF_SETUP(%o2)
6043 1: ldx [%o1], %g1
6044 - add %g1, %o0, %g7
6045 + addcc %g1, %o0, %g7
6046 +
6047 +#ifdef CONFIG_PAX_REFCOUNT
6048 + tvs %xcc, 6
6049 +#endif
6050 +
6051 casx [%o1], %g1, %g7
6052 cmp %g1, %g7
6053 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6054 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6055 2: BACKOFF_SPIN(%o2, %o3, 1b)
6056 .size atomic64_add_ret, .-atomic64_add_ret
6057
6058 + .globl atomic64_add_ret_unchecked
6059 + .type atomic64_add_ret_unchecked,#function
6060 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6061 + BACKOFF_SETUP(%o2)
6062 +1: ldx [%o1], %g1
6063 + addcc %g1, %o0, %g7
6064 + casx [%o1], %g1, %g7
6065 + cmp %g1, %g7
6066 + bne,pn %xcc, 2f
6067 + add %g7, %o0, %g7
6068 + mov %g7, %o0
6069 + retl
6070 + nop
6071 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6072 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6073 +
6074 .globl atomic64_sub_ret
6075 .type atomic64_sub_ret,#function
6076 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6077 BACKOFF_SETUP(%o2)
6078 1: ldx [%o1], %g1
6079 - sub %g1, %o0, %g7
6080 + subcc %g1, %o0, %g7
6081 +
6082 +#ifdef CONFIG_PAX_REFCOUNT
6083 + tvs %xcc, 6
6084 +#endif
6085 +
6086 casx [%o1], %g1, %g7
6087 cmp %g1, %g7
6088 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6089 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6090 index f73c224..662af10 100644
6091 --- a/arch/sparc/lib/ksyms.c
6092 +++ b/arch/sparc/lib/ksyms.c
6093 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6094
6095 /* Atomic counter implementation. */
6096 EXPORT_SYMBOL(atomic_add);
6097 +EXPORT_SYMBOL(atomic_add_unchecked);
6098 EXPORT_SYMBOL(atomic_add_ret);
6099 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6100 EXPORT_SYMBOL(atomic_sub);
6101 +EXPORT_SYMBOL(atomic_sub_unchecked);
6102 EXPORT_SYMBOL(atomic_sub_ret);
6103 EXPORT_SYMBOL(atomic64_add);
6104 +EXPORT_SYMBOL(atomic64_add_unchecked);
6105 EXPORT_SYMBOL(atomic64_add_ret);
6106 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6107 EXPORT_SYMBOL(atomic64_sub);
6108 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6109 EXPORT_SYMBOL(atomic64_sub_ret);
6110
6111 /* Atomic bit operations. */
6112 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6113 index 301421c..e2535d1 100644
6114 --- a/arch/sparc/mm/Makefile
6115 +++ b/arch/sparc/mm/Makefile
6116 @@ -2,7 +2,7 @@
6117 #
6118
6119 asflags-y := -ansi
6120 -ccflags-y := -Werror
6121 +#ccflags-y := -Werror
6122
6123 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6124 obj-y += fault_$(BITS).o
6125 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6126 index 8023fd7..c8e89e9 100644
6127 --- a/arch/sparc/mm/fault_32.c
6128 +++ b/arch/sparc/mm/fault_32.c
6129 @@ -21,6 +21,9 @@
6130 #include <linux/perf_event.h>
6131 #include <linux/interrupt.h>
6132 #include <linux/kdebug.h>
6133 +#include <linux/slab.h>
6134 +#include <linux/pagemap.h>
6135 +#include <linux/compiler.h>
6136
6137 #include <asm/system.h>
6138 #include <asm/page.h>
6139 @@ -208,6 +211,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6140 return safe_compute_effective_address(regs, insn);
6141 }
6142
6143 +#ifdef CONFIG_PAX_PAGEEXEC
6144 +#ifdef CONFIG_PAX_DLRESOLVE
6145 +static void pax_emuplt_close(struct vm_area_struct *vma)
6146 +{
6147 + vma->vm_mm->call_dl_resolve = 0UL;
6148 +}
6149 +
6150 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6151 +{
6152 + unsigned int *kaddr;
6153 +
6154 + vmf->page = alloc_page(GFP_HIGHUSER);
6155 + if (!vmf->page)
6156 + return VM_FAULT_OOM;
6157 +
6158 + kaddr = kmap(vmf->page);
6159 + memset(kaddr, 0, PAGE_SIZE);
6160 + kaddr[0] = 0x9DE3BFA8U; /* save */
6161 + flush_dcache_page(vmf->page);
6162 + kunmap(vmf->page);
6163 + return VM_FAULT_MAJOR;
6164 +}
6165 +
6166 +static const struct vm_operations_struct pax_vm_ops = {
6167 + .close = pax_emuplt_close,
6168 + .fault = pax_emuplt_fault
6169 +};
6170 +
6171 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6172 +{
6173 + int ret;
6174 +
6175 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6176 + vma->vm_mm = current->mm;
6177 + vma->vm_start = addr;
6178 + vma->vm_end = addr + PAGE_SIZE;
6179 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6180 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6181 + vma->vm_ops = &pax_vm_ops;
6182 +
6183 + ret = insert_vm_struct(current->mm, vma);
6184 + if (ret)
6185 + return ret;
6186 +
6187 + ++current->mm->total_vm;
6188 + return 0;
6189 +}
6190 +#endif
6191 +
6192 +/*
6193 + * PaX: decide what to do with offenders (regs->pc = fault address)
6194 + *
6195 + * returns 1 when task should be killed
6196 + * 2 when patched PLT trampoline was detected
6197 + * 3 when unpatched PLT trampoline was detected
6198 + */
6199 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6200 +{
6201 +
6202 +#ifdef CONFIG_PAX_EMUPLT
6203 + int err;
6204 +
6205 + do { /* PaX: patched PLT emulation #1 */
6206 + unsigned int sethi1, sethi2, jmpl;
6207 +
6208 + err = get_user(sethi1, (unsigned int *)regs->pc);
6209 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6210 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6211 +
6212 + if (err)
6213 + break;
6214 +
6215 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6216 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6217 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6218 + {
6219 + unsigned int addr;
6220 +
6221 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6222 + addr = regs->u_regs[UREG_G1];
6223 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6224 + regs->pc = addr;
6225 + regs->npc = addr+4;
6226 + return 2;
6227 + }
6228 + } while (0);
6229 +
6230 + { /* PaX: patched PLT emulation #2 */
6231 + unsigned int ba;
6232 +
6233 + err = get_user(ba, (unsigned int *)regs->pc);
6234 +
6235 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6236 + unsigned int addr;
6237 +
6238 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6239 + regs->pc = addr;
6240 + regs->npc = addr+4;
6241 + return 2;
6242 + }
6243 + }
6244 +
6245 + do { /* PaX: patched PLT emulation #3 */
6246 + unsigned int sethi, jmpl, nop;
6247 +
6248 + err = get_user(sethi, (unsigned int *)regs->pc);
6249 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6250 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6251 +
6252 + if (err)
6253 + break;
6254 +
6255 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6256 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6257 + nop == 0x01000000U)
6258 + {
6259 + unsigned int addr;
6260 +
6261 + addr = (sethi & 0x003FFFFFU) << 10;
6262 + regs->u_regs[UREG_G1] = addr;
6263 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6264 + regs->pc = addr;
6265 + regs->npc = addr+4;
6266 + return 2;
6267 + }
6268 + } while (0);
6269 +
6270 + do { /* PaX: unpatched PLT emulation step 1 */
6271 + unsigned int sethi, ba, nop;
6272 +
6273 + err = get_user(sethi, (unsigned int *)regs->pc);
6274 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6275 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6276 +
6277 + if (err)
6278 + break;
6279 +
6280 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6281 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6282 + nop == 0x01000000U)
6283 + {
6284 + unsigned int addr, save, call;
6285 +
6286 + if ((ba & 0xFFC00000U) == 0x30800000U)
6287 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6288 + else
6289 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6290 +
6291 + err = get_user(save, (unsigned int *)addr);
6292 + err |= get_user(call, (unsigned int *)(addr+4));
6293 + err |= get_user(nop, (unsigned int *)(addr+8));
6294 + if (err)
6295 + break;
6296 +
6297 +#ifdef CONFIG_PAX_DLRESOLVE
6298 + if (save == 0x9DE3BFA8U &&
6299 + (call & 0xC0000000U) == 0x40000000U &&
6300 + nop == 0x01000000U)
6301 + {
6302 + struct vm_area_struct *vma;
6303 + unsigned long call_dl_resolve;
6304 +
6305 + down_read(&current->mm->mmap_sem);
6306 + call_dl_resolve = current->mm->call_dl_resolve;
6307 + up_read(&current->mm->mmap_sem);
6308 + if (likely(call_dl_resolve))
6309 + goto emulate;
6310 +
6311 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6312 +
6313 + down_write(&current->mm->mmap_sem);
6314 + if (current->mm->call_dl_resolve) {
6315 + call_dl_resolve = current->mm->call_dl_resolve;
6316 + up_write(&current->mm->mmap_sem);
6317 + if (vma)
6318 + kmem_cache_free(vm_area_cachep, vma);
6319 + goto emulate;
6320 + }
6321 +
6322 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6323 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6324 + up_write(&current->mm->mmap_sem);
6325 + if (vma)
6326 + kmem_cache_free(vm_area_cachep, vma);
6327 + return 1;
6328 + }
6329 +
6330 + if (pax_insert_vma(vma, call_dl_resolve)) {
6331 + up_write(&current->mm->mmap_sem);
6332 + kmem_cache_free(vm_area_cachep, vma);
6333 + return 1;
6334 + }
6335 +
6336 + current->mm->call_dl_resolve = call_dl_resolve;
6337 + up_write(&current->mm->mmap_sem);
6338 +
6339 +emulate:
6340 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6341 + regs->pc = call_dl_resolve;
6342 + regs->npc = addr+4;
6343 + return 3;
6344 + }
6345 +#endif
6346 +
6347 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6348 + if ((save & 0xFFC00000U) == 0x05000000U &&
6349 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6350 + nop == 0x01000000U)
6351 + {
6352 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6353 + regs->u_regs[UREG_G2] = addr + 4;
6354 + addr = (save & 0x003FFFFFU) << 10;
6355 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6356 + regs->pc = addr;
6357 + regs->npc = addr+4;
6358 + return 3;
6359 + }
6360 + }
6361 + } while (0);
6362 +
6363 + do { /* PaX: unpatched PLT emulation step 2 */
6364 + unsigned int save, call, nop;
6365 +
6366 + err = get_user(save, (unsigned int *)(regs->pc-4));
6367 + err |= get_user(call, (unsigned int *)regs->pc);
6368 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6369 + if (err)
6370 + break;
6371 +
6372 + if (save == 0x9DE3BFA8U &&
6373 + (call & 0xC0000000U) == 0x40000000U &&
6374 + nop == 0x01000000U)
6375 + {
6376 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6377 +
6378 + regs->u_regs[UREG_RETPC] = regs->pc;
6379 + regs->pc = dl_resolve;
6380 + regs->npc = dl_resolve+4;
6381 + return 3;
6382 + }
6383 + } while (0);
6384 +#endif
6385 +
6386 + return 1;
6387 +}
6388 +
6389 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6390 +{
6391 + unsigned long i;
6392 +
6393 + printk(KERN_ERR "PAX: bytes at PC: ");
6394 + for (i = 0; i < 8; i++) {
6395 + unsigned int c;
6396 + if (get_user(c, (unsigned int *)pc+i))
6397 + printk(KERN_CONT "???????? ");
6398 + else
6399 + printk(KERN_CONT "%08x ", c);
6400 + }
6401 + printk("\n");
6402 +}
6403 +#endif
6404 +
6405 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6406 int text_fault)
6407 {
6408 @@ -280,6 +545,24 @@ good_area:
6409 if(!(vma->vm_flags & VM_WRITE))
6410 goto bad_area;
6411 } else {
6412 +
6413 +#ifdef CONFIG_PAX_PAGEEXEC
6414 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6415 + up_read(&mm->mmap_sem);
6416 + switch (pax_handle_fetch_fault(regs)) {
6417 +
6418 +#ifdef CONFIG_PAX_EMUPLT
6419 + case 2:
6420 + case 3:
6421 + return;
6422 +#endif
6423 +
6424 + }
6425 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6426 + do_group_exit(SIGKILL);
6427 + }
6428 +#endif
6429 +
6430 /* Allow reads even for write-only mappings */
6431 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6432 goto bad_area;
6433 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6434 index 504c062..6fcb9c6 100644
6435 --- a/arch/sparc/mm/fault_64.c
6436 +++ b/arch/sparc/mm/fault_64.c
6437 @@ -21,6 +21,9 @@
6438 #include <linux/kprobes.h>
6439 #include <linux/kdebug.h>
6440 #include <linux/percpu.h>
6441 +#include <linux/slab.h>
6442 +#include <linux/pagemap.h>
6443 +#include <linux/compiler.h>
6444
6445 #include <asm/page.h>
6446 #include <asm/pgtable.h>
6447 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6448 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6449 regs->tpc);
6450 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6451 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6452 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6453 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6454 dump_stack();
6455 unhandled_fault(regs->tpc, current, regs);
6456 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6457 show_regs(regs);
6458 }
6459
6460 +#ifdef CONFIG_PAX_PAGEEXEC
6461 +#ifdef CONFIG_PAX_DLRESOLVE
6462 +static void pax_emuplt_close(struct vm_area_struct *vma)
6463 +{
6464 + vma->vm_mm->call_dl_resolve = 0UL;
6465 +}
6466 +
6467 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6468 +{
6469 + unsigned int *kaddr;
6470 +
6471 + vmf->page = alloc_page(GFP_HIGHUSER);
6472 + if (!vmf->page)
6473 + return VM_FAULT_OOM;
6474 +
6475 + kaddr = kmap(vmf->page);
6476 + memset(kaddr, 0, PAGE_SIZE);
6477 + kaddr[0] = 0x9DE3BFA8U; /* save */
6478 + flush_dcache_page(vmf->page);
6479 + kunmap(vmf->page);
6480 + return VM_FAULT_MAJOR;
6481 +}
6482 +
6483 +static const struct vm_operations_struct pax_vm_ops = {
6484 + .close = pax_emuplt_close,
6485 + .fault = pax_emuplt_fault
6486 +};
6487 +
6488 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6489 +{
6490 + int ret;
6491 +
6492 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6493 + vma->vm_mm = current->mm;
6494 + vma->vm_start = addr;
6495 + vma->vm_end = addr + PAGE_SIZE;
6496 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6497 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6498 + vma->vm_ops = &pax_vm_ops;
6499 +
6500 + ret = insert_vm_struct(current->mm, vma);
6501 + if (ret)
6502 + return ret;
6503 +
6504 + ++current->mm->total_vm;
6505 + return 0;
6506 +}
6507 +#endif
6508 +
6509 +/*
6510 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6511 + *
6512 + * returns 1 when task should be killed
6513 + * 2 when patched PLT trampoline was detected
6514 + * 3 when unpatched PLT trampoline was detected
6515 + */
6516 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6517 +{
6518 +
6519 +#ifdef CONFIG_PAX_EMUPLT
6520 + int err;
6521 +
6522 + do { /* PaX: patched PLT emulation #1 */
6523 + unsigned int sethi1, sethi2, jmpl;
6524 +
6525 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6526 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6527 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6528 +
6529 + if (err)
6530 + break;
6531 +
6532 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6533 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6534 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6535 + {
6536 + unsigned long addr;
6537 +
6538 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6539 + addr = regs->u_regs[UREG_G1];
6540 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6541 +
6542 + if (test_thread_flag(TIF_32BIT))
6543 + addr &= 0xFFFFFFFFUL;
6544 +
6545 + regs->tpc = addr;
6546 + regs->tnpc = addr+4;
6547 + return 2;
6548 + }
6549 + } while (0);
6550 +
6551 + { /* PaX: patched PLT emulation #2 */
6552 + unsigned int ba;
6553 +
6554 + err = get_user(ba, (unsigned int *)regs->tpc);
6555 +
6556 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6557 + unsigned long addr;
6558 +
6559 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6560 +
6561 + if (test_thread_flag(TIF_32BIT))
6562 + addr &= 0xFFFFFFFFUL;
6563 +
6564 + regs->tpc = addr;
6565 + regs->tnpc = addr+4;
6566 + return 2;
6567 + }
6568 + }
6569 +
6570 + do { /* PaX: patched PLT emulation #3 */
6571 + unsigned int sethi, jmpl, nop;
6572 +
6573 + err = get_user(sethi, (unsigned int *)regs->tpc);
6574 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6575 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6576 +
6577 + if (err)
6578 + break;
6579 +
6580 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6581 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6582 + nop == 0x01000000U)
6583 + {
6584 + unsigned long addr;
6585 +
6586 + addr = (sethi & 0x003FFFFFU) << 10;
6587 + regs->u_regs[UREG_G1] = addr;
6588 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6589 +
6590 + if (test_thread_flag(TIF_32BIT))
6591 + addr &= 0xFFFFFFFFUL;
6592 +
6593 + regs->tpc = addr;
6594 + regs->tnpc = addr+4;
6595 + return 2;
6596 + }
6597 + } while (0);
6598 +
6599 + do { /* PaX: patched PLT emulation #4 */
6600 + unsigned int sethi, mov1, call, mov2;
6601 +
6602 + err = get_user(sethi, (unsigned int *)regs->tpc);
6603 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6604 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
6605 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6606 +
6607 + if (err)
6608 + break;
6609 +
6610 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6611 + mov1 == 0x8210000FU &&
6612 + (call & 0xC0000000U) == 0x40000000U &&
6613 + mov2 == 0x9E100001U)
6614 + {
6615 + unsigned long addr;
6616 +
6617 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6618 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6619 +
6620 + if (test_thread_flag(TIF_32BIT))
6621 + addr &= 0xFFFFFFFFUL;
6622 +
6623 + regs->tpc = addr;
6624 + regs->tnpc = addr+4;
6625 + return 2;
6626 + }
6627 + } while (0);
6628 +
6629 + do { /* PaX: patched PLT emulation #5 */
6630 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6631 +
6632 + err = get_user(sethi, (unsigned int *)regs->tpc);
6633 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6634 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6635 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6636 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6637 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6638 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6639 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6640 +
6641 + if (err)
6642 + break;
6643 +
6644 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6645 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6646 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6647 + (or1 & 0xFFFFE000U) == 0x82106000U &&
6648 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6649 + sllx == 0x83287020U &&
6650 + jmpl == 0x81C04005U &&
6651 + nop == 0x01000000U)
6652 + {
6653 + unsigned long addr;
6654 +
6655 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6656 + regs->u_regs[UREG_G1] <<= 32;
6657 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6658 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6659 + regs->tpc = addr;
6660 + regs->tnpc = addr+4;
6661 + return 2;
6662 + }
6663 + } while (0);
6664 +
6665 + do { /* PaX: patched PLT emulation #6 */
6666 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6667 +
6668 + err = get_user(sethi, (unsigned int *)regs->tpc);
6669 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6670 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6671 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6672 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
6673 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6674 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6675 +
6676 + if (err)
6677 + break;
6678 +
6679 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6680 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
6681 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6682 + sllx == 0x83287020U &&
6683 + (or & 0xFFFFE000U) == 0x8A116000U &&
6684 + jmpl == 0x81C04005U &&
6685 + nop == 0x01000000U)
6686 + {
6687 + unsigned long addr;
6688 +
6689 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6690 + regs->u_regs[UREG_G1] <<= 32;
6691 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6692 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6693 + regs->tpc = addr;
6694 + regs->tnpc = addr+4;
6695 + return 2;
6696 + }
6697 + } while (0);
6698 +
6699 + do { /* PaX: unpatched PLT emulation step 1 */
6700 + unsigned int sethi, ba, nop;
6701 +
6702 + err = get_user(sethi, (unsigned int *)regs->tpc);
6703 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6704 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6705 +
6706 + if (err)
6707 + break;
6708 +
6709 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6710 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6711 + nop == 0x01000000U)
6712 + {
6713 + unsigned long addr;
6714 + unsigned int save, call;
6715 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6716 +
6717 + if ((ba & 0xFFC00000U) == 0x30800000U)
6718 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6719 + else
6720 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6721 +
6722 + if (test_thread_flag(TIF_32BIT))
6723 + addr &= 0xFFFFFFFFUL;
6724 +
6725 + err = get_user(save, (unsigned int *)addr);
6726 + err |= get_user(call, (unsigned int *)(addr+4));
6727 + err |= get_user(nop, (unsigned int *)(addr+8));
6728 + if (err)
6729 + break;
6730 +
6731 +#ifdef CONFIG_PAX_DLRESOLVE
6732 + if (save == 0x9DE3BFA8U &&
6733 + (call & 0xC0000000U) == 0x40000000U &&
6734 + nop == 0x01000000U)
6735 + {
6736 + struct vm_area_struct *vma;
6737 + unsigned long call_dl_resolve;
6738 +
6739 + down_read(&current->mm->mmap_sem);
6740 + call_dl_resolve = current->mm->call_dl_resolve;
6741 + up_read(&current->mm->mmap_sem);
6742 + if (likely(call_dl_resolve))
6743 + goto emulate;
6744 +
6745 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6746 +
6747 + down_write(&current->mm->mmap_sem);
6748 + if (current->mm->call_dl_resolve) {
6749 + call_dl_resolve = current->mm->call_dl_resolve;
6750 + up_write(&current->mm->mmap_sem);
6751 + if (vma)
6752 + kmem_cache_free(vm_area_cachep, vma);
6753 + goto emulate;
6754 + }
6755 +
6756 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6757 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6758 + up_write(&current->mm->mmap_sem);
6759 + if (vma)
6760 + kmem_cache_free(vm_area_cachep, vma);
6761 + return 1;
6762 + }
6763 +
6764 + if (pax_insert_vma(vma, call_dl_resolve)) {
6765 + up_write(&current->mm->mmap_sem);
6766 + kmem_cache_free(vm_area_cachep, vma);
6767 + return 1;
6768 + }
6769 +
6770 + current->mm->call_dl_resolve = call_dl_resolve;
6771 + up_write(&current->mm->mmap_sem);
6772 +
6773 +emulate:
6774 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6775 + regs->tpc = call_dl_resolve;
6776 + regs->tnpc = addr+4;
6777 + return 3;
6778 + }
6779 +#endif
6780 +
6781 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6782 + if ((save & 0xFFC00000U) == 0x05000000U &&
6783 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6784 + nop == 0x01000000U)
6785 + {
6786 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6787 + regs->u_regs[UREG_G2] = addr + 4;
6788 + addr = (save & 0x003FFFFFU) << 10;
6789 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6790 +
6791 + if (test_thread_flag(TIF_32BIT))
6792 + addr &= 0xFFFFFFFFUL;
6793 +
6794 + regs->tpc = addr;
6795 + regs->tnpc = addr+4;
6796 + return 3;
6797 + }
6798 +
6799 + /* PaX: 64-bit PLT stub */
6800 + err = get_user(sethi1, (unsigned int *)addr);
6801 + err |= get_user(sethi2, (unsigned int *)(addr+4));
6802 + err |= get_user(or1, (unsigned int *)(addr+8));
6803 + err |= get_user(or2, (unsigned int *)(addr+12));
6804 + err |= get_user(sllx, (unsigned int *)(addr+16));
6805 + err |= get_user(add, (unsigned int *)(addr+20));
6806 + err |= get_user(jmpl, (unsigned int *)(addr+24));
6807 + err |= get_user(nop, (unsigned int *)(addr+28));
6808 + if (err)
6809 + break;
6810 +
6811 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6812 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6813 + (or1 & 0xFFFFE000U) == 0x88112000U &&
6814 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
6815 + sllx == 0x89293020U &&
6816 + add == 0x8A010005U &&
6817 + jmpl == 0x89C14000U &&
6818 + nop == 0x01000000U)
6819 + {
6820 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6821 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6822 + regs->u_regs[UREG_G4] <<= 32;
6823 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6824 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6825 + regs->u_regs[UREG_G4] = addr + 24;
6826 + addr = regs->u_regs[UREG_G5];
6827 + regs->tpc = addr;
6828 + regs->tnpc = addr+4;
6829 + return 3;
6830 + }
6831 + }
6832 + } while (0);
6833 +
6834 +#ifdef CONFIG_PAX_DLRESOLVE
6835 + do { /* PaX: unpatched PLT emulation step 2 */
6836 + unsigned int save, call, nop;
6837 +
6838 + err = get_user(save, (unsigned int *)(regs->tpc-4));
6839 + err |= get_user(call, (unsigned int *)regs->tpc);
6840 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6841 + if (err)
6842 + break;
6843 +
6844 + if (save == 0x9DE3BFA8U &&
6845 + (call & 0xC0000000U) == 0x40000000U &&
6846 + nop == 0x01000000U)
6847 + {
6848 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6849 +
6850 + if (test_thread_flag(TIF_32BIT))
6851 + dl_resolve &= 0xFFFFFFFFUL;
6852 +
6853 + regs->u_regs[UREG_RETPC] = regs->tpc;
6854 + regs->tpc = dl_resolve;
6855 + regs->tnpc = dl_resolve+4;
6856 + return 3;
6857 + }
6858 + } while (0);
6859 +#endif
6860 +
6861 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6862 + unsigned int sethi, ba, nop;
6863 +
6864 + err = get_user(sethi, (unsigned int *)regs->tpc);
6865 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6866 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6867 +
6868 + if (err)
6869 + break;
6870 +
6871 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6872 + (ba & 0xFFF00000U) == 0x30600000U &&
6873 + nop == 0x01000000U)
6874 + {
6875 + unsigned long addr;
6876 +
6877 + addr = (sethi & 0x003FFFFFU) << 10;
6878 + regs->u_regs[UREG_G1] = addr;
6879 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6880 +
6881 + if (test_thread_flag(TIF_32BIT))
6882 + addr &= 0xFFFFFFFFUL;
6883 +
6884 + regs->tpc = addr;
6885 + regs->tnpc = addr+4;
6886 + return 2;
6887 + }
6888 + } while (0);
6889 +
6890 +#endif
6891 +
6892 + return 1;
6893 +}
6894 +
6895 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6896 +{
6897 + unsigned long i;
6898 +
6899 + printk(KERN_ERR "PAX: bytes at PC: ");
6900 + for (i = 0; i < 8; i++) {
6901 + unsigned int c;
6902 + if (get_user(c, (unsigned int *)pc+i))
6903 + printk(KERN_CONT "???????? ");
6904 + else
6905 + printk(KERN_CONT "%08x ", c);
6906 + }
6907 + printk("\n");
6908 +}
6909 +#endif
6910 +
6911 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6912 {
6913 struct mm_struct *mm = current->mm;
6914 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6915 if (!vma)
6916 goto bad_area;
6917
6918 +#ifdef CONFIG_PAX_PAGEEXEC
6919 + /* PaX: detect ITLB misses on non-exec pages */
6920 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6921 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6922 + {
6923 + if (address != regs->tpc)
6924 + goto good_area;
6925 +
6926 + up_read(&mm->mmap_sem);
6927 + switch (pax_handle_fetch_fault(regs)) {
6928 +
6929 +#ifdef CONFIG_PAX_EMUPLT
6930 + case 2:
6931 + case 3:
6932 + return;
6933 +#endif
6934 +
6935 + }
6936 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6937 + do_group_exit(SIGKILL);
6938 + }
6939 +#endif
6940 +
6941 /* Pure DTLB misses do not tell us whether the fault causing
6942 * load/store/atomic was a write or not, it only says that there
6943 * was no match. So in such a case we (carefully) read the
6944 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6945 index 07e1453..0a7d9e9 100644
6946 --- a/arch/sparc/mm/hugetlbpage.c
6947 +++ b/arch/sparc/mm/hugetlbpage.c
6948 @@ -67,7 +67,7 @@ full_search:
6949 }
6950 return -ENOMEM;
6951 }
6952 - if (likely(!vma || addr + len <= vma->vm_start)) {
6953 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6954 /*
6955 * Remember the place where we stopped the search:
6956 */
6957 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6958 /* make sure it can fit in the remaining address space */
6959 if (likely(addr > len)) {
6960 vma = find_vma(mm, addr-len);
6961 - if (!vma || addr <= vma->vm_start) {
6962 + if (check_heap_stack_gap(vma, addr - len, len)) {
6963 /* remember the address as a hint for next time */
6964 return (mm->free_area_cache = addr-len);
6965 }
6966 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6967 if (unlikely(mm->mmap_base < len))
6968 goto bottomup;
6969
6970 - addr = (mm->mmap_base-len) & HPAGE_MASK;
6971 + addr = mm->mmap_base - len;
6972
6973 do {
6974 + addr &= HPAGE_MASK;
6975 /*
6976 * Lookup failure means no vma is above this address,
6977 * else if new region fits below vma->vm_start,
6978 * return with success:
6979 */
6980 vma = find_vma(mm, addr);
6981 - if (likely(!vma || addr+len <= vma->vm_start)) {
6982 + if (likely(check_heap_stack_gap(vma, addr, len))) {
6983 /* remember the address as a hint for next time */
6984 return (mm->free_area_cache = addr);
6985 }
6986 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6987 mm->cached_hole_size = vma->vm_start - addr;
6988
6989 /* try just below the current vma->vm_start */
6990 - addr = (vma->vm_start-len) & HPAGE_MASK;
6991 - } while (likely(len < vma->vm_start));
6992 + addr = skip_heap_stack_gap(vma, len);
6993 + } while (!IS_ERR_VALUE(addr));
6994
6995 bottomup:
6996 /*
6997 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6998 if (addr) {
6999 addr = ALIGN(addr, HPAGE_SIZE);
7000 vma = find_vma(mm, addr);
7001 - if (task_size - len >= addr &&
7002 - (!vma || addr + len <= vma->vm_start))
7003 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7004 return addr;
7005 }
7006 if (mm->get_unmapped_area == arch_get_unmapped_area)
7007 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7008 index 7b00de6..78239f4 100644
7009 --- a/arch/sparc/mm/init_32.c
7010 +++ b/arch/sparc/mm/init_32.c
7011 @@ -316,6 +316,9 @@ extern void device_scan(void);
7012 pgprot_t PAGE_SHARED __read_mostly;
7013 EXPORT_SYMBOL(PAGE_SHARED);
7014
7015 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7016 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7017 +
7018 void __init paging_init(void)
7019 {
7020 switch(sparc_cpu_model) {
7021 @@ -344,17 +347,17 @@ void __init paging_init(void)
7022
7023 /* Initialize the protection map with non-constant, MMU dependent values. */
7024 protection_map[0] = PAGE_NONE;
7025 - protection_map[1] = PAGE_READONLY;
7026 - protection_map[2] = PAGE_COPY;
7027 - protection_map[3] = PAGE_COPY;
7028 + protection_map[1] = PAGE_READONLY_NOEXEC;
7029 + protection_map[2] = PAGE_COPY_NOEXEC;
7030 + protection_map[3] = PAGE_COPY_NOEXEC;
7031 protection_map[4] = PAGE_READONLY;
7032 protection_map[5] = PAGE_READONLY;
7033 protection_map[6] = PAGE_COPY;
7034 protection_map[7] = PAGE_COPY;
7035 protection_map[8] = PAGE_NONE;
7036 - protection_map[9] = PAGE_READONLY;
7037 - protection_map[10] = PAGE_SHARED;
7038 - protection_map[11] = PAGE_SHARED;
7039 + protection_map[9] = PAGE_READONLY_NOEXEC;
7040 + protection_map[10] = PAGE_SHARED_NOEXEC;
7041 + protection_map[11] = PAGE_SHARED_NOEXEC;
7042 protection_map[12] = PAGE_READONLY;
7043 protection_map[13] = PAGE_READONLY;
7044 protection_map[14] = PAGE_SHARED;
7045 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7046 index cbef74e..c38fead 100644
7047 --- a/arch/sparc/mm/srmmu.c
7048 +++ b/arch/sparc/mm/srmmu.c
7049 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7050 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7051 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7052 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7053 +
7054 +#ifdef CONFIG_PAX_PAGEEXEC
7055 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7056 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7057 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7058 +#endif
7059 +
7060 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7061 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7062
7063 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7064 index 27fe667..36d474c 100644
7065 --- a/arch/tile/include/asm/atomic_64.h
7066 +++ b/arch/tile/include/asm/atomic_64.h
7067 @@ -142,6 +142,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7068
7069 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7070
7071 +#define atomic64_read_unchecked(v) atomic64_read(v)
7072 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7073 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7074 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7075 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7076 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7077 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7078 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7079 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7080 +
7081 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7082 #define smp_mb__before_atomic_dec() smp_mb()
7083 #define smp_mb__after_atomic_dec() smp_mb()
7084 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7085 index 392e533..536b092 100644
7086 --- a/arch/tile/include/asm/cache.h
7087 +++ b/arch/tile/include/asm/cache.h
7088 @@ -15,11 +15,12 @@
7089 #ifndef _ASM_TILE_CACHE_H
7090 #define _ASM_TILE_CACHE_H
7091
7092 +#include <linux/const.h>
7093 #include <arch/chip.h>
7094
7095 /* bytes per L1 data cache line */
7096 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7097 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7098 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7099
7100 /* bytes per L2 cache line */
7101 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7102 diff --git a/arch/um/Makefile b/arch/um/Makefile
7103 index 28688e6..4c0aa1c 100644
7104 --- a/arch/um/Makefile
7105 +++ b/arch/um/Makefile
7106 @@ -61,6 +61,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7107 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7108 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7109
7110 +ifdef CONSTIFY_PLUGIN
7111 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7112 +endif
7113 +
7114 #This will adjust *FLAGS accordingly to the platform.
7115 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7116
7117 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7118 index 19e1bdd..3665b77 100644
7119 --- a/arch/um/include/asm/cache.h
7120 +++ b/arch/um/include/asm/cache.h
7121 @@ -1,6 +1,7 @@
7122 #ifndef __UM_CACHE_H
7123 #define __UM_CACHE_H
7124
7125 +#include <linux/const.h>
7126
7127 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7128 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7129 @@ -12,6 +13,6 @@
7130 # define L1_CACHE_SHIFT 5
7131 #endif
7132
7133 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7134 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7135
7136 #endif
7137 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7138 index 6c03acd..a5e0215 100644
7139 --- a/arch/um/include/asm/kmap_types.h
7140 +++ b/arch/um/include/asm/kmap_types.h
7141 @@ -23,6 +23,7 @@ enum km_type {
7142 KM_IRQ1,
7143 KM_SOFTIRQ0,
7144 KM_SOFTIRQ1,
7145 + KM_CLEARPAGE,
7146 KM_TYPE_NR
7147 };
7148
7149 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7150 index 7cfc3ce..cbd1a58 100644
7151 --- a/arch/um/include/asm/page.h
7152 +++ b/arch/um/include/asm/page.h
7153 @@ -14,6 +14,9 @@
7154 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7155 #define PAGE_MASK (~(PAGE_SIZE-1))
7156
7157 +#define ktla_ktva(addr) (addr)
7158 +#define ktva_ktla(addr) (addr)
7159 +
7160 #ifndef __ASSEMBLY__
7161
7162 struct page;
7163 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7164 index 0032f92..cd151e0 100644
7165 --- a/arch/um/include/asm/pgtable-3level.h
7166 +++ b/arch/um/include/asm/pgtable-3level.h
7167 @@ -58,6 +58,7 @@
7168 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7169 #define pud_populate(mm, pud, pmd) \
7170 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7171 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7172
7173 #ifdef CONFIG_64BIT
7174 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7175 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7176 index 69f2490..2634831 100644
7177 --- a/arch/um/kernel/process.c
7178 +++ b/arch/um/kernel/process.c
7179 @@ -408,22 +408,6 @@ int singlestepping(void * t)
7180 return 2;
7181 }
7182
7183 -/*
7184 - * Only x86 and x86_64 have an arch_align_stack().
7185 - * All other arches have "#define arch_align_stack(x) (x)"
7186 - * in their asm/system.h
7187 - * As this is included in UML from asm-um/system-generic.h,
7188 - * we can use it to behave as the subarch does.
7189 - */
7190 -#ifndef arch_align_stack
7191 -unsigned long arch_align_stack(unsigned long sp)
7192 -{
7193 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7194 - sp -= get_random_int() % 8192;
7195 - return sp & ~0xf;
7196 -}
7197 -#endif
7198 -
7199 unsigned long get_wchan(struct task_struct *p)
7200 {
7201 unsigned long stack_page, sp, ip;
7202 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7203 index ad8f795..2c7eec6 100644
7204 --- a/arch/unicore32/include/asm/cache.h
7205 +++ b/arch/unicore32/include/asm/cache.h
7206 @@ -12,8 +12,10 @@
7207 #ifndef __UNICORE_CACHE_H__
7208 #define __UNICORE_CACHE_H__
7209
7210 -#define L1_CACHE_SHIFT (5)
7211 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7212 +#include <linux/const.h>
7213 +
7214 +#define L1_CACHE_SHIFT 5
7215 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7216
7217 /*
7218 * Memory returned by kmalloc() may be used for DMA, so we must make
7219 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7220 index 5bed94e..fbcf200 100644
7221 --- a/arch/x86/Kconfig
7222 +++ b/arch/x86/Kconfig
7223 @@ -226,7 +226,7 @@ config X86_HT
7224
7225 config X86_32_LAZY_GS
7226 def_bool y
7227 - depends on X86_32 && !CC_STACKPROTECTOR
7228 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7229
7230 config ARCH_HWEIGHT_CFLAGS
7231 string
7232 @@ -1058,7 +1058,7 @@ choice
7233
7234 config NOHIGHMEM
7235 bool "off"
7236 - depends on !X86_NUMAQ
7237 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7238 ---help---
7239 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7240 However, the address space of 32-bit x86 processors is only 4
7241 @@ -1095,7 +1095,7 @@ config NOHIGHMEM
7242
7243 config HIGHMEM4G
7244 bool "4GB"
7245 - depends on !X86_NUMAQ
7246 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7247 ---help---
7248 Select this if you have a 32-bit processor and between 1 and 4
7249 gigabytes of physical RAM.
7250 @@ -1149,7 +1149,7 @@ config PAGE_OFFSET
7251 hex
7252 default 0xB0000000 if VMSPLIT_3G_OPT
7253 default 0x80000000 if VMSPLIT_2G
7254 - default 0x78000000 if VMSPLIT_2G_OPT
7255 + default 0x70000000 if VMSPLIT_2G_OPT
7256 default 0x40000000 if VMSPLIT_1G
7257 default 0xC0000000
7258 depends on X86_32
7259 @@ -1539,6 +1539,7 @@ config SECCOMP
7260
7261 config CC_STACKPROTECTOR
7262 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7263 + depends on X86_64 || !PAX_MEMORY_UDEREF
7264 ---help---
7265 This option turns on the -fstack-protector GCC feature. This
7266 feature puts, at the beginning of functions, a canary value on
7267 @@ -1596,6 +1597,7 @@ config KEXEC_JUMP
7268 config PHYSICAL_START
7269 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7270 default "0x1000000"
7271 + range 0x400000 0x40000000
7272 ---help---
7273 This gives the physical address where the kernel is loaded.
7274
7275 @@ -1659,6 +1661,7 @@ config X86_NEED_RELOCS
7276 config PHYSICAL_ALIGN
7277 hex "Alignment value to which kernel should be aligned" if X86_32
7278 default "0x1000000"
7279 + range 0x400000 0x1000000 if PAX_KERNEXEC
7280 range 0x2000 0x1000000
7281 ---help---
7282 This value puts the alignment restrictions on physical address
7283 @@ -1690,9 +1693,10 @@ config HOTPLUG_CPU
7284 Say N if you want to disable CPU hotplug.
7285
7286 config COMPAT_VDSO
7287 - def_bool y
7288 + def_bool n
7289 prompt "Compat VDSO support"
7290 depends on X86_32 || IA32_EMULATION
7291 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7292 ---help---
7293 Map the 32-bit VDSO to the predictable old-style address too.
7294
7295 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7296 index 3c57033..22d44aa 100644
7297 --- a/arch/x86/Kconfig.cpu
7298 +++ b/arch/x86/Kconfig.cpu
7299 @@ -335,7 +335,7 @@ config X86_PPRO_FENCE
7300
7301 config X86_F00F_BUG
7302 def_bool y
7303 - depends on M586MMX || M586TSC || M586 || M486 || M386
7304 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7305
7306 config X86_INVD_BUG
7307 def_bool y
7308 @@ -359,7 +359,7 @@ config X86_POPAD_OK
7309
7310 config X86_ALIGNMENT_16
7311 def_bool y
7312 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7313 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7314
7315 config X86_INTEL_USERCOPY
7316 def_bool y
7317 @@ -405,7 +405,7 @@ config X86_CMPXCHG64
7318 # generates cmov.
7319 config X86_CMOV
7320 def_bool y
7321 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7322 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7323
7324 config X86_MINIMUM_CPU_FAMILY
7325 int
7326 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7327 index e46c214..7c72b55 100644
7328 --- a/arch/x86/Kconfig.debug
7329 +++ b/arch/x86/Kconfig.debug
7330 @@ -84,7 +84,7 @@ config X86_PTDUMP
7331 config DEBUG_RODATA
7332 bool "Write protect kernel read-only data structures"
7333 default y
7334 - depends on DEBUG_KERNEL
7335 + depends on DEBUG_KERNEL && BROKEN
7336 ---help---
7337 Mark the kernel read-only data as write-protected in the pagetables,
7338 in order to catch accidental (and incorrect) writes to such const
7339 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7340
7341 config DEBUG_SET_MODULE_RONX
7342 bool "Set loadable kernel module data as NX and text as RO"
7343 - depends on MODULES
7344 + depends on MODULES && BROKEN
7345 ---help---
7346 This option helps catch unintended modifications to loadable
7347 kernel module's text and read-only data. It also prevents execution
7348 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7349 index 209ba12..15140db 100644
7350 --- a/arch/x86/Makefile
7351 +++ b/arch/x86/Makefile
7352 @@ -46,6 +46,7 @@ else
7353 UTS_MACHINE := x86_64
7354 CHECKFLAGS += -D__x86_64__ -m64
7355
7356 + biarch := $(call cc-option,-m64)
7357 KBUILD_AFLAGS += -m64
7358 KBUILD_CFLAGS += -m64
7359
7360 @@ -201,3 +202,12 @@ define archhelp
7361 echo ' FDARGS="..." arguments for the booted kernel'
7362 echo ' FDINITRD=file initrd for the booted kernel'
7363 endef
7364 +
7365 +define OLD_LD
7366 +
7367 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7368 +*** Please upgrade your binutils to 2.18 or newer
7369 +endef
7370 +
7371 +archprepare:
7372 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7373 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7374 index 5a747dd..ff7b12c 100644
7375 --- a/arch/x86/boot/Makefile
7376 +++ b/arch/x86/boot/Makefile
7377 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7378 $(call cc-option, -fno-stack-protector) \
7379 $(call cc-option, -mpreferred-stack-boundary=2)
7380 KBUILD_CFLAGS += $(call cc-option, -m32)
7381 +ifdef CONSTIFY_PLUGIN
7382 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7383 +endif
7384 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7385 GCOV_PROFILE := n
7386
7387 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7388 index 878e4b9..20537ab 100644
7389 --- a/arch/x86/boot/bitops.h
7390 +++ b/arch/x86/boot/bitops.h
7391 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7392 u8 v;
7393 const u32 *p = (const u32 *)addr;
7394
7395 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7396 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7397 return v;
7398 }
7399
7400 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7401
7402 static inline void set_bit(int nr, void *addr)
7403 {
7404 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7405 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7406 }
7407
7408 #endif /* BOOT_BITOPS_H */
7409 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7410 index c7093bd..d4247ffe0 100644
7411 --- a/arch/x86/boot/boot.h
7412 +++ b/arch/x86/boot/boot.h
7413 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7414 static inline u16 ds(void)
7415 {
7416 u16 seg;
7417 - asm("movw %%ds,%0" : "=rm" (seg));
7418 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7419 return seg;
7420 }
7421
7422 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7423 static inline int memcmp(const void *s1, const void *s2, size_t len)
7424 {
7425 u8 diff;
7426 - asm("repe; cmpsb; setnz %0"
7427 + asm volatile("repe; cmpsb; setnz %0"
7428 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7429 return diff;
7430 }
7431 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7432 index fd55a2f..217b501 100644
7433 --- a/arch/x86/boot/compressed/Makefile
7434 +++ b/arch/x86/boot/compressed/Makefile
7435 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7436 KBUILD_CFLAGS += $(cflags-y)
7437 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7438 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7439 +ifdef CONSTIFY_PLUGIN
7440 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7441 +endif
7442
7443 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7444 GCOV_PROFILE := n
7445 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7446 index c85e3ac..6f5aa80 100644
7447 --- a/arch/x86/boot/compressed/head_32.S
7448 +++ b/arch/x86/boot/compressed/head_32.S
7449 @@ -106,7 +106,7 @@ preferred_addr:
7450 notl %eax
7451 andl %eax, %ebx
7452 #else
7453 - movl $LOAD_PHYSICAL_ADDR, %ebx
7454 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7455 #endif
7456
7457 /* Target address to relocate to for decompression */
7458 @@ -192,7 +192,7 @@ relocated:
7459 * and where it was actually loaded.
7460 */
7461 movl %ebp, %ebx
7462 - subl $LOAD_PHYSICAL_ADDR, %ebx
7463 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7464 jz 2f /* Nothing to be done if loaded at compiled addr. */
7465 /*
7466 * Process relocations.
7467 @@ -200,8 +200,7 @@ relocated:
7468
7469 1: subl $4, %edi
7470 movl (%edi), %ecx
7471 - testl %ecx, %ecx
7472 - jz 2f
7473 + jecxz 2f
7474 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7475 jmp 1b
7476 2:
7477 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7478 index 87e03a1..0d94c76 100644
7479 --- a/arch/x86/boot/compressed/head_64.S
7480 +++ b/arch/x86/boot/compressed/head_64.S
7481 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7482 notl %eax
7483 andl %eax, %ebx
7484 #else
7485 - movl $LOAD_PHYSICAL_ADDR, %ebx
7486 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7487 #endif
7488
7489 /* Target address to relocate to for decompression */
7490 @@ -263,7 +263,7 @@ preferred_addr:
7491 notq %rax
7492 andq %rax, %rbp
7493 #else
7494 - movq $LOAD_PHYSICAL_ADDR, %rbp
7495 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7496 #endif
7497
7498 /* Target address to relocate to for decompression */
7499 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7500 index 7116dcb..d9ae1d7 100644
7501 --- a/arch/x86/boot/compressed/misc.c
7502 +++ b/arch/x86/boot/compressed/misc.c
7503 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7504 case PT_LOAD:
7505 #ifdef CONFIG_RELOCATABLE
7506 dest = output;
7507 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7508 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7509 #else
7510 dest = (void *)(phdr->p_paddr);
7511 #endif
7512 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7513 error("Destination address too large");
7514 #endif
7515 #ifndef CONFIG_RELOCATABLE
7516 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7517 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7518 error("Wrong destination address");
7519 #endif
7520
7521 diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7522 index e77f4e4..17e511f 100644
7523 --- a/arch/x86/boot/compressed/relocs.c
7524 +++ b/arch/x86/boot/compressed/relocs.c
7525 @@ -13,8 +13,11 @@
7526
7527 static void die(char *fmt, ...);
7528
7529 +#include "../../../../include/generated/autoconf.h"
7530 +
7531 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7532 static Elf32_Ehdr ehdr;
7533 +static Elf32_Phdr *phdr;
7534 static unsigned long reloc_count, reloc_idx;
7535 static unsigned long *relocs;
7536
7537 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
7538 }
7539 }
7540
7541 +static void read_phdrs(FILE *fp)
7542 +{
7543 + unsigned int i;
7544 +
7545 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7546 + if (!phdr) {
7547 + die("Unable to allocate %d program headers\n",
7548 + ehdr.e_phnum);
7549 + }
7550 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7551 + die("Seek to %d failed: %s\n",
7552 + ehdr.e_phoff, strerror(errno));
7553 + }
7554 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7555 + die("Cannot read ELF program headers: %s\n",
7556 + strerror(errno));
7557 + }
7558 + for(i = 0; i < ehdr.e_phnum; i++) {
7559 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7560 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7561 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7562 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7563 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7564 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7565 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7566 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7567 + }
7568 +
7569 +}
7570 +
7571 static void read_shdrs(FILE *fp)
7572 {
7573 - int i;
7574 + unsigned int i;
7575 Elf32_Shdr shdr;
7576
7577 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7578 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
7579
7580 static void read_strtabs(FILE *fp)
7581 {
7582 - int i;
7583 + unsigned int i;
7584 for (i = 0; i < ehdr.e_shnum; i++) {
7585 struct section *sec = &secs[i];
7586 if (sec->shdr.sh_type != SHT_STRTAB) {
7587 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
7588
7589 static void read_symtabs(FILE *fp)
7590 {
7591 - int i,j;
7592 + unsigned int i,j;
7593 for (i = 0; i < ehdr.e_shnum; i++) {
7594 struct section *sec = &secs[i];
7595 if (sec->shdr.sh_type != SHT_SYMTAB) {
7596 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
7597
7598 static void read_relocs(FILE *fp)
7599 {
7600 - int i,j;
7601 + unsigned int i,j;
7602 + uint32_t base;
7603 +
7604 for (i = 0; i < ehdr.e_shnum; i++) {
7605 struct section *sec = &secs[i];
7606 if (sec->shdr.sh_type != SHT_REL) {
7607 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
7608 die("Cannot read symbol table: %s\n",
7609 strerror(errno));
7610 }
7611 + base = 0;
7612 + for (j = 0; j < ehdr.e_phnum; j++) {
7613 + if (phdr[j].p_type != PT_LOAD )
7614 + continue;
7615 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7616 + continue;
7617 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7618 + break;
7619 + }
7620 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7621 Elf32_Rel *rel = &sec->reltab[j];
7622 - rel->r_offset = elf32_to_cpu(rel->r_offset);
7623 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7624 rel->r_info = elf32_to_cpu(rel->r_info);
7625 }
7626 }
7627 @@ -396,13 +440,13 @@ static void read_relocs(FILE *fp)
7628
7629 static void print_absolute_symbols(void)
7630 {
7631 - int i;
7632 + unsigned int i;
7633 printf("Absolute symbols\n");
7634 printf(" Num: Value Size Type Bind Visibility Name\n");
7635 for (i = 0; i < ehdr.e_shnum; i++) {
7636 struct section *sec = &secs[i];
7637 char *sym_strtab;
7638 - int j;
7639 + unsigned int j;
7640
7641 if (sec->shdr.sh_type != SHT_SYMTAB) {
7642 continue;
7643 @@ -429,14 +473,14 @@ static void print_absolute_symbols(void)
7644
7645 static void print_absolute_relocs(void)
7646 {
7647 - int i, printed = 0;
7648 + unsigned int i, printed = 0;
7649
7650 for (i = 0; i < ehdr.e_shnum; i++) {
7651 struct section *sec = &secs[i];
7652 struct section *sec_applies, *sec_symtab;
7653 char *sym_strtab;
7654 Elf32_Sym *sh_symtab;
7655 - int j;
7656 + unsigned int j;
7657 if (sec->shdr.sh_type != SHT_REL) {
7658 continue;
7659 }
7660 @@ -497,13 +541,13 @@ static void print_absolute_relocs(void)
7661
7662 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7663 {
7664 - int i;
7665 + unsigned int i;
7666 /* Walk through the relocations */
7667 for (i = 0; i < ehdr.e_shnum; i++) {
7668 char *sym_strtab;
7669 Elf32_Sym *sh_symtab;
7670 struct section *sec_applies, *sec_symtab;
7671 - int j;
7672 + unsigned int j;
7673 struct section *sec = &secs[i];
7674
7675 if (sec->shdr.sh_type != SHT_REL) {
7676 @@ -528,6 +572,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7677 !is_rel_reloc(sym_name(sym_strtab, sym))) {
7678 continue;
7679 }
7680 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7681 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7682 + continue;
7683 +
7684 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7685 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7686 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7687 + continue;
7688 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7689 + continue;
7690 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7691 + continue;
7692 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7693 + continue;
7694 +#endif
7695 +
7696 switch (r_type) {
7697 case R_386_NONE:
7698 case R_386_PC32:
7699 @@ -569,7 +629,7 @@ static int cmp_relocs(const void *va, const void *vb)
7700
7701 static void emit_relocs(int as_text)
7702 {
7703 - int i;
7704 + unsigned int i;
7705 /* Count how many relocations I have and allocate space for them. */
7706 reloc_count = 0;
7707 walk_relocs(count_reloc);
7708 @@ -663,6 +723,7 @@ int main(int argc, char **argv)
7709 fname, strerror(errno));
7710 }
7711 read_ehdr(fp);
7712 + read_phdrs(fp);
7713 read_shdrs(fp);
7714 read_strtabs(fp);
7715 read_symtabs(fp);
7716 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7717 index 4d3ff03..e4972ff 100644
7718 --- a/arch/x86/boot/cpucheck.c
7719 +++ b/arch/x86/boot/cpucheck.c
7720 @@ -74,7 +74,7 @@ static int has_fpu(void)
7721 u16 fcw = -1, fsw = -1;
7722 u32 cr0;
7723
7724 - asm("movl %%cr0,%0" : "=r" (cr0));
7725 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7726 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7727 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7728 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7729 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7730 {
7731 u32 f0, f1;
7732
7733 - asm("pushfl ; "
7734 + asm volatile("pushfl ; "
7735 "pushfl ; "
7736 "popl %0 ; "
7737 "movl %0,%1 ; "
7738 @@ -115,7 +115,7 @@ static void get_flags(void)
7739 set_bit(X86_FEATURE_FPU, cpu.flags);
7740
7741 if (has_eflag(X86_EFLAGS_ID)) {
7742 - asm("cpuid"
7743 + asm volatile("cpuid"
7744 : "=a" (max_intel_level),
7745 "=b" (cpu_vendor[0]),
7746 "=d" (cpu_vendor[1]),
7747 @@ -124,7 +124,7 @@ static void get_flags(void)
7748
7749 if (max_intel_level >= 0x00000001 &&
7750 max_intel_level <= 0x0000ffff) {
7751 - asm("cpuid"
7752 + asm volatile("cpuid"
7753 : "=a" (tfms),
7754 "=c" (cpu.flags[4]),
7755 "=d" (cpu.flags[0])
7756 @@ -136,7 +136,7 @@ static void get_flags(void)
7757 cpu.model += ((tfms >> 16) & 0xf) << 4;
7758 }
7759
7760 - asm("cpuid"
7761 + asm volatile("cpuid"
7762 : "=a" (max_amd_level)
7763 : "a" (0x80000000)
7764 : "ebx", "ecx", "edx");
7765 @@ -144,7 +144,7 @@ static void get_flags(void)
7766 if (max_amd_level >= 0x80000001 &&
7767 max_amd_level <= 0x8000ffff) {
7768 u32 eax = 0x80000001;
7769 - asm("cpuid"
7770 + asm volatile("cpuid"
7771 : "+a" (eax),
7772 "=c" (cpu.flags[6]),
7773 "=d" (cpu.flags[1])
7774 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7775 u32 ecx = MSR_K7_HWCR;
7776 u32 eax, edx;
7777
7778 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7779 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7780 eax &= ~(1 << 15);
7781 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7782 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7783
7784 get_flags(); /* Make sure it really did something */
7785 err = check_flags();
7786 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7787 u32 ecx = MSR_VIA_FCR;
7788 u32 eax, edx;
7789
7790 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7791 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7792 eax |= (1<<1)|(1<<7);
7793 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7794 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7795
7796 set_bit(X86_FEATURE_CX8, cpu.flags);
7797 err = check_flags();
7798 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7799 u32 eax, edx;
7800 u32 level = 1;
7801
7802 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7803 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7804 - asm("cpuid"
7805 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7806 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7807 + asm volatile("cpuid"
7808 : "+a" (level), "=d" (cpu.flags[0])
7809 : : "ecx", "ebx");
7810 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7811 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7812
7813 err = check_flags();
7814 }
7815 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7816 index f1bbeeb..aff09cb 100644
7817 --- a/arch/x86/boot/header.S
7818 +++ b/arch/x86/boot/header.S
7819 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7820 # single linked list of
7821 # struct setup_data
7822
7823 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7824 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7825
7826 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7827 #define VO_INIT_SIZE (VO__end - VO__text)
7828 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7829 index db75d07..8e6d0af 100644
7830 --- a/arch/x86/boot/memory.c
7831 +++ b/arch/x86/boot/memory.c
7832 @@ -19,7 +19,7 @@
7833
7834 static int detect_memory_e820(void)
7835 {
7836 - int count = 0;
7837 + unsigned int count = 0;
7838 struct biosregs ireg, oreg;
7839 struct e820entry *desc = boot_params.e820_map;
7840 static struct e820entry buf; /* static so it is zeroed */
7841 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7842 index 11e8c6e..fdbb1ed 100644
7843 --- a/arch/x86/boot/video-vesa.c
7844 +++ b/arch/x86/boot/video-vesa.c
7845 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7846
7847 boot_params.screen_info.vesapm_seg = oreg.es;
7848 boot_params.screen_info.vesapm_off = oreg.di;
7849 + boot_params.screen_info.vesapm_size = oreg.cx;
7850 }
7851
7852 /*
7853 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7854 index 43eda28..5ab5fdb 100644
7855 --- a/arch/x86/boot/video.c
7856 +++ b/arch/x86/boot/video.c
7857 @@ -96,7 +96,7 @@ static void store_mode_params(void)
7858 static unsigned int get_entry(void)
7859 {
7860 char entry_buf[4];
7861 - int i, len = 0;
7862 + unsigned int i, len = 0;
7863 int key;
7864 unsigned int v;
7865
7866 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7867 index 5b577d5..3c1fed4 100644
7868 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
7869 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7870 @@ -8,6 +8,8 @@
7871 * including this sentence is retained in full.
7872 */
7873
7874 +#include <asm/alternative-asm.h>
7875 +
7876 .extern crypto_ft_tab
7877 .extern crypto_it_tab
7878 .extern crypto_fl_tab
7879 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7880 je B192; \
7881 leaq 32(r9),r9;
7882
7883 +#define ret pax_force_retaddr 0, 1; ret
7884 +
7885 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7886 movq r1,r2; \
7887 movq r3,r4; \
7888 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7889 index be6d9e3..21fbbca 100644
7890 --- a/arch/x86/crypto/aesni-intel_asm.S
7891 +++ b/arch/x86/crypto/aesni-intel_asm.S
7892 @@ -31,6 +31,7 @@
7893
7894 #include <linux/linkage.h>
7895 #include <asm/inst.h>
7896 +#include <asm/alternative-asm.h>
7897
7898 #ifdef __x86_64__
7899 .data
7900 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
7901 pop %r14
7902 pop %r13
7903 pop %r12
7904 + pax_force_retaddr 0, 1
7905 ret
7906 +ENDPROC(aesni_gcm_dec)
7907
7908
7909 /*****************************************************************************
7910 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
7911 pop %r14
7912 pop %r13
7913 pop %r12
7914 + pax_force_retaddr 0, 1
7915 ret
7916 +ENDPROC(aesni_gcm_enc)
7917
7918 #endif
7919
7920 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
7921 pxor %xmm1, %xmm0
7922 movaps %xmm0, (TKEYP)
7923 add $0x10, TKEYP
7924 + pax_force_retaddr_bts
7925 ret
7926
7927 .align 4
7928 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
7929 shufps $0b01001110, %xmm2, %xmm1
7930 movaps %xmm1, 0x10(TKEYP)
7931 add $0x20, TKEYP
7932 + pax_force_retaddr_bts
7933 ret
7934
7935 .align 4
7936 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
7937
7938 movaps %xmm0, (TKEYP)
7939 add $0x10, TKEYP
7940 + pax_force_retaddr_bts
7941 ret
7942
7943 .align 4
7944 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
7945 pxor %xmm1, %xmm2
7946 movaps %xmm2, (TKEYP)
7947 add $0x10, TKEYP
7948 + pax_force_retaddr_bts
7949 ret
7950
7951 /*
7952 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
7953 #ifndef __x86_64__
7954 popl KEYP
7955 #endif
7956 + pax_force_retaddr 0, 1
7957 ret
7958 +ENDPROC(aesni_set_key)
7959
7960 /*
7961 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7962 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
7963 popl KLEN
7964 popl KEYP
7965 #endif
7966 + pax_force_retaddr 0, 1
7967 ret
7968 +ENDPROC(aesni_enc)
7969
7970 /*
7971 * _aesni_enc1: internal ABI
7972 @@ -1959,6 +1972,7 @@ _aesni_enc1:
7973 AESENC KEY STATE
7974 movaps 0x70(TKEYP), KEY
7975 AESENCLAST KEY STATE
7976 + pax_force_retaddr_bts
7977 ret
7978
7979 /*
7980 @@ -2067,6 +2081,7 @@ _aesni_enc4:
7981 AESENCLAST KEY STATE2
7982 AESENCLAST KEY STATE3
7983 AESENCLAST KEY STATE4
7984 + pax_force_retaddr_bts
7985 ret
7986
7987 /*
7988 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
7989 popl KLEN
7990 popl KEYP
7991 #endif
7992 + pax_force_retaddr 0, 1
7993 ret
7994 +ENDPROC(aesni_dec)
7995
7996 /*
7997 * _aesni_dec1: internal ABI
7998 @@ -2146,6 +2163,7 @@ _aesni_dec1:
7999 AESDEC KEY STATE
8000 movaps 0x70(TKEYP), KEY
8001 AESDECLAST KEY STATE
8002 + pax_force_retaddr_bts
8003 ret
8004
8005 /*
8006 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8007 AESDECLAST KEY STATE2
8008 AESDECLAST KEY STATE3
8009 AESDECLAST KEY STATE4
8010 + pax_force_retaddr_bts
8011 ret
8012
8013 /*
8014 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8015 popl KEYP
8016 popl LEN
8017 #endif
8018 + pax_force_retaddr 0, 1
8019 ret
8020 +ENDPROC(aesni_ecb_enc)
8021
8022 /*
8023 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8024 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8025 popl KEYP
8026 popl LEN
8027 #endif
8028 + pax_force_retaddr 0, 1
8029 ret
8030 +ENDPROC(aesni_ecb_dec)
8031
8032 /*
8033 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8034 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8035 popl LEN
8036 popl IVP
8037 #endif
8038 + pax_force_retaddr 0, 1
8039 ret
8040 +ENDPROC(aesni_cbc_enc)
8041
8042 /*
8043 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8044 @@ -2498,7 +2523,9 @@ ENTRY(aesni_cbc_dec)
8045 popl LEN
8046 popl IVP
8047 #endif
8048 + pax_force_retaddr 0, 1
8049 ret
8050 +ENDPROC(aesni_cbc_dec)
8051
8052 #ifdef __x86_64__
8053 .align 16
8054 @@ -2524,6 +2551,7 @@ _aesni_inc_init:
8055 mov $1, TCTR_LOW
8056 MOVQ_R64_XMM TCTR_LOW INC
8057 MOVQ_R64_XMM CTR TCTR_LOW
8058 + pax_force_retaddr_bts
8059 ret
8060
8061 /*
8062 @@ -2552,6 +2580,7 @@ _aesni_inc:
8063 .Linc_low:
8064 movaps CTR, IV
8065 PSHUFB_XMM BSWAP_MASK IV
8066 + pax_force_retaddr_bts
8067 ret
8068
8069 /*
8070 @@ -2612,5 +2641,7 @@ ENTRY(aesni_ctr_enc)
8071 .Lctr_enc_ret:
8072 movups IV, (IVP)
8073 .Lctr_enc_just_ret:
8074 + pax_force_retaddr 0, 1
8075 ret
8076 +ENDPROC(aesni_ctr_enc)
8077 #endif
8078 diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
8079 index 545d0ce..14841a6 100644
8080 --- a/arch/x86/crypto/aesni-intel_glue.c
8081 +++ b/arch/x86/crypto/aesni-intel_glue.c
8082 @@ -929,6 +929,8 @@ out_free_ablkcipher:
8083 }
8084
8085 static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
8086 + unsigned int key_len) __size_overflow(3);
8087 +static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
8088 unsigned int key_len)
8089 {
8090 int ret = 0;
8091 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8092 index 391d245..67f35c2 100644
8093 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8094 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8095 @@ -20,6 +20,8 @@
8096 *
8097 */
8098
8099 +#include <asm/alternative-asm.h>
8100 +
8101 .file "blowfish-x86_64-asm.S"
8102 .text
8103
8104 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8105 jnz __enc_xor;
8106
8107 write_block();
8108 + pax_force_retaddr 0, 1
8109 ret;
8110 __enc_xor:
8111 xor_block();
8112 + pax_force_retaddr 0, 1
8113 ret;
8114
8115 .align 8
8116 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8117
8118 movq %r11, %rbp;
8119
8120 + pax_force_retaddr 0, 1
8121 ret;
8122
8123 /**********************************************************************
8124 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8125
8126 popq %rbx;
8127 popq %rbp;
8128 + pax_force_retaddr 0, 1
8129 ret;
8130
8131 __enc_xor4:
8132 @@ -349,6 +355,7 @@ __enc_xor4:
8133
8134 popq %rbx;
8135 popq %rbp;
8136 + pax_force_retaddr 0, 1
8137 ret;
8138
8139 .align 8
8140 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8141 popq %rbx;
8142 popq %rbp;
8143
8144 + pax_force_retaddr 0, 1
8145 ret;
8146
8147 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8148 index 6214a9b..1f4fc9a 100644
8149 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8150 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8151 @@ -1,3 +1,5 @@
8152 +#include <asm/alternative-asm.h>
8153 +
8154 # enter ECRYPT_encrypt_bytes
8155 .text
8156 .p2align 5
8157 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8158 add %r11,%rsp
8159 mov %rdi,%rax
8160 mov %rsi,%rdx
8161 + pax_force_retaddr 0, 1
8162 ret
8163 # bytesatleast65:
8164 ._bytesatleast65:
8165 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8166 add %r11,%rsp
8167 mov %rdi,%rax
8168 mov %rsi,%rdx
8169 + pax_force_retaddr
8170 ret
8171 # enter ECRYPT_ivsetup
8172 .text
8173 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8174 add %r11,%rsp
8175 mov %rdi,%rax
8176 mov %rsi,%rdx
8177 + pax_force_retaddr
8178 ret
8179 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8180 index 7f24a15..9cd3ffe 100644
8181 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8182 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8183 @@ -24,6 +24,8 @@
8184 *
8185 */
8186
8187 +#include <asm/alternative-asm.h>
8188 +
8189 .file "serpent-sse2-x86_64-asm_64.S"
8190 .text
8191
8192 @@ -695,12 +697,14 @@ __serpent_enc_blk_8way:
8193 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8194 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8195
8196 + pax_force_retaddr
8197 ret;
8198
8199 __enc_xor8:
8200 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8201 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8202
8203 + pax_force_retaddr
8204 ret;
8205
8206 .align 8
8207 @@ -758,4 +762,5 @@ serpent_dec_blk_8way:
8208 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8209 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8210
8211 + pax_force_retaddr
8212 ret;
8213 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8214 index b2c2f57..8470cab 100644
8215 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8216 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8217 @@ -28,6 +28,8 @@
8218 * (at your option) any later version.
8219 */
8220
8221 +#include <asm/alternative-asm.h>
8222 +
8223 #define CTX %rdi // arg1
8224 #define BUF %rsi // arg2
8225 #define CNT %rdx // arg3
8226 @@ -104,6 +106,7 @@
8227 pop %r12
8228 pop %rbp
8229 pop %rbx
8230 + pax_force_retaddr 0, 1
8231 ret
8232
8233 .size \name, .-\name
8234 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8235 index 5b012a2..36d5364 100644
8236 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8237 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8238 @@ -20,6 +20,8 @@
8239 *
8240 */
8241
8242 +#include <asm/alternative-asm.h>
8243 +
8244 .file "twofish-x86_64-asm-3way.S"
8245 .text
8246
8247 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8248 popq %r13;
8249 popq %r14;
8250 popq %r15;
8251 + pax_force_retaddr 0, 1
8252 ret;
8253
8254 __enc_xor3:
8255 @@ -271,6 +274,7 @@ __enc_xor3:
8256 popq %r13;
8257 popq %r14;
8258 popq %r15;
8259 + pax_force_retaddr 0, 1
8260 ret;
8261
8262 .global twofish_dec_blk_3way
8263 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8264 popq %r13;
8265 popq %r14;
8266 popq %r15;
8267 + pax_force_retaddr 0, 1
8268 ret;
8269
8270 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8271 index 7bcf3fc..f53832f 100644
8272 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8273 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8274 @@ -21,6 +21,7 @@
8275 .text
8276
8277 #include <asm/asm-offsets.h>
8278 +#include <asm/alternative-asm.h>
8279
8280 #define a_offset 0
8281 #define b_offset 4
8282 @@ -268,6 +269,7 @@ twofish_enc_blk:
8283
8284 popq R1
8285 movq $1,%rax
8286 + pax_force_retaddr 0, 1
8287 ret
8288
8289 twofish_dec_blk:
8290 @@ -319,4 +321,5 @@ twofish_dec_blk:
8291
8292 popq R1
8293 movq $1,%rax
8294 + pax_force_retaddr 0, 1
8295 ret
8296 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8297 index 39e4909..887aa7e 100644
8298 --- a/arch/x86/ia32/ia32_aout.c
8299 +++ b/arch/x86/ia32/ia32_aout.c
8300 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8301 unsigned long dump_start, dump_size;
8302 struct user32 dump;
8303
8304 + memset(&dump, 0, sizeof(dump));
8305 +
8306 fs = get_fs();
8307 set_fs(KERNEL_DS);
8308 has_dumped = 1;
8309 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8310 index 6557769..ef6ae89 100644
8311 --- a/arch/x86/ia32/ia32_signal.c
8312 +++ b/arch/x86/ia32/ia32_signal.c
8313 @@ -169,7 +169,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8314 }
8315 seg = get_fs();
8316 set_fs(KERNEL_DS);
8317 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8318 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8319 set_fs(seg);
8320 if (ret >= 0 && uoss_ptr) {
8321 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8322 @@ -370,7 +370,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8323 */
8324 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8325 size_t frame_size,
8326 - void **fpstate)
8327 + void __user **fpstate)
8328 {
8329 unsigned long sp;
8330
8331 @@ -391,7 +391,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8332
8333 if (used_math()) {
8334 sp = sp - sig_xstate_ia32_size;
8335 - *fpstate = (struct _fpstate_ia32 *) sp;
8336 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8337 if (save_i387_xstate_ia32(*fpstate) < 0)
8338 return (void __user *) -1L;
8339 }
8340 @@ -399,7 +399,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8341 sp -= frame_size;
8342 /* Align the stack pointer according to the i386 ABI,
8343 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8344 - sp = ((sp + 4) & -16ul) - 4;
8345 + sp = ((sp - 12) & -16ul) - 4;
8346 return (void __user *) sp;
8347 }
8348
8349 @@ -457,7 +457,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8350 * These are actually not used anymore, but left because some
8351 * gdb versions depend on them as a marker.
8352 */
8353 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8354 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8355 } put_user_catch(err);
8356
8357 if (err)
8358 @@ -499,7 +499,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8359 0xb8,
8360 __NR_ia32_rt_sigreturn,
8361 0x80cd,
8362 - 0,
8363 + 0
8364 };
8365
8366 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8367 @@ -529,16 +529,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8368
8369 if (ka->sa.sa_flags & SA_RESTORER)
8370 restorer = ka->sa.sa_restorer;
8371 + else if (current->mm->context.vdso)
8372 + /* Return stub is in 32bit vsyscall page */
8373 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8374 else
8375 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8376 - rt_sigreturn);
8377 + restorer = &frame->retcode;
8378 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8379
8380 /*
8381 * Not actually used anymore, but left because some gdb
8382 * versions need it.
8383 */
8384 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8385 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8386 } put_user_catch(err);
8387
8388 if (err)
8389 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8390 index e3e7340..05ed805 100644
8391 --- a/arch/x86/ia32/ia32entry.S
8392 +++ b/arch/x86/ia32/ia32entry.S
8393 @@ -13,8 +13,10 @@
8394 #include <asm/thread_info.h>
8395 #include <asm/segment.h>
8396 #include <asm/irqflags.h>
8397 +#include <asm/pgtable.h>
8398 #include <linux/linkage.h>
8399 #include <linux/err.h>
8400 +#include <asm/alternative-asm.h>
8401
8402 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8403 #include <linux/elf-em.h>
8404 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8405 ENDPROC(native_irq_enable_sysexit)
8406 #endif
8407
8408 + .macro pax_enter_kernel_user
8409 + pax_set_fptr_mask
8410 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8411 + call pax_enter_kernel_user
8412 +#endif
8413 + .endm
8414 +
8415 + .macro pax_exit_kernel_user
8416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8417 + call pax_exit_kernel_user
8418 +#endif
8419 +#ifdef CONFIG_PAX_RANDKSTACK
8420 + pushq %rax
8421 + pushq %r11
8422 + call pax_randomize_kstack
8423 + popq %r11
8424 + popq %rax
8425 +#endif
8426 + .endm
8427 +
8428 +.macro pax_erase_kstack
8429 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8430 + call pax_erase_kstack
8431 +#endif
8432 +.endm
8433 +
8434 /*
8435 * 32bit SYSENTER instruction entry.
8436 *
8437 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8438 CFI_REGISTER rsp,rbp
8439 SWAPGS_UNSAFE_STACK
8440 movq PER_CPU_VAR(kernel_stack), %rsp
8441 - addq $(KERNEL_STACK_OFFSET),%rsp
8442 - /*
8443 - * No need to follow this irqs on/off section: the syscall
8444 - * disabled irqs, here we enable it straight after entry:
8445 - */
8446 - ENABLE_INTERRUPTS(CLBR_NONE)
8447 movl %ebp,%ebp /* zero extension */
8448 pushq_cfi $__USER32_DS
8449 /*CFI_REL_OFFSET ss,0*/
8450 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8451 CFI_REL_OFFSET rsp,0
8452 pushfq_cfi
8453 /*CFI_REL_OFFSET rflags,0*/
8454 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8455 - CFI_REGISTER rip,r10
8456 + orl $X86_EFLAGS_IF,(%rsp)
8457 + GET_THREAD_INFO(%r11)
8458 + movl TI_sysenter_return(%r11), %r11d
8459 + CFI_REGISTER rip,r11
8460 pushq_cfi $__USER32_CS
8461 /*CFI_REL_OFFSET cs,0*/
8462 movl %eax, %eax
8463 - pushq_cfi %r10
8464 + pushq_cfi %r11
8465 CFI_REL_OFFSET rip,0
8466 pushq_cfi %rax
8467 cld
8468 SAVE_ARGS 0,1,0
8469 + pax_enter_kernel_user
8470 + /*
8471 + * No need to follow this irqs on/off section: the syscall
8472 + * disabled irqs, here we enable it straight after entry:
8473 + */
8474 + ENABLE_INTERRUPTS(CLBR_NONE)
8475 /* no need to do an access_ok check here because rbp has been
8476 32bit zero extended */
8477 +
8478 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8479 + mov $PAX_USER_SHADOW_BASE,%r11
8480 + add %r11,%rbp
8481 +#endif
8482 +
8483 1: movl (%rbp),%ebp
8484 .section __ex_table,"a"
8485 .quad 1b,ia32_badarg
8486 .previous
8487 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8488 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8489 + GET_THREAD_INFO(%r11)
8490 + orl $TS_COMPAT,TI_status(%r11)
8491 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8492 CFI_REMEMBER_STATE
8493 jnz sysenter_tracesys
8494 cmpq $(IA32_NR_syscalls-1),%rax
8495 @@ -160,12 +197,15 @@ sysenter_do_call:
8496 sysenter_dispatch:
8497 call *ia32_sys_call_table(,%rax,8)
8498 movq %rax,RAX-ARGOFFSET(%rsp)
8499 + GET_THREAD_INFO(%r11)
8500 DISABLE_INTERRUPTS(CLBR_NONE)
8501 TRACE_IRQS_OFF
8502 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8503 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8504 jnz sysexit_audit
8505 sysexit_from_sys_call:
8506 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8507 + pax_exit_kernel_user
8508 + pax_erase_kstack
8509 + andl $~TS_COMPAT,TI_status(%r11)
8510 /* clear IF, that popfq doesn't enable interrupts early */
8511 andl $~0x200,EFLAGS-R11(%rsp)
8512 movl RIP-R11(%rsp),%edx /* User %eip */
8513 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8514 movl %eax,%esi /* 2nd arg: syscall number */
8515 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8516 call __audit_syscall_entry
8517 +
8518 + pax_erase_kstack
8519 +
8520 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8521 cmpq $(IA32_NR_syscalls-1),%rax
8522 ja ia32_badsys
8523 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8524 .endm
8525
8526 .macro auditsys_exit exit
8527 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8528 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8529 jnz ia32_ret_from_sys_call
8530 TRACE_IRQS_ON
8531 sti
8532 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8533 1: setbe %al /* 1 if error, 0 if not */
8534 movzbl %al,%edi /* zero-extend that into %edi */
8535 call __audit_syscall_exit
8536 + GET_THREAD_INFO(%r11)
8537 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8538 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8539 cli
8540 TRACE_IRQS_OFF
8541 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8542 + testl %edi,TI_flags(%r11)
8543 jz \exit
8544 CLEAR_RREGS -ARGOFFSET
8545 jmp int_with_check
8546 @@ -235,7 +279,7 @@ sysexit_audit:
8547
8548 sysenter_tracesys:
8549 #ifdef CONFIG_AUDITSYSCALL
8550 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8551 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8552 jz sysenter_auditsys
8553 #endif
8554 SAVE_REST
8555 @@ -243,6 +287,9 @@ sysenter_tracesys:
8556 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8557 movq %rsp,%rdi /* &pt_regs -> arg1 */
8558 call syscall_trace_enter
8559 +
8560 + pax_erase_kstack
8561 +
8562 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8563 RESTORE_REST
8564 cmpq $(IA32_NR_syscalls-1),%rax
8565 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8566 ENTRY(ia32_cstar_target)
8567 CFI_STARTPROC32 simple
8568 CFI_SIGNAL_FRAME
8569 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8570 + CFI_DEF_CFA rsp,0
8571 CFI_REGISTER rip,rcx
8572 /*CFI_REGISTER rflags,r11*/
8573 SWAPGS_UNSAFE_STACK
8574 movl %esp,%r8d
8575 CFI_REGISTER rsp,r8
8576 movq PER_CPU_VAR(kernel_stack),%rsp
8577 + SAVE_ARGS 8*6,0,0
8578 + pax_enter_kernel_user
8579 /*
8580 * No need to follow this irqs on/off section: the syscall
8581 * disabled irqs and here we enable it straight after entry:
8582 */
8583 ENABLE_INTERRUPTS(CLBR_NONE)
8584 - SAVE_ARGS 8,0,0
8585 movl %eax,%eax /* zero extension */
8586 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8587 movq %rcx,RIP-ARGOFFSET(%rsp)
8588 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8589 /* no need to do an access_ok check here because r8 has been
8590 32bit zero extended */
8591 /* hardware stack frame is complete now */
8592 +
8593 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8594 + mov $PAX_USER_SHADOW_BASE,%r11
8595 + add %r11,%r8
8596 +#endif
8597 +
8598 1: movl (%r8),%r9d
8599 .section __ex_table,"a"
8600 .quad 1b,ia32_badarg
8601 .previous
8602 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8603 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8604 + GET_THREAD_INFO(%r11)
8605 + orl $TS_COMPAT,TI_status(%r11)
8606 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8607 CFI_REMEMBER_STATE
8608 jnz cstar_tracesys
8609 cmpq $IA32_NR_syscalls-1,%rax
8610 @@ -317,12 +372,15 @@ cstar_do_call:
8611 cstar_dispatch:
8612 call *ia32_sys_call_table(,%rax,8)
8613 movq %rax,RAX-ARGOFFSET(%rsp)
8614 + GET_THREAD_INFO(%r11)
8615 DISABLE_INTERRUPTS(CLBR_NONE)
8616 TRACE_IRQS_OFF
8617 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8618 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8619 jnz sysretl_audit
8620 sysretl_from_sys_call:
8621 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8622 + pax_exit_kernel_user
8623 + pax_erase_kstack
8624 + andl $~TS_COMPAT,TI_status(%r11)
8625 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8626 movl RIP-ARGOFFSET(%rsp),%ecx
8627 CFI_REGISTER rip,rcx
8628 @@ -350,7 +408,7 @@ sysretl_audit:
8629
8630 cstar_tracesys:
8631 #ifdef CONFIG_AUDITSYSCALL
8632 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8633 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8634 jz cstar_auditsys
8635 #endif
8636 xchgl %r9d,%ebp
8637 @@ -359,6 +417,9 @@ cstar_tracesys:
8638 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8639 movq %rsp,%rdi /* &pt_regs -> arg1 */
8640 call syscall_trace_enter
8641 +
8642 + pax_erase_kstack
8643 +
8644 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8645 RESTORE_REST
8646 xchgl %ebp,%r9d
8647 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8648 CFI_REL_OFFSET rip,RIP-RIP
8649 PARAVIRT_ADJUST_EXCEPTION_FRAME
8650 SWAPGS
8651 - /*
8652 - * No need to follow this irqs on/off section: the syscall
8653 - * disabled irqs and here we enable it straight after entry:
8654 - */
8655 - ENABLE_INTERRUPTS(CLBR_NONE)
8656 movl %eax,%eax
8657 pushq_cfi %rax
8658 cld
8659 /* note the registers are not zero extended to the sf.
8660 this could be a problem. */
8661 SAVE_ARGS 0,1,0
8662 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8663 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8664 + pax_enter_kernel_user
8665 + /*
8666 + * No need to follow this irqs on/off section: the syscall
8667 + * disabled irqs and here we enable it straight after entry:
8668 + */
8669 + ENABLE_INTERRUPTS(CLBR_NONE)
8670 + GET_THREAD_INFO(%r11)
8671 + orl $TS_COMPAT,TI_status(%r11)
8672 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8673 jnz ia32_tracesys
8674 cmpq $(IA32_NR_syscalls-1),%rax
8675 ja ia32_badsys
8676 @@ -435,6 +498,9 @@ ia32_tracesys:
8677 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8678 movq %rsp,%rdi /* &pt_regs -> arg1 */
8679 call syscall_trace_enter
8680 +
8681 + pax_erase_kstack
8682 +
8683 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8684 RESTORE_REST
8685 cmpq $(IA32_NR_syscalls-1),%rax
8686 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8687 index f6f5c53..b358b28 100644
8688 --- a/arch/x86/ia32/sys_ia32.c
8689 +++ b/arch/x86/ia32/sys_ia32.c
8690 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8691 */
8692 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8693 {
8694 - typeof(ubuf->st_uid) uid = 0;
8695 - typeof(ubuf->st_gid) gid = 0;
8696 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8697 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8698 SET_UID(uid, stat->uid);
8699 SET_GID(gid, stat->gid);
8700 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8701 @@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8702 }
8703 set_fs(KERNEL_DS);
8704 ret = sys_rt_sigprocmask(how,
8705 - set ? (sigset_t __user *)&s : NULL,
8706 - oset ? (sigset_t __user *)&s : NULL,
8707 + set ? (sigset_t __force_user *)&s : NULL,
8708 + oset ? (sigset_t __force_user *)&s : NULL,
8709 sigsetsize);
8710 set_fs(old_fs);
8711 if (ret)
8712 @@ -332,7 +332,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
8713 return alarm_setitimer(seconds);
8714 }
8715
8716 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
8717 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
8718 int options)
8719 {
8720 return compat_sys_wait4(pid, stat_addr, options, NULL);
8721 @@ -353,7 +353,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8722 mm_segment_t old_fs = get_fs();
8723
8724 set_fs(KERNEL_DS);
8725 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8726 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8727 set_fs(old_fs);
8728 if (put_compat_timespec(&t, interval))
8729 return -EFAULT;
8730 @@ -369,7 +369,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8731 mm_segment_t old_fs = get_fs();
8732
8733 set_fs(KERNEL_DS);
8734 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8735 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8736 set_fs(old_fs);
8737 if (!ret) {
8738 switch (_NSIG_WORDS) {
8739 @@ -394,7 +394,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8740 if (copy_siginfo_from_user32(&info, uinfo))
8741 return -EFAULT;
8742 set_fs(KERNEL_DS);
8743 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8744 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8745 set_fs(old_fs);
8746 return ret;
8747 }
8748 @@ -439,7 +439,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8749 return -EFAULT;
8750
8751 set_fs(KERNEL_DS);
8752 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8753 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8754 count);
8755 set_fs(old_fs);
8756
8757 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8758 index 952bd01..7692c6f 100644
8759 --- a/arch/x86/include/asm/alternative-asm.h
8760 +++ b/arch/x86/include/asm/alternative-asm.h
8761 @@ -15,6 +15,45 @@
8762 .endm
8763 #endif
8764
8765 +#ifdef KERNEXEC_PLUGIN
8766 + .macro pax_force_retaddr_bts rip=0
8767 + btsq $63,\rip(%rsp)
8768 + .endm
8769 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8770 + .macro pax_force_retaddr rip=0, reload=0
8771 + btsq $63,\rip(%rsp)
8772 + .endm
8773 + .macro pax_force_fptr ptr
8774 + btsq $63,\ptr
8775 + .endm
8776 + .macro pax_set_fptr_mask
8777 + .endm
8778 +#endif
8779 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8780 + .macro pax_force_retaddr rip=0, reload=0
8781 + .if \reload
8782 + pax_set_fptr_mask
8783 + .endif
8784 + orq %r10,\rip(%rsp)
8785 + .endm
8786 + .macro pax_force_fptr ptr
8787 + orq %r10,\ptr
8788 + .endm
8789 + .macro pax_set_fptr_mask
8790 + movabs $0x8000000000000000,%r10
8791 + .endm
8792 +#endif
8793 +#else
8794 + .macro pax_force_retaddr rip=0, reload=0
8795 + .endm
8796 + .macro pax_force_fptr ptr
8797 + .endm
8798 + .macro pax_force_retaddr_bts rip=0
8799 + .endm
8800 + .macro pax_set_fptr_mask
8801 + .endm
8802 +#endif
8803 +
8804 .macro altinstruction_entry orig alt feature orig_len alt_len
8805 .long \orig - .
8806 .long \alt - .
8807 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8808 index 37ad100..7d47faa 100644
8809 --- a/arch/x86/include/asm/alternative.h
8810 +++ b/arch/x86/include/asm/alternative.h
8811 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
8812 ".section .discard,\"aw\",@progbits\n" \
8813 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
8814 ".previous\n" \
8815 - ".section .altinstr_replacement, \"ax\"\n" \
8816 + ".section .altinstr_replacement, \"a\"\n" \
8817 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8818 ".previous"
8819
8820 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8821 index 3ab9bdd..238033e 100644
8822 --- a/arch/x86/include/asm/apic.h
8823 +++ b/arch/x86/include/asm/apic.h
8824 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
8825
8826 #ifdef CONFIG_X86_LOCAL_APIC
8827
8828 -extern unsigned int apic_verbosity;
8829 +extern int apic_verbosity;
8830 extern int local_apic_timer_c2_ok;
8831
8832 extern int disable_apic;
8833 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8834 index 20370c6..a2eb9b0 100644
8835 --- a/arch/x86/include/asm/apm.h
8836 +++ b/arch/x86/include/asm/apm.h
8837 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8838 __asm__ __volatile__(APM_DO_ZERO_SEGS
8839 "pushl %%edi\n\t"
8840 "pushl %%ebp\n\t"
8841 - "lcall *%%cs:apm_bios_entry\n\t"
8842 + "lcall *%%ss:apm_bios_entry\n\t"
8843 "setc %%al\n\t"
8844 "popl %%ebp\n\t"
8845 "popl %%edi\n\t"
8846 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8847 __asm__ __volatile__(APM_DO_ZERO_SEGS
8848 "pushl %%edi\n\t"
8849 "pushl %%ebp\n\t"
8850 - "lcall *%%cs:apm_bios_entry\n\t"
8851 + "lcall *%%ss:apm_bios_entry\n\t"
8852 "setc %%bl\n\t"
8853 "popl %%ebp\n\t"
8854 "popl %%edi\n\t"
8855 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
8856 index 58cb6d4..ca9010d 100644
8857 --- a/arch/x86/include/asm/atomic.h
8858 +++ b/arch/x86/include/asm/atomic.h
8859 @@ -22,7 +22,18 @@
8860 */
8861 static inline int atomic_read(const atomic_t *v)
8862 {
8863 - return (*(volatile int *)&(v)->counter);
8864 + return (*(volatile const int *)&(v)->counter);
8865 +}
8866 +
8867 +/**
8868 + * atomic_read_unchecked - read atomic variable
8869 + * @v: pointer of type atomic_unchecked_t
8870 + *
8871 + * Atomically reads the value of @v.
8872 + */
8873 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8874 +{
8875 + return (*(volatile const int *)&(v)->counter);
8876 }
8877
8878 /**
8879 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
8880 }
8881
8882 /**
8883 + * atomic_set_unchecked - set atomic variable
8884 + * @v: pointer of type atomic_unchecked_t
8885 + * @i: required value
8886 + *
8887 + * Atomically sets the value of @v to @i.
8888 + */
8889 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8890 +{
8891 + v->counter = i;
8892 +}
8893 +
8894 +/**
8895 * atomic_add - add integer to atomic variable
8896 * @i: integer value to add
8897 * @v: pointer of type atomic_t
8898 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
8899 */
8900 static inline void atomic_add(int i, atomic_t *v)
8901 {
8902 - asm volatile(LOCK_PREFIX "addl %1,%0"
8903 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8904 +
8905 +#ifdef CONFIG_PAX_REFCOUNT
8906 + "jno 0f\n"
8907 + LOCK_PREFIX "subl %1,%0\n"
8908 + "int $4\n0:\n"
8909 + _ASM_EXTABLE(0b, 0b)
8910 +#endif
8911 +
8912 + : "+m" (v->counter)
8913 + : "ir" (i));
8914 +}
8915 +
8916 +/**
8917 + * atomic_add_unchecked - add integer to atomic variable
8918 + * @i: integer value to add
8919 + * @v: pointer of type atomic_unchecked_t
8920 + *
8921 + * Atomically adds @i to @v.
8922 + */
8923 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8924 +{
8925 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
8926 : "+m" (v->counter)
8927 : "ir" (i));
8928 }
8929 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
8930 */
8931 static inline void atomic_sub(int i, atomic_t *v)
8932 {
8933 - asm volatile(LOCK_PREFIX "subl %1,%0"
8934 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8935 +
8936 +#ifdef CONFIG_PAX_REFCOUNT
8937 + "jno 0f\n"
8938 + LOCK_PREFIX "addl %1,%0\n"
8939 + "int $4\n0:\n"
8940 + _ASM_EXTABLE(0b, 0b)
8941 +#endif
8942 +
8943 + : "+m" (v->counter)
8944 + : "ir" (i));
8945 +}
8946 +
8947 +/**
8948 + * atomic_sub_unchecked - subtract integer from atomic variable
8949 + * @i: integer value to subtract
8950 + * @v: pointer of type atomic_unchecked_t
8951 + *
8952 + * Atomically subtracts @i from @v.
8953 + */
8954 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8955 +{
8956 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
8957 : "+m" (v->counter)
8958 : "ir" (i));
8959 }
8960 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8961 {
8962 unsigned char c;
8963
8964 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8965 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
8966 +
8967 +#ifdef CONFIG_PAX_REFCOUNT
8968 + "jno 0f\n"
8969 + LOCK_PREFIX "addl %2,%0\n"
8970 + "int $4\n0:\n"
8971 + _ASM_EXTABLE(0b, 0b)
8972 +#endif
8973 +
8974 + "sete %1\n"
8975 : "+m" (v->counter), "=qm" (c)
8976 : "ir" (i) : "memory");
8977 return c;
8978 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8979 */
8980 static inline void atomic_inc(atomic_t *v)
8981 {
8982 - asm volatile(LOCK_PREFIX "incl %0"
8983 + asm volatile(LOCK_PREFIX "incl %0\n"
8984 +
8985 +#ifdef CONFIG_PAX_REFCOUNT
8986 + "jno 0f\n"
8987 + LOCK_PREFIX "decl %0\n"
8988 + "int $4\n0:\n"
8989 + _ASM_EXTABLE(0b, 0b)
8990 +#endif
8991 +
8992 + : "+m" (v->counter));
8993 +}
8994 +
8995 +/**
8996 + * atomic_inc_unchecked - increment atomic variable
8997 + * @v: pointer of type atomic_unchecked_t
8998 + *
8999 + * Atomically increments @v by 1.
9000 + */
9001 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9002 +{
9003 + asm volatile(LOCK_PREFIX "incl %0\n"
9004 : "+m" (v->counter));
9005 }
9006
9007 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9008 */
9009 static inline void atomic_dec(atomic_t *v)
9010 {
9011 - asm volatile(LOCK_PREFIX "decl %0"
9012 + asm volatile(LOCK_PREFIX "decl %0\n"
9013 +
9014 +#ifdef CONFIG_PAX_REFCOUNT
9015 + "jno 0f\n"
9016 + LOCK_PREFIX "incl %0\n"
9017 + "int $4\n0:\n"
9018 + _ASM_EXTABLE(0b, 0b)
9019 +#endif
9020 +
9021 + : "+m" (v->counter));
9022 +}
9023 +
9024 +/**
9025 + * atomic_dec_unchecked - decrement atomic variable
9026 + * @v: pointer of type atomic_unchecked_t
9027 + *
9028 + * Atomically decrements @v by 1.
9029 + */
9030 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9031 +{
9032 + asm volatile(LOCK_PREFIX "decl %0\n"
9033 : "+m" (v->counter));
9034 }
9035
9036 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9037 {
9038 unsigned char c;
9039
9040 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9041 + asm volatile(LOCK_PREFIX "decl %0\n"
9042 +
9043 +#ifdef CONFIG_PAX_REFCOUNT
9044 + "jno 0f\n"
9045 + LOCK_PREFIX "incl %0\n"
9046 + "int $4\n0:\n"
9047 + _ASM_EXTABLE(0b, 0b)
9048 +#endif
9049 +
9050 + "sete %1\n"
9051 : "+m" (v->counter), "=qm" (c)
9052 : : "memory");
9053 return c != 0;
9054 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9055 {
9056 unsigned char c;
9057
9058 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9059 + asm volatile(LOCK_PREFIX "incl %0\n"
9060 +
9061 +#ifdef CONFIG_PAX_REFCOUNT
9062 + "jno 0f\n"
9063 + LOCK_PREFIX "decl %0\n"
9064 + "int $4\n0:\n"
9065 + _ASM_EXTABLE(0b, 0b)
9066 +#endif
9067 +
9068 + "sete %1\n"
9069 + : "+m" (v->counter), "=qm" (c)
9070 + : : "memory");
9071 + return c != 0;
9072 +}
9073 +
9074 +/**
9075 + * atomic_inc_and_test_unchecked - increment and test
9076 + * @v: pointer of type atomic_unchecked_t
9077 + *
9078 + * Atomically increments @v by 1
9079 + * and returns true if the result is zero, or false for all
9080 + * other cases.
9081 + */
9082 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9083 +{
9084 + unsigned char c;
9085 +
9086 + asm volatile(LOCK_PREFIX "incl %0\n"
9087 + "sete %1\n"
9088 : "+m" (v->counter), "=qm" (c)
9089 : : "memory");
9090 return c != 0;
9091 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9092 {
9093 unsigned char c;
9094
9095 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9096 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9097 +
9098 +#ifdef CONFIG_PAX_REFCOUNT
9099 + "jno 0f\n"
9100 + LOCK_PREFIX "subl %2,%0\n"
9101 + "int $4\n0:\n"
9102 + _ASM_EXTABLE(0b, 0b)
9103 +#endif
9104 +
9105 + "sets %1\n"
9106 : "+m" (v->counter), "=qm" (c)
9107 : "ir" (i) : "memory");
9108 return c;
9109 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9110 goto no_xadd;
9111 #endif
9112 /* Modern 486+ processor */
9113 - return i + xadd(&v->counter, i);
9114 + return i + xadd_check_overflow(&v->counter, i);
9115
9116 #ifdef CONFIG_M386
9117 no_xadd: /* Legacy 386 processor */
9118 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9119 }
9120
9121 /**
9122 + * atomic_add_return_unchecked - add integer and return
9123 + * @i: integer value to add
9124 + * @v: pointer of type atomic_unchecked_t
9125 + *
9126 + * Atomically adds @i to @v and returns @i + @v
9127 + */
9128 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9129 +{
9130 +#ifdef CONFIG_M386
9131 + int __i;
9132 + unsigned long flags;
9133 + if (unlikely(boot_cpu_data.x86 <= 3))
9134 + goto no_xadd;
9135 +#endif
9136 + /* Modern 486+ processor */
9137 + return i + xadd(&v->counter, i);
9138 +
9139 +#ifdef CONFIG_M386
9140 +no_xadd: /* Legacy 386 processor */
9141 + raw_local_irq_save(flags);
9142 + __i = atomic_read_unchecked(v);
9143 + atomic_set_unchecked(v, i + __i);
9144 + raw_local_irq_restore(flags);
9145 + return i + __i;
9146 +#endif
9147 +}
9148 +
9149 +/**
9150 * atomic_sub_return - subtract integer and return
9151 * @v: pointer of type atomic_t
9152 * @i: integer value to subtract
9153 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9154 }
9155
9156 #define atomic_inc_return(v) (atomic_add_return(1, v))
9157 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9158 +{
9159 + return atomic_add_return_unchecked(1, v);
9160 +}
9161 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9162
9163 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9164 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9165 return cmpxchg(&v->counter, old, new);
9166 }
9167
9168 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9169 +{
9170 + return cmpxchg(&v->counter, old, new);
9171 +}
9172 +
9173 static inline int atomic_xchg(atomic_t *v, int new)
9174 {
9175 return xchg(&v->counter, new);
9176 }
9177
9178 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9179 +{
9180 + return xchg(&v->counter, new);
9181 +}
9182 +
9183 /**
9184 * __atomic_add_unless - add unless the number is already a given value
9185 * @v: pointer of type atomic_t
9186 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9187 */
9188 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9189 {
9190 - int c, old;
9191 + int c, old, new;
9192 c = atomic_read(v);
9193 for (;;) {
9194 - if (unlikely(c == (u)))
9195 + if (unlikely(c == u))
9196 break;
9197 - old = atomic_cmpxchg((v), c, c + (a));
9198 +
9199 + asm volatile("addl %2,%0\n"
9200 +
9201 +#ifdef CONFIG_PAX_REFCOUNT
9202 + "jno 0f\n"
9203 + "subl %2,%0\n"
9204 + "int $4\n0:\n"
9205 + _ASM_EXTABLE(0b, 0b)
9206 +#endif
9207 +
9208 + : "=r" (new)
9209 + : "0" (c), "ir" (a));
9210 +
9211 + old = atomic_cmpxchg(v, c, new);
9212 if (likely(old == c))
9213 break;
9214 c = old;
9215 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9216 return c;
9217 }
9218
9219 +/**
9220 + * atomic_inc_not_zero_hint - increment if not null
9221 + * @v: pointer of type atomic_t
9222 + * @hint: probable value of the atomic before the increment
9223 + *
9224 + * This version of atomic_inc_not_zero() gives a hint of probable
9225 + * value of the atomic. This helps processor to not read the memory
9226 + * before doing the atomic read/modify/write cycle, lowering
9227 + * number of bus transactions on some arches.
9228 + *
9229 + * Returns: 0 if increment was not done, 1 otherwise.
9230 + */
9231 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9232 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9233 +{
9234 + int val, c = hint, new;
9235 +
9236 + /* sanity test, should be removed by compiler if hint is a constant */
9237 + if (!hint)
9238 + return __atomic_add_unless(v, 1, 0);
9239 +
9240 + do {
9241 + asm volatile("incl %0\n"
9242 +
9243 +#ifdef CONFIG_PAX_REFCOUNT
9244 + "jno 0f\n"
9245 + "decl %0\n"
9246 + "int $4\n0:\n"
9247 + _ASM_EXTABLE(0b, 0b)
9248 +#endif
9249 +
9250 + : "=r" (new)
9251 + : "0" (c));
9252 +
9253 + val = atomic_cmpxchg(v, c, new);
9254 + if (val == c)
9255 + return 1;
9256 + c = val;
9257 + } while (c);
9258 +
9259 + return 0;
9260 +}
9261
9262 /*
9263 * atomic_dec_if_positive - decrement by 1 if old value positive
9264 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9265 index fa13f0e..27c2e08 100644
9266 --- a/arch/x86/include/asm/atomic64_32.h
9267 +++ b/arch/x86/include/asm/atomic64_32.h
9268 @@ -12,6 +12,14 @@ typedef struct {
9269 u64 __aligned(8) counter;
9270 } atomic64_t;
9271
9272 +#ifdef CONFIG_PAX_REFCOUNT
9273 +typedef struct {
9274 + u64 __aligned(8) counter;
9275 +} atomic64_unchecked_t;
9276 +#else
9277 +typedef atomic64_t atomic64_unchecked_t;
9278 +#endif
9279 +
9280 #define ATOMIC64_INIT(val) { (val) }
9281
9282 #ifdef CONFIG_X86_CMPXCHG64
9283 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9284 }
9285
9286 /**
9287 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9288 + * @p: pointer to type atomic64_unchecked_t
9289 + * @o: expected value
9290 + * @n: new value
9291 + *
9292 + * Atomically sets @v to @n if it was equal to @o and returns
9293 + * the old value.
9294 + */
9295 +
9296 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9297 +{
9298 + return cmpxchg64(&v->counter, o, n);
9299 +}
9300 +
9301 +/**
9302 * atomic64_xchg - xchg atomic64 variable
9303 * @v: pointer to type atomic64_t
9304 * @n: value to assign
9305 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9306 }
9307
9308 /**
9309 + * atomic64_set_unchecked - set atomic64 variable
9310 + * @v: pointer to type atomic64_unchecked_t
9311 + * @n: value to assign
9312 + *
9313 + * Atomically sets the value of @v to @n.
9314 + */
9315 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9316 +{
9317 + unsigned high = (unsigned)(i >> 32);
9318 + unsigned low = (unsigned)i;
9319 + asm volatile(ATOMIC64_ALTERNATIVE(set)
9320 + : "+b" (low), "+c" (high)
9321 + : "S" (v)
9322 + : "eax", "edx", "memory"
9323 + );
9324 +}
9325 +
9326 +/**
9327 * atomic64_read - read atomic64 variable
9328 * @v: pointer to type atomic64_t
9329 *
9330 @@ -93,6 +134,22 @@ static inline long long atomic64_read(const atomic64_t *v)
9331 }
9332
9333 /**
9334 + * atomic64_read_unchecked - read atomic64 variable
9335 + * @v: pointer to type atomic64_unchecked_t
9336 + *
9337 + * Atomically reads the value of @v and returns it.
9338 + */
9339 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9340 +{
9341 + long long r;
9342 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
9343 + : "=A" (r), "+c" (v)
9344 + : : "memory"
9345 + );
9346 + return r;
9347 + }
9348 +
9349 +/**
9350 * atomic64_add_return - add and return
9351 * @i: integer value to add
9352 * @v: pointer to type atomic64_t
9353 @@ -108,6 +165,22 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9354 return i;
9355 }
9356
9357 +/**
9358 + * atomic64_add_return_unchecked - add and return
9359 + * @i: integer value to add
9360 + * @v: pointer to type atomic64_unchecked_t
9361 + *
9362 + * Atomically adds @i to @v and returns @i + *@v
9363 + */
9364 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9365 +{
9366 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
9367 + : "+A" (i), "+c" (v)
9368 + : : "memory"
9369 + );
9370 + return i;
9371 +}
9372 +
9373 /*
9374 * Other variants with different arithmetic operators:
9375 */
9376 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9377 return a;
9378 }
9379
9380 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9381 +{
9382 + long long a;
9383 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
9384 + : "=A" (a)
9385 + : "S" (v)
9386 + : "memory", "ecx"
9387 + );
9388 + return a;
9389 +}
9390 +
9391 static inline long long atomic64_dec_return(atomic64_t *v)
9392 {
9393 long long a;
9394 @@ -159,6 +243,22 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9395 }
9396
9397 /**
9398 + * atomic64_add_unchecked - add integer to atomic64 variable
9399 + * @i: integer value to add
9400 + * @v: pointer to type atomic64_unchecked_t
9401 + *
9402 + * Atomically adds @i to @v.
9403 + */
9404 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9405 +{
9406 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
9407 + : "+A" (i), "+c" (v)
9408 + : : "memory"
9409 + );
9410 + return i;
9411 +}
9412 +
9413 +/**
9414 * atomic64_sub - subtract the atomic64 variable
9415 * @i: integer value to subtract
9416 * @v: pointer to type atomic64_t
9417 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9418 index 0e1cbfc..5623683 100644
9419 --- a/arch/x86/include/asm/atomic64_64.h
9420 +++ b/arch/x86/include/asm/atomic64_64.h
9421 @@ -18,7 +18,19 @@
9422 */
9423 static inline long atomic64_read(const atomic64_t *v)
9424 {
9425 - return (*(volatile long *)&(v)->counter);
9426 + return (*(volatile const long *)&(v)->counter);
9427 +}
9428 +
9429 +/**
9430 + * atomic64_read_unchecked - read atomic64 variable
9431 + * @v: pointer of type atomic64_unchecked_t
9432 + *
9433 + * Atomically reads the value of @v.
9434 + * Doesn't imply a read memory barrier.
9435 + */
9436 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9437 +{
9438 + return (*(volatile const long *)&(v)->counter);
9439 }
9440
9441 /**
9442 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9443 }
9444
9445 /**
9446 + * atomic64_set_unchecked - set atomic64 variable
9447 + * @v: pointer to type atomic64_unchecked_t
9448 + * @i: required value
9449 + *
9450 + * Atomically sets the value of @v to @i.
9451 + */
9452 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9453 +{
9454 + v->counter = i;
9455 +}
9456 +
9457 +/**
9458 * atomic64_add - add integer to atomic64 variable
9459 * @i: integer value to add
9460 * @v: pointer to type atomic64_t
9461 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9462 */
9463 static inline void atomic64_add(long i, atomic64_t *v)
9464 {
9465 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9466 +
9467 +#ifdef CONFIG_PAX_REFCOUNT
9468 + "jno 0f\n"
9469 + LOCK_PREFIX "subq %1,%0\n"
9470 + "int $4\n0:\n"
9471 + _ASM_EXTABLE(0b, 0b)
9472 +#endif
9473 +
9474 + : "=m" (v->counter)
9475 + : "er" (i), "m" (v->counter));
9476 +}
9477 +
9478 +/**
9479 + * atomic64_add_unchecked - add integer to atomic64 variable
9480 + * @i: integer value to add
9481 + * @v: pointer to type atomic64_unchecked_t
9482 + *
9483 + * Atomically adds @i to @v.
9484 + */
9485 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9486 +{
9487 asm volatile(LOCK_PREFIX "addq %1,%0"
9488 : "=m" (v->counter)
9489 : "er" (i), "m" (v->counter));
9490 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9491 */
9492 static inline void atomic64_sub(long i, atomic64_t *v)
9493 {
9494 - asm volatile(LOCK_PREFIX "subq %1,%0"
9495 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9496 +
9497 +#ifdef CONFIG_PAX_REFCOUNT
9498 + "jno 0f\n"
9499 + LOCK_PREFIX "addq %1,%0\n"
9500 + "int $4\n0:\n"
9501 + _ASM_EXTABLE(0b, 0b)
9502 +#endif
9503 +
9504 + : "=m" (v->counter)
9505 + : "er" (i), "m" (v->counter));
9506 +}
9507 +
9508 +/**
9509 + * atomic64_sub_unchecked - subtract the atomic64 variable
9510 + * @i: integer value to subtract
9511 + * @v: pointer to type atomic64_unchecked_t
9512 + *
9513 + * Atomically subtracts @i from @v.
9514 + */
9515 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9516 +{
9517 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9518 : "=m" (v->counter)
9519 : "er" (i), "m" (v->counter));
9520 }
9521 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9522 {
9523 unsigned char c;
9524
9525 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9526 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9527 +
9528 +#ifdef CONFIG_PAX_REFCOUNT
9529 + "jno 0f\n"
9530 + LOCK_PREFIX "addq %2,%0\n"
9531 + "int $4\n0:\n"
9532 + _ASM_EXTABLE(0b, 0b)
9533 +#endif
9534 +
9535 + "sete %1\n"
9536 : "=m" (v->counter), "=qm" (c)
9537 : "er" (i), "m" (v->counter) : "memory");
9538 return c;
9539 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9540 */
9541 static inline void atomic64_inc(atomic64_t *v)
9542 {
9543 + asm volatile(LOCK_PREFIX "incq %0\n"
9544 +
9545 +#ifdef CONFIG_PAX_REFCOUNT
9546 + "jno 0f\n"
9547 + LOCK_PREFIX "decq %0\n"
9548 + "int $4\n0:\n"
9549 + _ASM_EXTABLE(0b, 0b)
9550 +#endif
9551 +
9552 + : "=m" (v->counter)
9553 + : "m" (v->counter));
9554 +}
9555 +
9556 +/**
9557 + * atomic64_inc_unchecked - increment atomic64 variable
9558 + * @v: pointer to type atomic64_unchecked_t
9559 + *
9560 + * Atomically increments @v by 1.
9561 + */
9562 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9563 +{
9564 asm volatile(LOCK_PREFIX "incq %0"
9565 : "=m" (v->counter)
9566 : "m" (v->counter));
9567 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9568 */
9569 static inline void atomic64_dec(atomic64_t *v)
9570 {
9571 - asm volatile(LOCK_PREFIX "decq %0"
9572 + asm volatile(LOCK_PREFIX "decq %0\n"
9573 +
9574 +#ifdef CONFIG_PAX_REFCOUNT
9575 + "jno 0f\n"
9576 + LOCK_PREFIX "incq %0\n"
9577 + "int $4\n0:\n"
9578 + _ASM_EXTABLE(0b, 0b)
9579 +#endif
9580 +
9581 + : "=m" (v->counter)
9582 + : "m" (v->counter));
9583 +}
9584 +
9585 +/**
9586 + * atomic64_dec_unchecked - decrement atomic64 variable
9587 + * @v: pointer to type atomic64_t
9588 + *
9589 + * Atomically decrements @v by 1.
9590 + */
9591 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9592 +{
9593 + asm volatile(LOCK_PREFIX "decq %0\n"
9594 : "=m" (v->counter)
9595 : "m" (v->counter));
9596 }
9597 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9598 {
9599 unsigned char c;
9600
9601 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9602 + asm volatile(LOCK_PREFIX "decq %0\n"
9603 +
9604 +#ifdef CONFIG_PAX_REFCOUNT
9605 + "jno 0f\n"
9606 + LOCK_PREFIX "incq %0\n"
9607 + "int $4\n0:\n"
9608 + _ASM_EXTABLE(0b, 0b)
9609 +#endif
9610 +
9611 + "sete %1\n"
9612 : "=m" (v->counter), "=qm" (c)
9613 : "m" (v->counter) : "memory");
9614 return c != 0;
9615 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9616 {
9617 unsigned char c;
9618
9619 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9620 + asm volatile(LOCK_PREFIX "incq %0\n"
9621 +
9622 +#ifdef CONFIG_PAX_REFCOUNT
9623 + "jno 0f\n"
9624 + LOCK_PREFIX "decq %0\n"
9625 + "int $4\n0:\n"
9626 + _ASM_EXTABLE(0b, 0b)
9627 +#endif
9628 +
9629 + "sete %1\n"
9630 : "=m" (v->counter), "=qm" (c)
9631 : "m" (v->counter) : "memory");
9632 return c != 0;
9633 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9634 {
9635 unsigned char c;
9636
9637 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9638 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9639 +
9640 +#ifdef CONFIG_PAX_REFCOUNT
9641 + "jno 0f\n"
9642 + LOCK_PREFIX "subq %2,%0\n"
9643 + "int $4\n0:\n"
9644 + _ASM_EXTABLE(0b, 0b)
9645 +#endif
9646 +
9647 + "sets %1\n"
9648 : "=m" (v->counter), "=qm" (c)
9649 : "er" (i), "m" (v->counter) : "memory");
9650 return c;
9651 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9652 */
9653 static inline long atomic64_add_return(long i, atomic64_t *v)
9654 {
9655 + return i + xadd_check_overflow(&v->counter, i);
9656 +}
9657 +
9658 +/**
9659 + * atomic64_add_return_unchecked - add and return
9660 + * @i: integer value to add
9661 + * @v: pointer to type atomic64_unchecked_t
9662 + *
9663 + * Atomically adds @i to @v and returns @i + @v
9664 + */
9665 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9666 +{
9667 return i + xadd(&v->counter, i);
9668 }
9669
9670 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9671 }
9672
9673 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9674 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9675 +{
9676 + return atomic64_add_return_unchecked(1, v);
9677 +}
9678 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9679
9680 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9681 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9682 return cmpxchg(&v->counter, old, new);
9683 }
9684
9685 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9686 +{
9687 + return cmpxchg(&v->counter, old, new);
9688 +}
9689 +
9690 static inline long atomic64_xchg(atomic64_t *v, long new)
9691 {
9692 return xchg(&v->counter, new);
9693 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
9694 */
9695 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9696 {
9697 - long c, old;
9698 + long c, old, new;
9699 c = atomic64_read(v);
9700 for (;;) {
9701 - if (unlikely(c == (u)))
9702 + if (unlikely(c == u))
9703 break;
9704 - old = atomic64_cmpxchg((v), c, c + (a));
9705 +
9706 + asm volatile("add %2,%0\n"
9707 +
9708 +#ifdef CONFIG_PAX_REFCOUNT
9709 + "jno 0f\n"
9710 + "sub %2,%0\n"
9711 + "int $4\n0:\n"
9712 + _ASM_EXTABLE(0b, 0b)
9713 +#endif
9714 +
9715 + : "=r" (new)
9716 + : "0" (c), "ir" (a));
9717 +
9718 + old = atomic64_cmpxchg(v, c, new);
9719 if (likely(old == c))
9720 break;
9721 c = old;
9722 }
9723 - return c != (u);
9724 + return c != u;
9725 }
9726
9727 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9728 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9729 index b97596e..9bd48b06 100644
9730 --- a/arch/x86/include/asm/bitops.h
9731 +++ b/arch/x86/include/asm/bitops.h
9732 @@ -38,7 +38,7 @@
9733 * a mask operation on a byte.
9734 */
9735 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9736 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9737 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9738 #define CONST_MASK(nr) (1 << ((nr) & 7))
9739
9740 /**
9741 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9742 index 5e1a2ee..c9f9533 100644
9743 --- a/arch/x86/include/asm/boot.h
9744 +++ b/arch/x86/include/asm/boot.h
9745 @@ -11,10 +11,15 @@
9746 #include <asm/pgtable_types.h>
9747
9748 /* Physical address where kernel should be loaded. */
9749 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9750 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9751 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9752 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9753
9754 +#ifndef __ASSEMBLY__
9755 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
9756 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9757 +#endif
9758 +
9759 /* Minimum kernel alignment, as a power of two */
9760 #ifdef CONFIG_X86_64
9761 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9762 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9763 index 48f99f1..d78ebf9 100644
9764 --- a/arch/x86/include/asm/cache.h
9765 +++ b/arch/x86/include/asm/cache.h
9766 @@ -5,12 +5,13 @@
9767
9768 /* L1 cache line size */
9769 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9770 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9771 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9772
9773 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
9774 +#define __read_only __attribute__((__section__(".data..read_only")))
9775
9776 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
9777 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
9778 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
9779
9780 #ifdef CONFIG_X86_VSMP
9781 #ifdef CONFIG_SMP
9782 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9783 index 4e12668..501d239 100644
9784 --- a/arch/x86/include/asm/cacheflush.h
9785 +++ b/arch/x86/include/asm/cacheflush.h
9786 @@ -26,7 +26,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
9787 unsigned long pg_flags = pg->flags & _PGMT_MASK;
9788
9789 if (pg_flags == _PGMT_DEFAULT)
9790 - return -1;
9791 + return ~0UL;
9792 else if (pg_flags == _PGMT_WC)
9793 return _PAGE_CACHE_WC;
9794 else if (pg_flags == _PGMT_UC_MINUS)
9795 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9796 index 46fc474..b02b0f9 100644
9797 --- a/arch/x86/include/asm/checksum_32.h
9798 +++ b/arch/x86/include/asm/checksum_32.h
9799 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9800 int len, __wsum sum,
9801 int *src_err_ptr, int *dst_err_ptr);
9802
9803 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9804 + int len, __wsum sum,
9805 + int *src_err_ptr, int *dst_err_ptr);
9806 +
9807 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9808 + int len, __wsum sum,
9809 + int *src_err_ptr, int *dst_err_ptr);
9810 +
9811 /*
9812 * Note: when you get a NULL pointer exception here this means someone
9813 * passed in an incorrect kernel address to one of these functions.
9814 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9815 int *err_ptr)
9816 {
9817 might_sleep();
9818 - return csum_partial_copy_generic((__force void *)src, dst,
9819 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
9820 len, sum, err_ptr, NULL);
9821 }
9822
9823 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9824 {
9825 might_sleep();
9826 if (access_ok(VERIFY_WRITE, dst, len))
9827 - return csum_partial_copy_generic(src, (__force void *)dst,
9828 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9829 len, sum, NULL, err_ptr);
9830
9831 if (len)
9832 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
9833 index 99480e5..d81165b 100644
9834 --- a/arch/x86/include/asm/cmpxchg.h
9835 +++ b/arch/x86/include/asm/cmpxchg.h
9836 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
9837 __compiletime_error("Bad argument size for cmpxchg");
9838 extern void __xadd_wrong_size(void)
9839 __compiletime_error("Bad argument size for xadd");
9840 +extern void __xadd_check_overflow_wrong_size(void)
9841 + __compiletime_error("Bad argument size for xadd_check_overflow");
9842 extern void __add_wrong_size(void)
9843 __compiletime_error("Bad argument size for add");
9844 +extern void __add_check_overflow_wrong_size(void)
9845 + __compiletime_error("Bad argument size for add_check_overflow");
9846
9847 /*
9848 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
9849 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
9850 __ret; \
9851 })
9852
9853 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
9854 + ({ \
9855 + __typeof__ (*(ptr)) __ret = (arg); \
9856 + switch (sizeof(*(ptr))) { \
9857 + case __X86_CASE_L: \
9858 + asm volatile (lock #op "l %0, %1\n" \
9859 + "jno 0f\n" \
9860 + "mov %0,%1\n" \
9861 + "int $4\n0:\n" \
9862 + _ASM_EXTABLE(0b, 0b) \
9863 + : "+r" (__ret), "+m" (*(ptr)) \
9864 + : : "memory", "cc"); \
9865 + break; \
9866 + case __X86_CASE_Q: \
9867 + asm volatile (lock #op "q %q0, %1\n" \
9868 + "jno 0f\n" \
9869 + "mov %0,%1\n" \
9870 + "int $4\n0:\n" \
9871 + _ASM_EXTABLE(0b, 0b) \
9872 + : "+r" (__ret), "+m" (*(ptr)) \
9873 + : : "memory", "cc"); \
9874 + break; \
9875 + default: \
9876 + __ ## op ## _check_overflow_wrong_size(); \
9877 + } \
9878 + __ret; \
9879 + })
9880 +
9881 /*
9882 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
9883 * Since this is generally used to protect other memory information, we
9884 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
9885 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
9886 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
9887
9888 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
9889 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
9890 +
9891 #define __add(ptr, inc, lock) \
9892 ({ \
9893 __typeof__ (*(ptr)) __ret = (inc); \
9894 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
9895 index 8d67d42..183d0eb 100644
9896 --- a/arch/x86/include/asm/cpufeature.h
9897 +++ b/arch/x86/include/asm/cpufeature.h
9898 @@ -367,7 +367,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
9899 ".section .discard,\"aw\",@progbits\n"
9900 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
9901 ".previous\n"
9902 - ".section .altinstr_replacement,\"ax\"\n"
9903 + ".section .altinstr_replacement,\"a\"\n"
9904 "3: movb $1,%0\n"
9905 "4:\n"
9906 ".previous\n"
9907 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9908 index e95822d..a90010e 100644
9909 --- a/arch/x86/include/asm/desc.h
9910 +++ b/arch/x86/include/asm/desc.h
9911 @@ -4,6 +4,7 @@
9912 #include <asm/desc_defs.h>
9913 #include <asm/ldt.h>
9914 #include <asm/mmu.h>
9915 +#include <asm/pgtable.h>
9916
9917 #include <linux/smp.h>
9918
9919 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9920
9921 desc->type = (info->read_exec_only ^ 1) << 1;
9922 desc->type |= info->contents << 2;
9923 + desc->type |= info->seg_not_present ^ 1;
9924
9925 desc->s = 1;
9926 desc->dpl = 0x3;
9927 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
9928 }
9929
9930 extern struct desc_ptr idt_descr;
9931 -extern gate_desc idt_table[];
9932 extern struct desc_ptr nmi_idt_descr;
9933 -extern gate_desc nmi_idt_table[];
9934 -
9935 -struct gdt_page {
9936 - struct desc_struct gdt[GDT_ENTRIES];
9937 -} __attribute__((aligned(PAGE_SIZE)));
9938 -
9939 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9940 +extern gate_desc idt_table[256];
9941 +extern gate_desc nmi_idt_table[256];
9942
9943 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9944 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9945 {
9946 - return per_cpu(gdt_page, cpu).gdt;
9947 + return cpu_gdt_table[cpu];
9948 }
9949
9950 #ifdef CONFIG_X86_64
9951 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9952 unsigned long base, unsigned dpl, unsigned flags,
9953 unsigned short seg)
9954 {
9955 - gate->a = (seg << 16) | (base & 0xffff);
9956 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9957 + gate->gate.offset_low = base;
9958 + gate->gate.seg = seg;
9959 + gate->gate.reserved = 0;
9960 + gate->gate.type = type;
9961 + gate->gate.s = 0;
9962 + gate->gate.dpl = dpl;
9963 + gate->gate.p = 1;
9964 + gate->gate.offset_high = base >> 16;
9965 }
9966
9967 #endif
9968 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9969
9970 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
9971 {
9972 + pax_open_kernel();
9973 memcpy(&idt[entry], gate, sizeof(*gate));
9974 + pax_close_kernel();
9975 }
9976
9977 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
9978 {
9979 + pax_open_kernel();
9980 memcpy(&ldt[entry], desc, 8);
9981 + pax_close_kernel();
9982 }
9983
9984 static inline void
9985 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
9986 default: size = sizeof(*gdt); break;
9987 }
9988
9989 + pax_open_kernel();
9990 memcpy(&gdt[entry], desc, size);
9991 + pax_close_kernel();
9992 }
9993
9994 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9995 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9996
9997 static inline void native_load_tr_desc(void)
9998 {
9999 + pax_open_kernel();
10000 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10001 + pax_close_kernel();
10002 }
10003
10004 static inline void native_load_gdt(const struct desc_ptr *dtr)
10005 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10006 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10007 unsigned int i;
10008
10009 + pax_open_kernel();
10010 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10011 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10012 + pax_close_kernel();
10013 }
10014
10015 #define _LDT_empty(info) \
10016 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10017 }
10018
10019 #ifdef CONFIG_X86_64
10020 -static inline void set_nmi_gate(int gate, void *addr)
10021 +static inline void set_nmi_gate(int gate, const void *addr)
10022 {
10023 gate_desc s;
10024
10025 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10026 }
10027 #endif
10028
10029 -static inline void _set_gate(int gate, unsigned type, void *addr,
10030 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10031 unsigned dpl, unsigned ist, unsigned seg)
10032 {
10033 gate_desc s;
10034 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10035 * Pentium F0 0F bugfix can have resulted in the mapped
10036 * IDT being write-protected.
10037 */
10038 -static inline void set_intr_gate(unsigned int n, void *addr)
10039 +static inline void set_intr_gate(unsigned int n, const void *addr)
10040 {
10041 BUG_ON((unsigned)n > 0xFF);
10042 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10043 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10044 /*
10045 * This routine sets up an interrupt gate at directory privilege level 3.
10046 */
10047 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10048 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10049 {
10050 BUG_ON((unsigned)n > 0xFF);
10051 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10052 }
10053
10054 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10055 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10056 {
10057 BUG_ON((unsigned)n > 0xFF);
10058 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10059 }
10060
10061 -static inline void set_trap_gate(unsigned int n, void *addr)
10062 +static inline void set_trap_gate(unsigned int n, const void *addr)
10063 {
10064 BUG_ON((unsigned)n > 0xFF);
10065 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10066 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10067 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10068 {
10069 BUG_ON((unsigned)n > 0xFF);
10070 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10071 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10072 }
10073
10074 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10075 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10076 {
10077 BUG_ON((unsigned)n > 0xFF);
10078 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10079 }
10080
10081 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10082 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10083 {
10084 BUG_ON((unsigned)n > 0xFF);
10085 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10086 }
10087
10088 +#ifdef CONFIG_X86_32
10089 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10090 +{
10091 + struct desc_struct d;
10092 +
10093 + if (likely(limit))
10094 + limit = (limit - 1UL) >> PAGE_SHIFT;
10095 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10096 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10097 +}
10098 +#endif
10099 +
10100 #endif /* _ASM_X86_DESC_H */
10101 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10102 index 278441f..b95a174 100644
10103 --- a/arch/x86/include/asm/desc_defs.h
10104 +++ b/arch/x86/include/asm/desc_defs.h
10105 @@ -31,6 +31,12 @@ struct desc_struct {
10106 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10107 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10108 };
10109 + struct {
10110 + u16 offset_low;
10111 + u16 seg;
10112 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10113 + unsigned offset_high: 16;
10114 + } gate;
10115 };
10116 } __attribute__((packed));
10117
10118 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10119 index 3778256..c5d4fce 100644
10120 --- a/arch/x86/include/asm/e820.h
10121 +++ b/arch/x86/include/asm/e820.h
10122 @@ -69,7 +69,7 @@ struct e820map {
10123 #define ISA_START_ADDRESS 0xa0000
10124 #define ISA_END_ADDRESS 0x100000
10125
10126 -#define BIOS_BEGIN 0x000a0000
10127 +#define BIOS_BEGIN 0x000c0000
10128 #define BIOS_END 0x00100000
10129
10130 #define BIOS_ROM_BASE 0xffe00000
10131 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10132 index 5f962df..7289f09 100644
10133 --- a/arch/x86/include/asm/elf.h
10134 +++ b/arch/x86/include/asm/elf.h
10135 @@ -238,7 +238,25 @@ extern int force_personality32;
10136 the loader. We need to make sure that it is out of the way of the program
10137 that it will "exec", and that there is sufficient room for the brk. */
10138
10139 +#ifdef CONFIG_PAX_SEGMEXEC
10140 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10141 +#else
10142 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10143 +#endif
10144 +
10145 +#ifdef CONFIG_PAX_ASLR
10146 +#ifdef CONFIG_X86_32
10147 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10148 +
10149 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10150 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10151 +#else
10152 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10153 +
10154 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10155 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10156 +#endif
10157 +#endif
10158
10159 /* This yields a mask that user programs can use to figure out what
10160 instruction set this CPU supports. This could be done in user space,
10161 @@ -291,9 +309,7 @@ do { \
10162
10163 #define ARCH_DLINFO \
10164 do { \
10165 - if (vdso_enabled) \
10166 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10167 - (unsigned long)current->mm->context.vdso); \
10168 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10169 } while (0)
10170
10171 #define AT_SYSINFO 32
10172 @@ -304,7 +320,7 @@ do { \
10173
10174 #endif /* !CONFIG_X86_32 */
10175
10176 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10177 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10178
10179 #define VDSO_ENTRY \
10180 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10181 @@ -318,9 +334,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10182 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10183 #define compat_arch_setup_additional_pages syscall32_setup_pages
10184
10185 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10186 -#define arch_randomize_brk arch_randomize_brk
10187 -
10188 /*
10189 * True on X86_32 or when emulating IA32 on X86_64
10190 */
10191 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10192 index cc70c1c..d96d011 100644
10193 --- a/arch/x86/include/asm/emergency-restart.h
10194 +++ b/arch/x86/include/asm/emergency-restart.h
10195 @@ -15,6 +15,6 @@ enum reboot_type {
10196
10197 extern enum reboot_type reboot_type;
10198
10199 -extern void machine_emergency_restart(void);
10200 +extern void machine_emergency_restart(void) __noreturn;
10201
10202 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10203 diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
10204 index dbe82a5..c6d8a00 100644
10205 --- a/arch/x86/include/asm/floppy.h
10206 +++ b/arch/x86/include/asm/floppy.h
10207 @@ -157,6 +157,7 @@ static unsigned long dma_mem_alloc(unsigned long size)
10208 }
10209
10210
10211 +static unsigned long vdma_mem_alloc(unsigned long size) __size_overflow(1);
10212 static unsigned long vdma_mem_alloc(unsigned long size)
10213 {
10214 return (unsigned long)vmalloc(size);
10215 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10216 index d09bb03..4ea4194 100644
10217 --- a/arch/x86/include/asm/futex.h
10218 +++ b/arch/x86/include/asm/futex.h
10219 @@ -12,16 +12,18 @@
10220 #include <asm/system.h>
10221
10222 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10223 + typecheck(u32 __user *, uaddr); \
10224 asm volatile("1:\t" insn "\n" \
10225 "2:\t.section .fixup,\"ax\"\n" \
10226 "3:\tmov\t%3, %1\n" \
10227 "\tjmp\t2b\n" \
10228 "\t.previous\n" \
10229 _ASM_EXTABLE(1b, 3b) \
10230 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10231 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10232 : "i" (-EFAULT), "0" (oparg), "1" (0))
10233
10234 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10235 + typecheck(u32 __user *, uaddr); \
10236 asm volatile("1:\tmovl %2, %0\n" \
10237 "\tmovl\t%0, %3\n" \
10238 "\t" insn "\n" \
10239 @@ -34,7 +36,7 @@
10240 _ASM_EXTABLE(1b, 4b) \
10241 _ASM_EXTABLE(2b, 4b) \
10242 : "=&a" (oldval), "=&r" (ret), \
10243 - "+m" (*uaddr), "=&r" (tem) \
10244 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10245 : "r" (oparg), "i" (-EFAULT), "1" (0))
10246
10247 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10248 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10249
10250 switch (op) {
10251 case FUTEX_OP_SET:
10252 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10253 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10254 break;
10255 case FUTEX_OP_ADD:
10256 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10257 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10258 uaddr, oparg);
10259 break;
10260 case FUTEX_OP_OR:
10261 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10262 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10263 return -EFAULT;
10264
10265 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10266 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10267 "2:\t.section .fixup, \"ax\"\n"
10268 "3:\tmov %3, %0\n"
10269 "\tjmp 2b\n"
10270 "\t.previous\n"
10271 _ASM_EXTABLE(1b, 3b)
10272 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10273 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10274 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10275 : "memory"
10276 );
10277 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10278 index eb92a6e..b98b2f4 100644
10279 --- a/arch/x86/include/asm/hw_irq.h
10280 +++ b/arch/x86/include/asm/hw_irq.h
10281 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10282 extern void enable_IO_APIC(void);
10283
10284 /* Statistics */
10285 -extern atomic_t irq_err_count;
10286 -extern atomic_t irq_mis_count;
10287 +extern atomic_unchecked_t irq_err_count;
10288 +extern atomic_unchecked_t irq_mis_count;
10289
10290 /* EISA */
10291 extern void eisa_set_level_irq(unsigned int irq);
10292 diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10293 index 2479049..3fb9795 100644
10294 --- a/arch/x86/include/asm/i387.h
10295 +++ b/arch/x86/include/asm/i387.h
10296 @@ -93,6 +93,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10297 {
10298 int err;
10299
10300 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10301 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10302 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10303 +#endif
10304 +
10305 /* See comment in fxsave() below. */
10306 #ifdef CONFIG_AS_FXSAVEQ
10307 asm volatile("1: fxrstorq %[fx]\n\t"
10308 @@ -122,6 +127,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10309 {
10310 int err;
10311
10312 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10313 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10314 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10315 +#endif
10316 +
10317 /*
10318 * Clear the bytes not touched by the fxsave and reserved
10319 * for the SW usage.
10320 @@ -278,7 +288,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10321 "emms\n\t" /* clear stack tags */
10322 "fildl %P[addr]", /* set F?P to defined value */
10323 X86_FEATURE_FXSAVE_LEAK,
10324 - [addr] "m" (tsk->thread.fpu.has_fpu));
10325 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10326
10327 return fpu_restore_checking(&tsk->thread.fpu);
10328 }
10329 @@ -445,7 +455,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
10330 static inline bool interrupted_user_mode(void)
10331 {
10332 struct pt_regs *regs = get_irq_regs();
10333 - return regs && user_mode_vm(regs);
10334 + return regs && user_mode(regs);
10335 }
10336
10337 /*
10338 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10339 index d8e8eef..99f81ae 100644
10340 --- a/arch/x86/include/asm/io.h
10341 +++ b/arch/x86/include/asm/io.h
10342 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10343
10344 #include <linux/vmalloc.h>
10345
10346 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10347 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10348 +{
10349 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10350 +}
10351 +
10352 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10353 +{
10354 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10355 +}
10356 +
10357 /*
10358 * Convert a virtual cached pointer to an uncached pointer
10359 */
10360 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10361 index bba3cf8..06bc8da 100644
10362 --- a/arch/x86/include/asm/irqflags.h
10363 +++ b/arch/x86/include/asm/irqflags.h
10364 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10365 sti; \
10366 sysexit
10367
10368 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10369 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10370 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10371 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10372 +
10373 #else
10374 #define INTERRUPT_RETURN iret
10375 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10376 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10377 index 5478825..839e88c 100644
10378 --- a/arch/x86/include/asm/kprobes.h
10379 +++ b/arch/x86/include/asm/kprobes.h
10380 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10381 #define RELATIVEJUMP_SIZE 5
10382 #define RELATIVECALL_OPCODE 0xe8
10383 #define RELATIVE_ADDR_SIZE 4
10384 -#define MAX_STACK_SIZE 64
10385 -#define MIN_STACK_SIZE(ADDR) \
10386 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10387 - THREAD_SIZE - (unsigned long)(ADDR))) \
10388 - ? (MAX_STACK_SIZE) \
10389 - : (((unsigned long)current_thread_info()) + \
10390 - THREAD_SIZE - (unsigned long)(ADDR)))
10391 +#define MAX_STACK_SIZE 64UL
10392 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10393
10394 #define flush_insn_slot(p) do { } while (0)
10395
10396 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10397 index 52d6640..136b3bd 100644
10398 --- a/arch/x86/include/asm/kvm_host.h
10399 +++ b/arch/x86/include/asm/kvm_host.h
10400 @@ -663,7 +663,7 @@ struct kvm_x86_ops {
10401 int (*check_intercept)(struct kvm_vcpu *vcpu,
10402 struct x86_instruction_info *info,
10403 enum x86_intercept_stage stage);
10404 -};
10405 +} __do_const;
10406
10407 struct kvm_arch_async_pf {
10408 u32 token;
10409 @@ -694,7 +694,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
10410 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
10411
10412 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
10413 - const void *val, int bytes);
10414 + const void *val, int bytes) __size_overflow(2);
10415 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
10416
10417 extern bool tdp_enabled;
10418 @@ -781,7 +781,7 @@ int fx_init(struct kvm_vcpu *vcpu);
10419
10420 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
10421 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
10422 - const u8 *new, int bytes);
10423 + const u8 *new, int bytes) __size_overflow(2);
10424 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
10425 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
10426 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
10427 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10428 index 9cdae5d..300d20f 100644
10429 --- a/arch/x86/include/asm/local.h
10430 +++ b/arch/x86/include/asm/local.h
10431 @@ -18,26 +18,58 @@ typedef struct {
10432
10433 static inline void local_inc(local_t *l)
10434 {
10435 - asm volatile(_ASM_INC "%0"
10436 + asm volatile(_ASM_INC "%0\n"
10437 +
10438 +#ifdef CONFIG_PAX_REFCOUNT
10439 + "jno 0f\n"
10440 + _ASM_DEC "%0\n"
10441 + "int $4\n0:\n"
10442 + _ASM_EXTABLE(0b, 0b)
10443 +#endif
10444 +
10445 : "+m" (l->a.counter));
10446 }
10447
10448 static inline void local_dec(local_t *l)
10449 {
10450 - asm volatile(_ASM_DEC "%0"
10451 + asm volatile(_ASM_DEC "%0\n"
10452 +
10453 +#ifdef CONFIG_PAX_REFCOUNT
10454 + "jno 0f\n"
10455 + _ASM_INC "%0\n"
10456 + "int $4\n0:\n"
10457 + _ASM_EXTABLE(0b, 0b)
10458 +#endif
10459 +
10460 : "+m" (l->a.counter));
10461 }
10462
10463 static inline void local_add(long i, local_t *l)
10464 {
10465 - asm volatile(_ASM_ADD "%1,%0"
10466 + asm volatile(_ASM_ADD "%1,%0\n"
10467 +
10468 +#ifdef CONFIG_PAX_REFCOUNT
10469 + "jno 0f\n"
10470 + _ASM_SUB "%1,%0\n"
10471 + "int $4\n0:\n"
10472 + _ASM_EXTABLE(0b, 0b)
10473 +#endif
10474 +
10475 : "+m" (l->a.counter)
10476 : "ir" (i));
10477 }
10478
10479 static inline void local_sub(long i, local_t *l)
10480 {
10481 - asm volatile(_ASM_SUB "%1,%0"
10482 + asm volatile(_ASM_SUB "%1,%0\n"
10483 +
10484 +#ifdef CONFIG_PAX_REFCOUNT
10485 + "jno 0f\n"
10486 + _ASM_ADD "%1,%0\n"
10487 + "int $4\n0:\n"
10488 + _ASM_EXTABLE(0b, 0b)
10489 +#endif
10490 +
10491 : "+m" (l->a.counter)
10492 : "ir" (i));
10493 }
10494 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10495 {
10496 unsigned char c;
10497
10498 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10499 + asm volatile(_ASM_SUB "%2,%0\n"
10500 +
10501 +#ifdef CONFIG_PAX_REFCOUNT
10502 + "jno 0f\n"
10503 + _ASM_ADD "%2,%0\n"
10504 + "int $4\n0:\n"
10505 + _ASM_EXTABLE(0b, 0b)
10506 +#endif
10507 +
10508 + "sete %1\n"
10509 : "+m" (l->a.counter), "=qm" (c)
10510 : "ir" (i) : "memory");
10511 return c;
10512 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10513 {
10514 unsigned char c;
10515
10516 - asm volatile(_ASM_DEC "%0; sete %1"
10517 + asm volatile(_ASM_DEC "%0\n"
10518 +
10519 +#ifdef CONFIG_PAX_REFCOUNT
10520 + "jno 0f\n"
10521 + _ASM_INC "%0\n"
10522 + "int $4\n0:\n"
10523 + _ASM_EXTABLE(0b, 0b)
10524 +#endif
10525 +
10526 + "sete %1\n"
10527 : "+m" (l->a.counter), "=qm" (c)
10528 : : "memory");
10529 return c != 0;
10530 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10531 {
10532 unsigned char c;
10533
10534 - asm volatile(_ASM_INC "%0; sete %1"
10535 + asm volatile(_ASM_INC "%0\n"
10536 +
10537 +#ifdef CONFIG_PAX_REFCOUNT
10538 + "jno 0f\n"
10539 + _ASM_DEC "%0\n"
10540 + "int $4\n0:\n"
10541 + _ASM_EXTABLE(0b, 0b)
10542 +#endif
10543 +
10544 + "sete %1\n"
10545 : "+m" (l->a.counter), "=qm" (c)
10546 : : "memory");
10547 return c != 0;
10548 @@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10549 {
10550 unsigned char c;
10551
10552 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10553 + asm volatile(_ASM_ADD "%2,%0\n"
10554 +
10555 +#ifdef CONFIG_PAX_REFCOUNT
10556 + "jno 0f\n"
10557 + _ASM_SUB "%2,%0\n"
10558 + "int $4\n0:\n"
10559 + _ASM_EXTABLE(0b, 0b)
10560 +#endif
10561 +
10562 + "sets %1\n"
10563 : "+m" (l->a.counter), "=qm" (c)
10564 : "ir" (i) : "memory");
10565 return c;
10566 @@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10567 #endif
10568 /* Modern 486+ processor */
10569 __i = i;
10570 - asm volatile(_ASM_XADD "%0, %1;"
10571 + asm volatile(_ASM_XADD "%0, %1\n"
10572 +
10573 +#ifdef CONFIG_PAX_REFCOUNT
10574 + "jno 0f\n"
10575 + _ASM_MOV "%0,%1\n"
10576 + "int $4\n0:\n"
10577 + _ASM_EXTABLE(0b, 0b)
10578 +#endif
10579 +
10580 : "+r" (i), "+m" (l->a.counter)
10581 : : "memory");
10582 return i + __i;
10583 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10584 index 593e51d..fa69c9a 100644
10585 --- a/arch/x86/include/asm/mman.h
10586 +++ b/arch/x86/include/asm/mman.h
10587 @@ -5,4 +5,14 @@
10588
10589 #include <asm-generic/mman.h>
10590
10591 +#ifdef __KERNEL__
10592 +#ifndef __ASSEMBLY__
10593 +#ifdef CONFIG_X86_32
10594 +#define arch_mmap_check i386_mmap_check
10595 +int i386_mmap_check(unsigned long addr, unsigned long len,
10596 + unsigned long flags);
10597 +#endif
10598 +#endif
10599 +#endif
10600 +
10601 #endif /* _ASM_X86_MMAN_H */
10602 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10603 index 5f55e69..e20bfb1 100644
10604 --- a/arch/x86/include/asm/mmu.h
10605 +++ b/arch/x86/include/asm/mmu.h
10606 @@ -9,7 +9,7 @@
10607 * we put the segment information here.
10608 */
10609 typedef struct {
10610 - void *ldt;
10611 + struct desc_struct *ldt;
10612 int size;
10613
10614 #ifdef CONFIG_X86_64
10615 @@ -18,7 +18,19 @@ typedef struct {
10616 #endif
10617
10618 struct mutex lock;
10619 - void *vdso;
10620 + unsigned long vdso;
10621 +
10622 +#ifdef CONFIG_X86_32
10623 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10624 + unsigned long user_cs_base;
10625 + unsigned long user_cs_limit;
10626 +
10627 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10628 + cpumask_t cpu_user_cs_mask;
10629 +#endif
10630 +
10631 +#endif
10632 +#endif
10633 } mm_context_t;
10634
10635 #ifdef CONFIG_SMP
10636 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10637 index 6902152..399f3a2 100644
10638 --- a/arch/x86/include/asm/mmu_context.h
10639 +++ b/arch/x86/include/asm/mmu_context.h
10640 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10641
10642 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10643 {
10644 +
10645 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10646 + unsigned int i;
10647 + pgd_t *pgd;
10648 +
10649 + pax_open_kernel();
10650 + pgd = get_cpu_pgd(smp_processor_id());
10651 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10652 + set_pgd_batched(pgd+i, native_make_pgd(0));
10653 + pax_close_kernel();
10654 +#endif
10655 +
10656 #ifdef CONFIG_SMP
10657 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10658 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10659 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10660 struct task_struct *tsk)
10661 {
10662 unsigned cpu = smp_processor_id();
10663 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10664 + int tlbstate = TLBSTATE_OK;
10665 +#endif
10666
10667 if (likely(prev != next)) {
10668 #ifdef CONFIG_SMP
10669 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10670 + tlbstate = percpu_read(cpu_tlbstate.state);
10671 +#endif
10672 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10673 percpu_write(cpu_tlbstate.active_mm, next);
10674 #endif
10675 cpumask_set_cpu(cpu, mm_cpumask(next));
10676
10677 /* Re-load page tables */
10678 +#ifdef CONFIG_PAX_PER_CPU_PGD
10679 + pax_open_kernel();
10680 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10681 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10682 + pax_close_kernel();
10683 + load_cr3(get_cpu_pgd(cpu));
10684 +#else
10685 load_cr3(next->pgd);
10686 +#endif
10687
10688 /* stop flush ipis for the previous mm */
10689 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10690 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10691 */
10692 if (unlikely(prev->context.ldt != next->context.ldt))
10693 load_LDT_nolock(&next->context);
10694 - }
10695 +
10696 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10697 + if (!(__supported_pte_mask & _PAGE_NX)) {
10698 + smp_mb__before_clear_bit();
10699 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10700 + smp_mb__after_clear_bit();
10701 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10702 + }
10703 +#endif
10704 +
10705 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10706 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10707 + prev->context.user_cs_limit != next->context.user_cs_limit))
10708 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10709 #ifdef CONFIG_SMP
10710 + else if (unlikely(tlbstate != TLBSTATE_OK))
10711 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10712 +#endif
10713 +#endif
10714 +
10715 + }
10716 else {
10717 +
10718 +#ifdef CONFIG_PAX_PER_CPU_PGD
10719 + pax_open_kernel();
10720 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10721 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10722 + pax_close_kernel();
10723 + load_cr3(get_cpu_pgd(cpu));
10724 +#endif
10725 +
10726 +#ifdef CONFIG_SMP
10727 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10728 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10729
10730 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10731 * tlb flush IPI delivery. We must reload CR3
10732 * to make sure to use no freed page tables.
10733 */
10734 +
10735 +#ifndef CONFIG_PAX_PER_CPU_PGD
10736 load_cr3(next->pgd);
10737 +#endif
10738 +
10739 load_LDT_nolock(&next->context);
10740 +
10741 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10742 + if (!(__supported_pte_mask & _PAGE_NX))
10743 + cpu_set(cpu, next->context.cpu_user_cs_mask);
10744 +#endif
10745 +
10746 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10747 +#ifdef CONFIG_PAX_PAGEEXEC
10748 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
10749 +#endif
10750 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10751 +#endif
10752 +
10753 }
10754 +#endif
10755 }
10756 -#endif
10757 }
10758
10759 #define activate_mm(prev, next) \
10760 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10761 index 9eae775..c914fea 100644
10762 --- a/arch/x86/include/asm/module.h
10763 +++ b/arch/x86/include/asm/module.h
10764 @@ -5,6 +5,7 @@
10765
10766 #ifdef CONFIG_X86_64
10767 /* X86_64 does not define MODULE_PROC_FAMILY */
10768 +#define MODULE_PROC_FAMILY ""
10769 #elif defined CONFIG_M386
10770 #define MODULE_PROC_FAMILY "386 "
10771 #elif defined CONFIG_M486
10772 @@ -59,8 +60,20 @@
10773 #error unknown processor family
10774 #endif
10775
10776 -#ifdef CONFIG_X86_32
10777 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
10778 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10779 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10780 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10781 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10782 +#else
10783 +#define MODULE_PAX_KERNEXEC ""
10784 #endif
10785
10786 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10787 +#define MODULE_PAX_UDEREF "UDEREF "
10788 +#else
10789 +#define MODULE_PAX_UDEREF ""
10790 +#endif
10791 +
10792 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10793 +
10794 #endif /* _ASM_X86_MODULE_H */
10795 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10796 index 7639dbf..e08a58c 100644
10797 --- a/arch/x86/include/asm/page_64_types.h
10798 +++ b/arch/x86/include/asm/page_64_types.h
10799 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10800
10801 /* duplicated to the one in bootmem.h */
10802 extern unsigned long max_pfn;
10803 -extern unsigned long phys_base;
10804 +extern const unsigned long phys_base;
10805
10806 extern unsigned long __phys_addr(unsigned long);
10807 #define __phys_reloc_hide(x) (x)
10808 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10809 index a7d2db9..edb023e 100644
10810 --- a/arch/x86/include/asm/paravirt.h
10811 +++ b/arch/x86/include/asm/paravirt.h
10812 @@ -667,6 +667,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10813 val);
10814 }
10815
10816 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10817 +{
10818 + pgdval_t val = native_pgd_val(pgd);
10819 +
10820 + if (sizeof(pgdval_t) > sizeof(long))
10821 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10822 + val, (u64)val >> 32);
10823 + else
10824 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10825 + val);
10826 +}
10827 +
10828 static inline void pgd_clear(pgd_t *pgdp)
10829 {
10830 set_pgd(pgdp, __pgd(0));
10831 @@ -748,6 +760,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10832 pv_mmu_ops.set_fixmap(idx, phys, flags);
10833 }
10834
10835 +#ifdef CONFIG_PAX_KERNEXEC
10836 +static inline unsigned long pax_open_kernel(void)
10837 +{
10838 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10839 +}
10840 +
10841 +static inline unsigned long pax_close_kernel(void)
10842 +{
10843 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10844 +}
10845 +#else
10846 +static inline unsigned long pax_open_kernel(void) { return 0; }
10847 +static inline unsigned long pax_close_kernel(void) { return 0; }
10848 +#endif
10849 +
10850 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10851
10852 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
10853 @@ -964,7 +991,7 @@ extern void default_banner(void);
10854
10855 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10856 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10857 -#define PARA_INDIRECT(addr) *%cs:addr
10858 +#define PARA_INDIRECT(addr) *%ss:addr
10859 #endif
10860
10861 #define INTERRUPT_RETURN \
10862 @@ -1041,6 +1068,21 @@ extern void default_banner(void);
10863 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10864 CLBR_NONE, \
10865 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10866 +
10867 +#define GET_CR0_INTO_RDI \
10868 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10869 + mov %rax,%rdi
10870 +
10871 +#define SET_RDI_INTO_CR0 \
10872 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10873 +
10874 +#define GET_CR3_INTO_RDI \
10875 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10876 + mov %rax,%rdi
10877 +
10878 +#define SET_RDI_INTO_CR3 \
10879 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10880 +
10881 #endif /* CONFIG_X86_32 */
10882
10883 #endif /* __ASSEMBLY__ */
10884 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10885 index 8e8b9a4..f07d725 100644
10886 --- a/arch/x86/include/asm/paravirt_types.h
10887 +++ b/arch/x86/include/asm/paravirt_types.h
10888 @@ -84,20 +84,20 @@ struct pv_init_ops {
10889 */
10890 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10891 unsigned long addr, unsigned len);
10892 -};
10893 +} __no_const;
10894
10895
10896 struct pv_lazy_ops {
10897 /* Set deferred update mode, used for batching operations. */
10898 void (*enter)(void);
10899 void (*leave)(void);
10900 -};
10901 +} __no_const;
10902
10903 struct pv_time_ops {
10904 unsigned long long (*sched_clock)(void);
10905 unsigned long long (*steal_clock)(int cpu);
10906 unsigned long (*get_tsc_khz)(void);
10907 -};
10908 +} __no_const;
10909
10910 struct pv_cpu_ops {
10911 /* hooks for various privileged instructions */
10912 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
10913
10914 void (*start_context_switch)(struct task_struct *prev);
10915 void (*end_context_switch)(struct task_struct *next);
10916 -};
10917 +} __no_const;
10918
10919 struct pv_irq_ops {
10920 /*
10921 @@ -224,7 +224,7 @@ struct pv_apic_ops {
10922 unsigned long start_eip,
10923 unsigned long start_esp);
10924 #endif
10925 -};
10926 +} __no_const;
10927
10928 struct pv_mmu_ops {
10929 unsigned long (*read_cr2)(void);
10930 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
10931 struct paravirt_callee_save make_pud;
10932
10933 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10934 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10935 #endif /* PAGETABLE_LEVELS == 4 */
10936 #endif /* PAGETABLE_LEVELS >= 3 */
10937
10938 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
10939 an mfn. We can tell which is which from the index. */
10940 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10941 phys_addr_t phys, pgprot_t flags);
10942 +
10943 +#ifdef CONFIG_PAX_KERNEXEC
10944 + unsigned long (*pax_open_kernel)(void);
10945 + unsigned long (*pax_close_kernel)(void);
10946 +#endif
10947 +
10948 };
10949
10950 struct arch_spinlock;
10951 @@ -334,7 +341,7 @@ struct pv_lock_ops {
10952 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
10953 int (*spin_trylock)(struct arch_spinlock *lock);
10954 void (*spin_unlock)(struct arch_spinlock *lock);
10955 -};
10956 +} __no_const;
10957
10958 /* This contains all the paravirt structures: we get a convenient
10959 * number for each function using the offset which we use to indicate
10960 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10961 index b4389a4..7024269 100644
10962 --- a/arch/x86/include/asm/pgalloc.h
10963 +++ b/arch/x86/include/asm/pgalloc.h
10964 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10965 pmd_t *pmd, pte_t *pte)
10966 {
10967 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10968 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10969 +}
10970 +
10971 +static inline void pmd_populate_user(struct mm_struct *mm,
10972 + pmd_t *pmd, pte_t *pte)
10973 +{
10974 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10975 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10976 }
10977
10978 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
10979
10980 #ifdef CONFIG_X86_PAE
10981 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
10982 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
10983 +{
10984 + pud_populate(mm, pudp, pmd);
10985 +}
10986 #else /* !CONFIG_X86_PAE */
10987 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
10988 {
10989 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
10990 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
10991 }
10992 +
10993 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
10994 +{
10995 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
10996 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
10997 +}
10998 #endif /* CONFIG_X86_PAE */
10999
11000 #if PAGETABLE_LEVELS > 3
11001 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11002 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11003 }
11004
11005 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11006 +{
11007 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11008 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11009 +}
11010 +
11011 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11012 {
11013 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11014 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11015 index 98391db..8f6984e 100644
11016 --- a/arch/x86/include/asm/pgtable-2level.h
11017 +++ b/arch/x86/include/asm/pgtable-2level.h
11018 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11019
11020 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11021 {
11022 + pax_open_kernel();
11023 *pmdp = pmd;
11024 + pax_close_kernel();
11025 }
11026
11027 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11028 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11029 index effff47..f9e4035 100644
11030 --- a/arch/x86/include/asm/pgtable-3level.h
11031 +++ b/arch/x86/include/asm/pgtable-3level.h
11032 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11033
11034 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11035 {
11036 + pax_open_kernel();
11037 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11038 + pax_close_kernel();
11039 }
11040
11041 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11042 {
11043 + pax_open_kernel();
11044 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11045 + pax_close_kernel();
11046 }
11047
11048 /*
11049 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11050 index 49afb3f..ed14d07 100644
11051 --- a/arch/x86/include/asm/pgtable.h
11052 +++ b/arch/x86/include/asm/pgtable.h
11053 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11054
11055 #ifndef __PAGETABLE_PUD_FOLDED
11056 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11057 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11058 #define pgd_clear(pgd) native_pgd_clear(pgd)
11059 #endif
11060
11061 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11062
11063 #define arch_end_context_switch(prev) do {} while(0)
11064
11065 +#define pax_open_kernel() native_pax_open_kernel()
11066 +#define pax_close_kernel() native_pax_close_kernel()
11067 #endif /* CONFIG_PARAVIRT */
11068
11069 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11070 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11071 +
11072 +#ifdef CONFIG_PAX_KERNEXEC
11073 +static inline unsigned long native_pax_open_kernel(void)
11074 +{
11075 + unsigned long cr0;
11076 +
11077 + preempt_disable();
11078 + barrier();
11079 + cr0 = read_cr0() ^ X86_CR0_WP;
11080 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11081 + write_cr0(cr0);
11082 + return cr0 ^ X86_CR0_WP;
11083 +}
11084 +
11085 +static inline unsigned long native_pax_close_kernel(void)
11086 +{
11087 + unsigned long cr0;
11088 +
11089 + cr0 = read_cr0() ^ X86_CR0_WP;
11090 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11091 + write_cr0(cr0);
11092 + barrier();
11093 + preempt_enable_no_resched();
11094 + return cr0 ^ X86_CR0_WP;
11095 +}
11096 +#else
11097 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11098 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11099 +#endif
11100 +
11101 /*
11102 * The following only work if pte_present() is true.
11103 * Undefined behaviour if not..
11104 */
11105 +static inline int pte_user(pte_t pte)
11106 +{
11107 + return pte_val(pte) & _PAGE_USER;
11108 +}
11109 +
11110 static inline int pte_dirty(pte_t pte)
11111 {
11112 return pte_flags(pte) & _PAGE_DIRTY;
11113 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11114 return pte_clear_flags(pte, _PAGE_RW);
11115 }
11116
11117 +static inline pte_t pte_mkread(pte_t pte)
11118 +{
11119 + return __pte(pte_val(pte) | _PAGE_USER);
11120 +}
11121 +
11122 static inline pte_t pte_mkexec(pte_t pte)
11123 {
11124 - return pte_clear_flags(pte, _PAGE_NX);
11125 +#ifdef CONFIG_X86_PAE
11126 + if (__supported_pte_mask & _PAGE_NX)
11127 + return pte_clear_flags(pte, _PAGE_NX);
11128 + else
11129 +#endif
11130 + return pte_set_flags(pte, _PAGE_USER);
11131 +}
11132 +
11133 +static inline pte_t pte_exprotect(pte_t pte)
11134 +{
11135 +#ifdef CONFIG_X86_PAE
11136 + if (__supported_pte_mask & _PAGE_NX)
11137 + return pte_set_flags(pte, _PAGE_NX);
11138 + else
11139 +#endif
11140 + return pte_clear_flags(pte, _PAGE_USER);
11141 }
11142
11143 static inline pte_t pte_mkdirty(pte_t pte)
11144 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11145 #endif
11146
11147 #ifndef __ASSEMBLY__
11148 +
11149 +#ifdef CONFIG_PAX_PER_CPU_PGD
11150 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11151 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11152 +{
11153 + return cpu_pgd[cpu];
11154 +}
11155 +#endif
11156 +
11157 #include <linux/mm_types.h>
11158
11159 static inline int pte_none(pte_t pte)
11160 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11161
11162 static inline int pgd_bad(pgd_t pgd)
11163 {
11164 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11165 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11166 }
11167
11168 static inline int pgd_none(pgd_t pgd)
11169 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11170 * pgd_offset() returns a (pgd_t *)
11171 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11172 */
11173 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11174 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11175 +
11176 +#ifdef CONFIG_PAX_PER_CPU_PGD
11177 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11178 +#endif
11179 +
11180 /*
11181 * a shortcut which implies the use of the kernel's pgd, instead
11182 * of a process's
11183 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11184 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11185 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11186
11187 +#ifdef CONFIG_X86_32
11188 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11189 +#else
11190 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11191 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11192 +
11193 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11194 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11195 +#else
11196 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11197 +#endif
11198 +
11199 +#endif
11200 +
11201 #ifndef __ASSEMBLY__
11202
11203 extern int direct_gbpages;
11204 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11205 * dst and src can be on the same page, but the range must not overlap,
11206 * and must not cross a page boundary.
11207 */
11208 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11209 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11210 {
11211 - memcpy(dst, src, count * sizeof(pgd_t));
11212 + pax_open_kernel();
11213 + while (count--)
11214 + *dst++ = *src++;
11215 + pax_close_kernel();
11216 }
11217
11218 +#ifdef CONFIG_PAX_PER_CPU_PGD
11219 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11220 +#endif
11221 +
11222 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11223 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11224 +#else
11225 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11226 +#endif
11227
11228 #include <asm-generic/pgtable.h>
11229 #endif /* __ASSEMBLY__ */
11230 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11231 index 0c92113..34a77c6 100644
11232 --- a/arch/x86/include/asm/pgtable_32.h
11233 +++ b/arch/x86/include/asm/pgtable_32.h
11234 @@ -25,9 +25,6 @@
11235 struct mm_struct;
11236 struct vm_area_struct;
11237
11238 -extern pgd_t swapper_pg_dir[1024];
11239 -extern pgd_t initial_page_table[1024];
11240 -
11241 static inline void pgtable_cache_init(void) { }
11242 static inline void check_pgt_cache(void) { }
11243 void paging_init(void);
11244 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11245 # include <asm/pgtable-2level.h>
11246 #endif
11247
11248 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11249 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11250 +#ifdef CONFIG_X86_PAE
11251 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11252 +#endif
11253 +
11254 #if defined(CONFIG_HIGHPTE)
11255 #define pte_offset_map(dir, address) \
11256 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11257 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11258 /* Clear a kernel PTE and flush it from the TLB */
11259 #define kpte_clear_flush(ptep, vaddr) \
11260 do { \
11261 + pax_open_kernel(); \
11262 pte_clear(&init_mm, (vaddr), (ptep)); \
11263 + pax_close_kernel(); \
11264 __flush_tlb_one((vaddr)); \
11265 } while (0)
11266
11267 @@ -74,6 +79,9 @@ do { \
11268
11269 #endif /* !__ASSEMBLY__ */
11270
11271 +#define HAVE_ARCH_UNMAPPED_AREA
11272 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11273 +
11274 /*
11275 * kern_addr_valid() is (1) for FLATMEM and (0) for
11276 * SPARSEMEM and DISCONTIGMEM
11277 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11278 index ed5903b..c7fe163 100644
11279 --- a/arch/x86/include/asm/pgtable_32_types.h
11280 +++ b/arch/x86/include/asm/pgtable_32_types.h
11281 @@ -8,7 +8,7 @@
11282 */
11283 #ifdef CONFIG_X86_PAE
11284 # include <asm/pgtable-3level_types.h>
11285 -# define PMD_SIZE (1UL << PMD_SHIFT)
11286 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11287 # define PMD_MASK (~(PMD_SIZE - 1))
11288 #else
11289 # include <asm/pgtable-2level_types.h>
11290 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11291 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11292 #endif
11293
11294 +#ifdef CONFIG_PAX_KERNEXEC
11295 +#ifndef __ASSEMBLY__
11296 +extern unsigned char MODULES_EXEC_VADDR[];
11297 +extern unsigned char MODULES_EXEC_END[];
11298 +#endif
11299 +#include <asm/boot.h>
11300 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11301 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11302 +#else
11303 +#define ktla_ktva(addr) (addr)
11304 +#define ktva_ktla(addr) (addr)
11305 +#endif
11306 +
11307 #define MODULES_VADDR VMALLOC_START
11308 #define MODULES_END VMALLOC_END
11309 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11310 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11311 index 975f709..9f779c9 100644
11312 --- a/arch/x86/include/asm/pgtable_64.h
11313 +++ b/arch/x86/include/asm/pgtable_64.h
11314 @@ -16,10 +16,14 @@
11315
11316 extern pud_t level3_kernel_pgt[512];
11317 extern pud_t level3_ident_pgt[512];
11318 +extern pud_t level3_vmalloc_start_pgt[512];
11319 +extern pud_t level3_vmalloc_end_pgt[512];
11320 +extern pud_t level3_vmemmap_pgt[512];
11321 +extern pud_t level2_vmemmap_pgt[512];
11322 extern pmd_t level2_kernel_pgt[512];
11323 extern pmd_t level2_fixmap_pgt[512];
11324 -extern pmd_t level2_ident_pgt[512];
11325 -extern pgd_t init_level4_pgt[];
11326 +extern pmd_t level2_ident_pgt[512*2];
11327 +extern pgd_t init_level4_pgt[512];
11328
11329 #define swapper_pg_dir init_level4_pgt
11330
11331 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11332
11333 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11334 {
11335 + pax_open_kernel();
11336 *pmdp = pmd;
11337 + pax_close_kernel();
11338 }
11339
11340 static inline void native_pmd_clear(pmd_t *pmd)
11341 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11342
11343 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11344 {
11345 + pax_open_kernel();
11346 *pudp = pud;
11347 + pax_close_kernel();
11348 }
11349
11350 static inline void native_pud_clear(pud_t *pud)
11351 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11352
11353 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11354 {
11355 + pax_open_kernel();
11356 + *pgdp = pgd;
11357 + pax_close_kernel();
11358 +}
11359 +
11360 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11361 +{
11362 *pgdp = pgd;
11363 }
11364
11365 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11366 index 766ea16..5b96cb3 100644
11367 --- a/arch/x86/include/asm/pgtable_64_types.h
11368 +++ b/arch/x86/include/asm/pgtable_64_types.h
11369 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11370 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11371 #define MODULES_END _AC(0xffffffffff000000, UL)
11372 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11373 +#define MODULES_EXEC_VADDR MODULES_VADDR
11374 +#define MODULES_EXEC_END MODULES_END
11375 +
11376 +#define ktla_ktva(addr) (addr)
11377 +#define ktva_ktla(addr) (addr)
11378
11379 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11380 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11381 index 013286a..8b42f4f 100644
11382 --- a/arch/x86/include/asm/pgtable_types.h
11383 +++ b/arch/x86/include/asm/pgtable_types.h
11384 @@ -16,13 +16,12 @@
11385 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11386 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11387 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11388 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11389 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11390 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11391 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11392 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11393 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11394 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11395 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11396 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11397 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11398 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11399
11400 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11401 @@ -40,7 +39,6 @@
11402 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11403 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11404 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11405 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11406 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11407 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11408 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11409 @@ -57,8 +55,10 @@
11410
11411 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11412 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11413 -#else
11414 +#elif defined(CONFIG_KMEMCHECK)
11415 #define _PAGE_NX (_AT(pteval_t, 0))
11416 +#else
11417 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11418 #endif
11419
11420 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11421 @@ -96,6 +96,9 @@
11422 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11423 _PAGE_ACCESSED)
11424
11425 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11426 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11427 +
11428 #define __PAGE_KERNEL_EXEC \
11429 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11430 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11431 @@ -106,7 +109,7 @@
11432 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11433 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11434 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11435 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11436 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11437 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11438 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11439 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11440 @@ -168,8 +171,8 @@
11441 * bits are combined, this will alow user to access the high address mapped
11442 * VDSO in the presence of CONFIG_COMPAT_VDSO
11443 */
11444 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11445 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11446 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11447 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11448 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11449 #endif
11450
11451 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11452 {
11453 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11454 }
11455 +#endif
11456
11457 +#if PAGETABLE_LEVELS == 3
11458 +#include <asm-generic/pgtable-nopud.h>
11459 +#endif
11460 +
11461 +#if PAGETABLE_LEVELS == 2
11462 +#include <asm-generic/pgtable-nopmd.h>
11463 +#endif
11464 +
11465 +#ifndef __ASSEMBLY__
11466 #if PAGETABLE_LEVELS > 3
11467 typedef struct { pudval_t pud; } pud_t;
11468
11469 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11470 return pud.pud;
11471 }
11472 #else
11473 -#include <asm-generic/pgtable-nopud.h>
11474 -
11475 static inline pudval_t native_pud_val(pud_t pud)
11476 {
11477 return native_pgd_val(pud.pgd);
11478 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11479 return pmd.pmd;
11480 }
11481 #else
11482 -#include <asm-generic/pgtable-nopmd.h>
11483 -
11484 static inline pmdval_t native_pmd_val(pmd_t pmd)
11485 {
11486 return native_pgd_val(pmd.pud.pgd);
11487 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11488
11489 extern pteval_t __supported_pte_mask;
11490 extern void set_nx(void);
11491 -extern int nx_enabled;
11492
11493 #define pgprot_writecombine pgprot_writecombine
11494 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11495 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11496 index 58545c9..fe6fc38e 100644
11497 --- a/arch/x86/include/asm/processor.h
11498 +++ b/arch/x86/include/asm/processor.h
11499 @@ -266,7 +266,7 @@ struct tss_struct {
11500
11501 } ____cacheline_aligned;
11502
11503 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11504 +extern struct tss_struct init_tss[NR_CPUS];
11505
11506 /*
11507 * Save the original ist values for checking stack pointers during debugging
11508 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(const void *x)
11509 */
11510 #define TASK_SIZE PAGE_OFFSET
11511 #define TASK_SIZE_MAX TASK_SIZE
11512 +
11513 +#ifdef CONFIG_PAX_SEGMEXEC
11514 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11515 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11516 +#else
11517 #define STACK_TOP TASK_SIZE
11518 -#define STACK_TOP_MAX STACK_TOP
11519 +#endif
11520 +
11521 +#define STACK_TOP_MAX TASK_SIZE
11522
11523 #define INIT_THREAD { \
11524 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11525 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11526 .vm86_info = NULL, \
11527 .sysenter_cs = __KERNEL_CS, \
11528 .io_bitmap_ptr = NULL, \
11529 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(const void *x)
11530 */
11531 #define INIT_TSS { \
11532 .x86_tss = { \
11533 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11534 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11535 .ss0 = __KERNEL_DS, \
11536 .ss1 = __KERNEL_CS, \
11537 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11538 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(const void *x)
11539 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11540
11541 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11542 -#define KSTK_TOP(info) \
11543 -({ \
11544 - unsigned long *__ptr = (unsigned long *)(info); \
11545 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11546 -})
11547 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11548
11549 /*
11550 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11551 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11552 #define task_pt_regs(task) \
11553 ({ \
11554 struct pt_regs *__regs__; \
11555 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11556 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11557 __regs__ - 1; \
11558 })
11559
11560 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11561 /*
11562 * User space process size. 47bits minus one guard page.
11563 */
11564 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11565 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11566
11567 /* This decides where the kernel will search for a free chunk of vm
11568 * space during mmap's.
11569 */
11570 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11571 - 0xc0000000 : 0xFFFFe000)
11572 + 0xc0000000 : 0xFFFFf000)
11573
11574 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11575 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11576 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11577 #define STACK_TOP_MAX TASK_SIZE_MAX
11578
11579 #define INIT_THREAD { \
11580 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11581 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11582 }
11583
11584 #define INIT_TSS { \
11585 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11586 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11587 }
11588
11589 /*
11590 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11591 */
11592 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11593
11594 +#ifdef CONFIG_PAX_SEGMEXEC
11595 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11596 +#endif
11597 +
11598 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11599
11600 /* Get/set a process' ability to use the timestamp counter instruction */
11601 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11602 index 3566454..4bdfb8c 100644
11603 --- a/arch/x86/include/asm/ptrace.h
11604 +++ b/arch/x86/include/asm/ptrace.h
11605 @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11606 }
11607
11608 /*
11609 - * user_mode_vm(regs) determines whether a register set came from user mode.
11610 + * user_mode(regs) determines whether a register set came from user mode.
11611 * This is true if V8086 mode was enabled OR if the register set was from
11612 * protected mode with RPL-3 CS value. This tricky test checks that with
11613 * one comparison. Many places in the kernel can bypass this full check
11614 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11615 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11616 + * be used.
11617 */
11618 -static inline int user_mode(struct pt_regs *regs)
11619 +static inline int user_mode_novm(struct pt_regs *regs)
11620 {
11621 #ifdef CONFIG_X86_32
11622 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11623 #else
11624 - return !!(regs->cs & 3);
11625 + return !!(regs->cs & SEGMENT_RPL_MASK);
11626 #endif
11627 }
11628
11629 -static inline int user_mode_vm(struct pt_regs *regs)
11630 +static inline int user_mode(struct pt_regs *regs)
11631 {
11632 #ifdef CONFIG_X86_32
11633 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11634 USER_RPL;
11635 #else
11636 - return user_mode(regs);
11637 + return user_mode_novm(regs);
11638 #endif
11639 }
11640
11641 @@ -193,15 +194,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11642 #ifdef CONFIG_X86_64
11643 static inline bool user_64bit_mode(struct pt_regs *regs)
11644 {
11645 + unsigned long cs = regs->cs & 0xffff;
11646 #ifndef CONFIG_PARAVIRT
11647 /*
11648 * On non-paravirt systems, this is the only long mode CPL 3
11649 * selector. We do not allow long mode selectors in the LDT.
11650 */
11651 - return regs->cs == __USER_CS;
11652 + return cs == __USER_CS;
11653 #else
11654 /* Headers are too twisted for this to go in paravirt.h. */
11655 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11656 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11657 #endif
11658 }
11659 #endif
11660 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11661 index 92f29706..a79cbbb 100644
11662 --- a/arch/x86/include/asm/reboot.h
11663 +++ b/arch/x86/include/asm/reboot.h
11664 @@ -6,19 +6,19 @@
11665 struct pt_regs;
11666
11667 struct machine_ops {
11668 - void (*restart)(char *cmd);
11669 - void (*halt)(void);
11670 - void (*power_off)(void);
11671 + void (* __noreturn restart)(char *cmd);
11672 + void (* __noreturn halt)(void);
11673 + void (* __noreturn power_off)(void);
11674 void (*shutdown)(void);
11675 void (*crash_shutdown)(struct pt_regs *);
11676 - void (*emergency_restart)(void);
11677 -};
11678 + void (* __noreturn emergency_restart)(void);
11679 +} __no_const;
11680
11681 extern struct machine_ops machine_ops;
11682
11683 void native_machine_crash_shutdown(struct pt_regs *regs);
11684 void native_machine_shutdown(void);
11685 -void machine_real_restart(unsigned int type);
11686 +void machine_real_restart(unsigned int type) __noreturn;
11687 /* These must match dispatch_table in reboot_32.S */
11688 #define MRR_BIOS 0
11689 #define MRR_APM 1
11690 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11691 index 2dbe4a7..ce1db00 100644
11692 --- a/arch/x86/include/asm/rwsem.h
11693 +++ b/arch/x86/include/asm/rwsem.h
11694 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11695 {
11696 asm volatile("# beginning down_read\n\t"
11697 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11698 +
11699 +#ifdef CONFIG_PAX_REFCOUNT
11700 + "jno 0f\n"
11701 + LOCK_PREFIX _ASM_DEC "(%1)\n"
11702 + "int $4\n0:\n"
11703 + _ASM_EXTABLE(0b, 0b)
11704 +#endif
11705 +
11706 /* adds 0x00000001 */
11707 " jns 1f\n"
11708 " call call_rwsem_down_read_failed\n"
11709 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11710 "1:\n\t"
11711 " mov %1,%2\n\t"
11712 " add %3,%2\n\t"
11713 +
11714 +#ifdef CONFIG_PAX_REFCOUNT
11715 + "jno 0f\n"
11716 + "sub %3,%2\n"
11717 + "int $4\n0:\n"
11718 + _ASM_EXTABLE(0b, 0b)
11719 +#endif
11720 +
11721 " jle 2f\n\t"
11722 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11723 " jnz 1b\n\t"
11724 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11725 long tmp;
11726 asm volatile("# beginning down_write\n\t"
11727 LOCK_PREFIX " xadd %1,(%2)\n\t"
11728 +
11729 +#ifdef CONFIG_PAX_REFCOUNT
11730 + "jno 0f\n"
11731 + "mov %1,(%2)\n"
11732 + "int $4\n0:\n"
11733 + _ASM_EXTABLE(0b, 0b)
11734 +#endif
11735 +
11736 /* adds 0xffff0001, returns the old value */
11737 " test %1,%1\n\t"
11738 /* was the count 0 before? */
11739 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11740 long tmp;
11741 asm volatile("# beginning __up_read\n\t"
11742 LOCK_PREFIX " xadd %1,(%2)\n\t"
11743 +
11744 +#ifdef CONFIG_PAX_REFCOUNT
11745 + "jno 0f\n"
11746 + "mov %1,(%2)\n"
11747 + "int $4\n0:\n"
11748 + _ASM_EXTABLE(0b, 0b)
11749 +#endif
11750 +
11751 /* subtracts 1, returns the old value */
11752 " jns 1f\n\t"
11753 " call call_rwsem_wake\n" /* expects old value in %edx */
11754 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11755 long tmp;
11756 asm volatile("# beginning __up_write\n\t"
11757 LOCK_PREFIX " xadd %1,(%2)\n\t"
11758 +
11759 +#ifdef CONFIG_PAX_REFCOUNT
11760 + "jno 0f\n"
11761 + "mov %1,(%2)\n"
11762 + "int $4\n0:\n"
11763 + _ASM_EXTABLE(0b, 0b)
11764 +#endif
11765 +
11766 /* subtracts 0xffff0001, returns the old value */
11767 " jns 1f\n\t"
11768 " call call_rwsem_wake\n" /* expects old value in %edx */
11769 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11770 {
11771 asm volatile("# beginning __downgrade_write\n\t"
11772 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11773 +
11774 +#ifdef CONFIG_PAX_REFCOUNT
11775 + "jno 0f\n"
11776 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11777 + "int $4\n0:\n"
11778 + _ASM_EXTABLE(0b, 0b)
11779 +#endif
11780 +
11781 /*
11782 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11783 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11784 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11785 */
11786 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11787 {
11788 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11789 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11790 +
11791 +#ifdef CONFIG_PAX_REFCOUNT
11792 + "jno 0f\n"
11793 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
11794 + "int $4\n0:\n"
11795 + _ASM_EXTABLE(0b, 0b)
11796 +#endif
11797 +
11798 : "+m" (sem->count)
11799 : "er" (delta));
11800 }
11801 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
11802 */
11803 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
11804 {
11805 - return delta + xadd(&sem->count, delta);
11806 + return delta + xadd_check_overflow(&sem->count, delta);
11807 }
11808
11809 #endif /* __KERNEL__ */
11810 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11811 index 5e64171..f58957e 100644
11812 --- a/arch/x86/include/asm/segment.h
11813 +++ b/arch/x86/include/asm/segment.h
11814 @@ -64,10 +64,15 @@
11815 * 26 - ESPFIX small SS
11816 * 27 - per-cpu [ offset to per-cpu data area ]
11817 * 28 - stack_canary-20 [ for stack protector ]
11818 - * 29 - unused
11819 - * 30 - unused
11820 + * 29 - PCI BIOS CS
11821 + * 30 - PCI BIOS DS
11822 * 31 - TSS for double fault handler
11823 */
11824 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11825 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11826 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11827 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11828 +
11829 #define GDT_ENTRY_TLS_MIN 6
11830 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11831
11832 @@ -79,6 +84,8 @@
11833
11834 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
11835
11836 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11837 +
11838 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
11839
11840 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
11841 @@ -104,6 +111,12 @@
11842 #define __KERNEL_STACK_CANARY 0
11843 #endif
11844
11845 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
11846 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11847 +
11848 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
11849 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11850 +
11851 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11852
11853 /*
11854 @@ -141,7 +154,7 @@
11855 */
11856
11857 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11858 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11859 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11860
11861
11862 #else
11863 @@ -165,6 +178,8 @@
11864 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
11865 #define __USER32_DS __USER_DS
11866
11867 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11868 +
11869 #define GDT_ENTRY_TSS 8 /* needs two entries */
11870 #define GDT_ENTRY_LDT 10 /* needs two entries */
11871 #define GDT_ENTRY_TLS_MIN 12
11872 @@ -185,6 +200,7 @@
11873 #endif
11874
11875 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
11876 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
11877 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
11878 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
11879 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
11880 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11881 index 0434c40..1714bf0 100644
11882 --- a/arch/x86/include/asm/smp.h
11883 +++ b/arch/x86/include/asm/smp.h
11884 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11885 /* cpus sharing the last level cache: */
11886 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
11887 DECLARE_PER_CPU(u16, cpu_llc_id);
11888 -DECLARE_PER_CPU(int, cpu_number);
11889 +DECLARE_PER_CPU(unsigned int, cpu_number);
11890
11891 static inline struct cpumask *cpu_sibling_mask(int cpu)
11892 {
11893 @@ -77,7 +77,7 @@ struct smp_ops {
11894
11895 void (*send_call_func_ipi)(const struct cpumask *mask);
11896 void (*send_call_func_single_ipi)(int cpu);
11897 -};
11898 +} __no_const;
11899
11900 /* Globals due to paravirt */
11901 extern void set_cpu_sibling_map(int cpu);
11902 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11903 extern int safe_smp_processor_id(void);
11904
11905 #elif defined(CONFIG_X86_64_SMP)
11906 -#define raw_smp_processor_id() (percpu_read(cpu_number))
11907 -
11908 -#define stack_smp_processor_id() \
11909 -({ \
11910 - struct thread_info *ti; \
11911 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11912 - ti->cpu; \
11913 -})
11914 +#define raw_smp_processor_id() (percpu_read(cpu_number))
11915 +#define stack_smp_processor_id() raw_smp_processor_id()
11916 #define safe_smp_processor_id() smp_processor_id()
11917
11918 #endif
11919 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11920 index a82c2bf..2198f61 100644
11921 --- a/arch/x86/include/asm/spinlock.h
11922 +++ b/arch/x86/include/asm/spinlock.h
11923 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
11924 static inline void arch_read_lock(arch_rwlock_t *rw)
11925 {
11926 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
11927 +
11928 +#ifdef CONFIG_PAX_REFCOUNT
11929 + "jno 0f\n"
11930 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
11931 + "int $4\n0:\n"
11932 + _ASM_EXTABLE(0b, 0b)
11933 +#endif
11934 +
11935 "jns 1f\n"
11936 "call __read_lock_failed\n\t"
11937 "1:\n"
11938 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
11939 static inline void arch_write_lock(arch_rwlock_t *rw)
11940 {
11941 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
11942 +
11943 +#ifdef CONFIG_PAX_REFCOUNT
11944 + "jno 0f\n"
11945 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
11946 + "int $4\n0:\n"
11947 + _ASM_EXTABLE(0b, 0b)
11948 +#endif
11949 +
11950 "jz 1f\n"
11951 "call __write_lock_failed\n\t"
11952 "1:\n"
11953 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
11954
11955 static inline void arch_read_unlock(arch_rwlock_t *rw)
11956 {
11957 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
11958 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
11959 +
11960 +#ifdef CONFIG_PAX_REFCOUNT
11961 + "jno 0f\n"
11962 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
11963 + "int $4\n0:\n"
11964 + _ASM_EXTABLE(0b, 0b)
11965 +#endif
11966 +
11967 :"+m" (rw->lock) : : "memory");
11968 }
11969
11970 static inline void arch_write_unlock(arch_rwlock_t *rw)
11971 {
11972 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
11973 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
11974 +
11975 +#ifdef CONFIG_PAX_REFCOUNT
11976 + "jno 0f\n"
11977 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
11978 + "int $4\n0:\n"
11979 + _ASM_EXTABLE(0b, 0b)
11980 +#endif
11981 +
11982 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
11983 }
11984
11985 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11986 index 1575177..cb23f52 100644
11987 --- a/arch/x86/include/asm/stackprotector.h
11988 +++ b/arch/x86/include/asm/stackprotector.h
11989 @@ -48,7 +48,7 @@
11990 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11991 */
11992 #define GDT_STACK_CANARY_INIT \
11993 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11994 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11995
11996 /*
11997 * Initialize the stackprotector canary value.
11998 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11999
12000 static inline void load_stack_canary_segment(void)
12001 {
12002 -#ifdef CONFIG_X86_32
12003 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12004 asm volatile ("mov %0, %%gs" : : "r" (0));
12005 #endif
12006 }
12007 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12008 index 70bbe39..4ae2bd4 100644
12009 --- a/arch/x86/include/asm/stacktrace.h
12010 +++ b/arch/x86/include/asm/stacktrace.h
12011 @@ -11,28 +11,20 @@
12012
12013 extern int kstack_depth_to_print;
12014
12015 -struct thread_info;
12016 +struct task_struct;
12017 struct stacktrace_ops;
12018
12019 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12020 - unsigned long *stack,
12021 - unsigned long bp,
12022 - const struct stacktrace_ops *ops,
12023 - void *data,
12024 - unsigned long *end,
12025 - int *graph);
12026 +typedef unsigned long walk_stack_t(struct task_struct *task,
12027 + void *stack_start,
12028 + unsigned long *stack,
12029 + unsigned long bp,
12030 + const struct stacktrace_ops *ops,
12031 + void *data,
12032 + unsigned long *end,
12033 + int *graph);
12034
12035 -extern unsigned long
12036 -print_context_stack(struct thread_info *tinfo,
12037 - unsigned long *stack, unsigned long bp,
12038 - const struct stacktrace_ops *ops, void *data,
12039 - unsigned long *end, int *graph);
12040 -
12041 -extern unsigned long
12042 -print_context_stack_bp(struct thread_info *tinfo,
12043 - unsigned long *stack, unsigned long bp,
12044 - const struct stacktrace_ops *ops, void *data,
12045 - unsigned long *end, int *graph);
12046 +extern walk_stack_t print_context_stack;
12047 +extern walk_stack_t print_context_stack_bp;
12048
12049 /* Generic stack tracer with callbacks */
12050
12051 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12052 void (*address)(void *data, unsigned long address, int reliable);
12053 /* On negative return stop dumping */
12054 int (*stack)(void *data, char *name);
12055 - walk_stack_t walk_stack;
12056 + walk_stack_t *walk_stack;
12057 };
12058
12059 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12060 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12061 index cb23852..2dde194 100644
12062 --- a/arch/x86/include/asm/sys_ia32.h
12063 +++ b/arch/x86/include/asm/sys_ia32.h
12064 @@ -40,7 +40,7 @@ asmlinkage long sys32_rt_sigprocmask(int, compat_sigset_t __user *,
12065 compat_sigset_t __user *, unsigned int);
12066 asmlinkage long sys32_alarm(unsigned int);
12067
12068 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12069 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12070 asmlinkage long sys32_sysfs(int, u32, u32);
12071
12072 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12073 diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
12074 index f1d8b44..a4de8b7 100644
12075 --- a/arch/x86/include/asm/syscalls.h
12076 +++ b/arch/x86/include/asm/syscalls.h
12077 @@ -30,7 +30,7 @@ long sys_clone(unsigned long, unsigned long, void __user *,
12078 void __user *, struct pt_regs *);
12079
12080 /* kernel/ldt.c */
12081 -asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
12082 +asmlinkage int sys_modify_ldt(int, void __user *, unsigned long) __size_overflow(3);
12083
12084 /* kernel/signal.c */
12085 long sys_rt_sigreturn(struct pt_regs *);
12086 diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
12087 index 2d2f01c..f985723 100644
12088 --- a/arch/x86/include/asm/system.h
12089 +++ b/arch/x86/include/asm/system.h
12090 @@ -129,7 +129,7 @@ do { \
12091 "call __switch_to\n\t" \
12092 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12093 __switch_canary \
12094 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12095 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12096 "movq %%rax,%%rdi\n\t" \
12097 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12098 "jnz ret_from_fork\n\t" \
12099 @@ -140,7 +140,7 @@ do { \
12100 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12101 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12102 [_tif_fork] "i" (_TIF_FORK), \
12103 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12104 + [thread_info] "m" (current_tinfo), \
12105 [current_task] "m" (current_task) \
12106 __switch_canary_iparam \
12107 : "memory", "cc" __EXTRA_CLOBBER)
12108 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
12109 {
12110 unsigned long __limit;
12111 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12112 - return __limit + 1;
12113 + return __limit;
12114 }
12115
12116 static inline void native_clts(void)
12117 @@ -397,13 +397,13 @@ void enable_hlt(void);
12118
12119 void cpu_idle_wait(void);
12120
12121 -extern unsigned long arch_align_stack(unsigned long sp);
12122 +#define arch_align_stack(x) ((x) & ~0xfUL)
12123 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
12124
12125 void default_idle(void);
12126 bool set_pm_idle_to_default(void);
12127
12128 -void stop_this_cpu(void *dummy);
12129 +void stop_this_cpu(void *dummy) __noreturn;
12130
12131 /*
12132 * Force strict CPU ordering.
12133 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12134 index cfd8144..a62ab95 100644
12135 --- a/arch/x86/include/asm/thread_info.h
12136 +++ b/arch/x86/include/asm/thread_info.h
12137 @@ -10,6 +10,7 @@
12138 #include <linux/compiler.h>
12139 #include <asm/page.h>
12140 #include <asm/types.h>
12141 +#include <asm/percpu.h>
12142
12143 /*
12144 * low level task data that entry.S needs immediate access to
12145 @@ -24,7 +25,6 @@ struct exec_domain;
12146 #include <linux/atomic.h>
12147
12148 struct thread_info {
12149 - struct task_struct *task; /* main task structure */
12150 struct exec_domain *exec_domain; /* execution domain */
12151 __u32 flags; /* low level flags */
12152 __u32 status; /* thread synchronous flags */
12153 @@ -34,19 +34,13 @@ struct thread_info {
12154 mm_segment_t addr_limit;
12155 struct restart_block restart_block;
12156 void __user *sysenter_return;
12157 -#ifdef CONFIG_X86_32
12158 - unsigned long previous_esp; /* ESP of the previous stack in
12159 - case of nested (IRQ) stacks
12160 - */
12161 - __u8 supervisor_stack[0];
12162 -#endif
12163 + unsigned long lowest_stack;
12164 unsigned int sig_on_uaccess_error:1;
12165 unsigned int uaccess_err:1; /* uaccess failed */
12166 };
12167
12168 -#define INIT_THREAD_INFO(tsk) \
12169 +#define INIT_THREAD_INFO \
12170 { \
12171 - .task = &tsk, \
12172 .exec_domain = &default_exec_domain, \
12173 .flags = 0, \
12174 .cpu = 0, \
12175 @@ -57,7 +51,7 @@ struct thread_info {
12176 }, \
12177 }
12178
12179 -#define init_thread_info (init_thread_union.thread_info)
12180 +#define init_thread_info (init_thread_union.stack)
12181 #define init_stack (init_thread_union.stack)
12182
12183 #else /* !__ASSEMBLY__ */
12184 @@ -95,6 +89,7 @@ struct thread_info {
12185 #define TIF_BLOCKSTEP 25 /* set when we want DEBUGCTLMSR_BTF */
12186 #define TIF_LAZY_MMU_UPDATES 27 /* task is updating the mmu lazily */
12187 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12188 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
12189
12190 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12191 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12192 @@ -116,16 +111,17 @@ struct thread_info {
12193 #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
12194 #define _TIF_LAZY_MMU_UPDATES (1 << TIF_LAZY_MMU_UPDATES)
12195 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12196 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12197
12198 /* work to do in syscall_trace_enter() */
12199 #define _TIF_WORK_SYSCALL_ENTRY \
12200 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12201 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12202 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12203
12204 /* work to do in syscall_trace_leave() */
12205 #define _TIF_WORK_SYSCALL_EXIT \
12206 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12207 - _TIF_SYSCALL_TRACEPOINT)
12208 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12209
12210 /* work to do on interrupt/exception return */
12211 #define _TIF_WORK_MASK \
12212 @@ -135,7 +131,7 @@ struct thread_info {
12213
12214 /* work to do on any return to user space */
12215 #define _TIF_ALLWORK_MASK \
12216 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12217 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12218
12219 /* Only used for 64 bit */
12220 #define _TIF_DO_NOTIFY_MASK \
12221 @@ -169,45 +165,40 @@ struct thread_info {
12222 ret; \
12223 })
12224
12225 -#ifdef CONFIG_X86_32
12226 -
12227 -#define STACK_WARN (THREAD_SIZE/8)
12228 -/*
12229 - * macros/functions for gaining access to the thread information structure
12230 - *
12231 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12232 - */
12233 -#ifndef __ASSEMBLY__
12234 -
12235 -
12236 -/* how to get the current stack pointer from C */
12237 -register unsigned long current_stack_pointer asm("esp") __used;
12238 -
12239 -/* how to get the thread information struct from C */
12240 -static inline struct thread_info *current_thread_info(void)
12241 -{
12242 - return (struct thread_info *)
12243 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12244 -}
12245 -
12246 -#else /* !__ASSEMBLY__ */
12247 -
12248 +#ifdef __ASSEMBLY__
12249 /* how to get the thread information struct from ASM */
12250 #define GET_THREAD_INFO(reg) \
12251 - movl $-THREAD_SIZE, reg; \
12252 - andl %esp, reg
12253 + mov PER_CPU_VAR(current_tinfo), reg
12254
12255 /* use this one if reg already contains %esp */
12256 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12257 - andl $-THREAD_SIZE, reg
12258 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12259 +#else
12260 +/* how to get the thread information struct from C */
12261 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12262 +
12263 +static __always_inline struct thread_info *current_thread_info(void)
12264 +{
12265 + return percpu_read_stable(current_tinfo);
12266 +}
12267 +#endif
12268 +
12269 +#ifdef CONFIG_X86_32
12270 +
12271 +#define STACK_WARN (THREAD_SIZE/8)
12272 +/*
12273 + * macros/functions for gaining access to the thread information structure
12274 + *
12275 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12276 + */
12277 +#ifndef __ASSEMBLY__
12278 +
12279 +/* how to get the current stack pointer from C */
12280 +register unsigned long current_stack_pointer asm("esp") __used;
12281
12282 #endif
12283
12284 #else /* X86_32 */
12285
12286 -#include <asm/percpu.h>
12287 -#define KERNEL_STACK_OFFSET (5*8)
12288 -
12289 /*
12290 * macros/functions for gaining access to the thread information structure
12291 * preempt_count needs to be 1 initially, until the scheduler is functional.
12292 @@ -215,27 +206,8 @@ static inline struct thread_info *current_thread_info(void)
12293 #ifndef __ASSEMBLY__
12294 DECLARE_PER_CPU(unsigned long, kernel_stack);
12295
12296 -static inline struct thread_info *current_thread_info(void)
12297 -{
12298 - struct thread_info *ti;
12299 - ti = (void *)(percpu_read_stable(kernel_stack) +
12300 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12301 - return ti;
12302 -}
12303 -
12304 -#else /* !__ASSEMBLY__ */
12305 -
12306 -/* how to get the thread information struct from ASM */
12307 -#define GET_THREAD_INFO(reg) \
12308 - movq PER_CPU_VAR(kernel_stack),reg ; \
12309 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12310 -
12311 -/*
12312 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12313 - * a certain register (to be used in assembler memory operands).
12314 - */
12315 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12316 -
12317 +/* how to get the current stack pointer from C */
12318 +register unsigned long current_stack_pointer asm("rsp") __used;
12319 #endif
12320
12321 #endif /* !X86_32 */
12322 @@ -269,5 +241,16 @@ extern void arch_task_cache_init(void);
12323 extern void free_thread_info(struct thread_info *ti);
12324 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12325 #define arch_task_cache_init arch_task_cache_init
12326 +
12327 +#define __HAVE_THREAD_FUNCTIONS
12328 +#define task_thread_info(task) (&(task)->tinfo)
12329 +#define task_stack_page(task) ((task)->stack)
12330 +#define setup_thread_stack(p, org) do {} while (0)
12331 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12332 +
12333 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12334 +extern struct task_struct *alloc_task_struct_node(int node);
12335 +extern void free_task_struct(struct task_struct *);
12336 +
12337 #endif
12338 #endif /* _ASM_X86_THREAD_INFO_H */
12339 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12340 index 8be5f54..7ae826d 100644
12341 --- a/arch/x86/include/asm/uaccess.h
12342 +++ b/arch/x86/include/asm/uaccess.h
12343 @@ -7,12 +7,15 @@
12344 #include <linux/compiler.h>
12345 #include <linux/thread_info.h>
12346 #include <linux/string.h>
12347 +#include <linux/sched.h>
12348 #include <asm/asm.h>
12349 #include <asm/page.h>
12350
12351 #define VERIFY_READ 0
12352 #define VERIFY_WRITE 1
12353
12354 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12355 +
12356 /*
12357 * The fs value determines whether argument validity checking should be
12358 * performed or not. If get_fs() == USER_DS, checking is performed, with
12359 @@ -28,7 +31,12 @@
12360
12361 #define get_ds() (KERNEL_DS)
12362 #define get_fs() (current_thread_info()->addr_limit)
12363 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12364 +void __set_fs(mm_segment_t x);
12365 +void set_fs(mm_segment_t x);
12366 +#else
12367 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12368 +#endif
12369
12370 #define segment_eq(a, b) ((a).seg == (b).seg)
12371
12372 @@ -76,7 +84,33 @@
12373 * checks that the pointer is in the user space range - after calling
12374 * this function, memory access functions may still return -EFAULT.
12375 */
12376 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12377 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12378 +#define access_ok(type, addr, size) \
12379 +({ \
12380 + long __size = size; \
12381 + unsigned long __addr = (unsigned long)addr; \
12382 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12383 + unsigned long __end_ao = __addr + __size - 1; \
12384 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12385 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12386 + while(__addr_ao <= __end_ao) { \
12387 + char __c_ao; \
12388 + __addr_ao += PAGE_SIZE; \
12389 + if (__size > PAGE_SIZE) \
12390 + cond_resched(); \
12391 + if (__get_user(__c_ao, (char __user *)__addr)) \
12392 + break; \
12393 + if (type != VERIFY_WRITE) { \
12394 + __addr = __addr_ao; \
12395 + continue; \
12396 + } \
12397 + if (__put_user(__c_ao, (char __user *)__addr)) \
12398 + break; \
12399 + __addr = __addr_ao; \
12400 + } \
12401 + } \
12402 + __ret_ao; \
12403 +})
12404
12405 /*
12406 * The exception table consists of pairs of addresses: the first is the
12407 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12408 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12409 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12410
12411 -
12412 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12413 +#define __copyuser_seg "gs;"
12414 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12415 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12416 +#else
12417 +#define __copyuser_seg
12418 +#define __COPYUSER_SET_ES
12419 +#define __COPYUSER_RESTORE_ES
12420 +#endif
12421
12422 #ifdef CONFIG_X86_32
12423 #define __put_user_asm_u64(x, addr, err, errret) \
12424 - asm volatile("1: movl %%eax,0(%2)\n" \
12425 - "2: movl %%edx,4(%2)\n" \
12426 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12427 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12428 "3:\n" \
12429 ".section .fixup,\"ax\"\n" \
12430 "4: movl %3,%0\n" \
12431 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12432 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12433
12434 #define __put_user_asm_ex_u64(x, addr) \
12435 - asm volatile("1: movl %%eax,0(%1)\n" \
12436 - "2: movl %%edx,4(%1)\n" \
12437 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12438 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12439 "3:\n" \
12440 _ASM_EXTABLE(1b, 2b - 1b) \
12441 _ASM_EXTABLE(2b, 3b - 2b) \
12442 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12443 __typeof__(*(ptr)) __pu_val; \
12444 __chk_user_ptr(ptr); \
12445 might_fault(); \
12446 - __pu_val = x; \
12447 + __pu_val = (x); \
12448 switch (sizeof(*(ptr))) { \
12449 case 1: \
12450 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12451 @@ -373,7 +415,7 @@ do { \
12452 } while (0)
12453
12454 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12455 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12456 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12457 "2:\n" \
12458 ".section .fixup,\"ax\"\n" \
12459 "3: mov %3,%0\n" \
12460 @@ -381,7 +423,7 @@ do { \
12461 " jmp 2b\n" \
12462 ".previous\n" \
12463 _ASM_EXTABLE(1b, 3b) \
12464 - : "=r" (err), ltype(x) \
12465 + : "=r" (err), ltype (x) \
12466 : "m" (__m(addr)), "i" (errret), "0" (err))
12467
12468 #define __get_user_size_ex(x, ptr, size) \
12469 @@ -406,7 +448,7 @@ do { \
12470 } while (0)
12471
12472 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12473 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12474 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12475 "2:\n" \
12476 _ASM_EXTABLE(1b, 2b - 1b) \
12477 : ltype(x) : "m" (__m(addr)))
12478 @@ -423,13 +465,24 @@ do { \
12479 int __gu_err; \
12480 unsigned long __gu_val; \
12481 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12482 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12483 + (x) = (__typeof__(*(ptr)))__gu_val; \
12484 __gu_err; \
12485 })
12486
12487 /* FIXME: this hack is definitely wrong -AK */
12488 struct __large_struct { unsigned long buf[100]; };
12489 -#define __m(x) (*(struct __large_struct __user *)(x))
12490 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12491 +#define ____m(x) \
12492 +({ \
12493 + unsigned long ____x = (unsigned long)(x); \
12494 + if (____x < PAX_USER_SHADOW_BASE) \
12495 + ____x += PAX_USER_SHADOW_BASE; \
12496 + (void __user *)____x; \
12497 +})
12498 +#else
12499 +#define ____m(x) (x)
12500 +#endif
12501 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12502
12503 /*
12504 * Tell gcc we read from memory instead of writing: this is because
12505 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12506 * aliasing issues.
12507 */
12508 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12509 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12510 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12511 "2:\n" \
12512 ".section .fixup,\"ax\"\n" \
12513 "3: mov %3,%0\n" \
12514 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12515 ".previous\n" \
12516 _ASM_EXTABLE(1b, 3b) \
12517 : "=r"(err) \
12518 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12519 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12520
12521 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12522 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12523 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12524 "2:\n" \
12525 _ASM_EXTABLE(1b, 2b - 1b) \
12526 : : ltype(x), "m" (__m(addr)))
12527 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12528 * On error, the variable @x is set to zero.
12529 */
12530
12531 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12532 +#define __get_user(x, ptr) get_user((x), (ptr))
12533 +#else
12534 #define __get_user(x, ptr) \
12535 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12536 +#endif
12537
12538 /**
12539 * __put_user: - Write a simple value into user space, with less checking.
12540 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12541 * Returns zero on success, or -EFAULT on error.
12542 */
12543
12544 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12545 +#define __put_user(x, ptr) put_user((x), (ptr))
12546 +#else
12547 #define __put_user(x, ptr) \
12548 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12549 +#endif
12550
12551 #define __get_user_unaligned __get_user
12552 #define __put_user_unaligned __put_user
12553 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12554 #define get_user_ex(x, ptr) do { \
12555 unsigned long __gue_val; \
12556 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12557 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12558 + (x) = (__typeof__(*(ptr)))__gue_val; \
12559 } while (0)
12560
12561 #ifdef CONFIG_X86_WP_WORKS_OK
12562 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12563 index 566e803..7183d0b 100644
12564 --- a/arch/x86/include/asm/uaccess_32.h
12565 +++ b/arch/x86/include/asm/uaccess_32.h
12566 @@ -11,15 +11,15 @@
12567 #include <asm/page.h>
12568
12569 unsigned long __must_check __copy_to_user_ll
12570 - (void __user *to, const void *from, unsigned long n);
12571 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12572 unsigned long __must_check __copy_from_user_ll
12573 - (void *to, const void __user *from, unsigned long n);
12574 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12575 unsigned long __must_check __copy_from_user_ll_nozero
12576 - (void *to, const void __user *from, unsigned long n);
12577 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12578 unsigned long __must_check __copy_from_user_ll_nocache
12579 - (void *to, const void __user *from, unsigned long n);
12580 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12581 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12582 - (void *to, const void __user *from, unsigned long n);
12583 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12584
12585 /**
12586 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12587 @@ -41,8 +41,13 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12588 */
12589
12590 static __always_inline unsigned long __must_check
12591 +__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12592 +static __always_inline unsigned long __must_check
12593 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12594 {
12595 + if ((long)n < 0)
12596 + return n;
12597 +
12598 if (__builtin_constant_p(n)) {
12599 unsigned long ret;
12600
12601 @@ -61,6 +66,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12602 return ret;
12603 }
12604 }
12605 + if (!__builtin_constant_p(n))
12606 + check_object_size(from, n, true);
12607 return __copy_to_user_ll(to, from, n);
12608 }
12609
12610 @@ -79,15 +86,23 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12611 * On success, this will be zero.
12612 */
12613 static __always_inline unsigned long __must_check
12614 +__copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12615 +static __always_inline unsigned long __must_check
12616 __copy_to_user(void __user *to, const void *from, unsigned long n)
12617 {
12618 might_fault();
12619 +
12620 return __copy_to_user_inatomic(to, from, n);
12621 }
12622
12623 static __always_inline unsigned long
12624 +__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12625 +static __always_inline unsigned long
12626 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12627 {
12628 + if ((long)n < 0)
12629 + return n;
12630 +
12631 /* Avoid zeroing the tail if the copy fails..
12632 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12633 * but as the zeroing behaviour is only significant when n is not
12634 @@ -134,9 +149,15 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12635 * for explanation of why this is needed.
12636 */
12637 static __always_inline unsigned long
12638 +__copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12639 +static __always_inline unsigned long
12640 __copy_from_user(void *to, const void __user *from, unsigned long n)
12641 {
12642 might_fault();
12643 +
12644 + if ((long)n < 0)
12645 + return n;
12646 +
12647 if (__builtin_constant_p(n)) {
12648 unsigned long ret;
12649
12650 @@ -152,13 +173,21 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12651 return ret;
12652 }
12653 }
12654 + if (!__builtin_constant_p(n))
12655 + check_object_size(to, n, false);
12656 return __copy_from_user_ll(to, from, n);
12657 }
12658
12659 static __always_inline unsigned long __copy_from_user_nocache(void *to,
12660 + const void __user *from, unsigned long n) __size_overflow(3);
12661 +static __always_inline unsigned long __copy_from_user_nocache(void *to,
12662 const void __user *from, unsigned long n)
12663 {
12664 might_fault();
12665 +
12666 + if ((long)n < 0)
12667 + return n;
12668 +
12669 if (__builtin_constant_p(n)) {
12670 unsigned long ret;
12671
12672 @@ -179,17 +208,24 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12673
12674 static __always_inline unsigned long
12675 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12676 + unsigned long n) __size_overflow(3);
12677 +static __always_inline unsigned long
12678 +__copy_from_user_inatomic_nocache(void *to, const void __user *from,
12679 unsigned long n)
12680 {
12681 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12682 + if ((long)n < 0)
12683 + return n;
12684 +
12685 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12686 }
12687
12688 -unsigned long __must_check copy_to_user(void __user *to,
12689 - const void *from, unsigned long n);
12690 -unsigned long __must_check _copy_from_user(void *to,
12691 - const void __user *from,
12692 - unsigned long n);
12693 -
12694 +extern void copy_to_user_overflow(void)
12695 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12696 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12697 +#else
12698 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12699 +#endif
12700 +;
12701
12702 extern void copy_from_user_overflow(void)
12703 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12704 @@ -199,17 +235,65 @@ extern void copy_from_user_overflow(void)
12705 #endif
12706 ;
12707
12708 -static inline unsigned long __must_check copy_from_user(void *to,
12709 - const void __user *from,
12710 - unsigned long n)
12711 +/**
12712 + * copy_to_user: - Copy a block of data into user space.
12713 + * @to: Destination address, in user space.
12714 + * @from: Source address, in kernel space.
12715 + * @n: Number of bytes to copy.
12716 + *
12717 + * Context: User context only. This function may sleep.
12718 + *
12719 + * Copy data from kernel space to user space.
12720 + *
12721 + * Returns number of bytes that could not be copied.
12722 + * On success, this will be zero.
12723 + */
12724 +static inline unsigned long __must_check
12725 +copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
12726 +static inline unsigned long __must_check
12727 +copy_to_user(void __user *to, const void *from, unsigned long n)
12728 +{
12729 + int sz = __compiletime_object_size(from);
12730 +
12731 + if (unlikely(sz != -1 && sz < n))
12732 + copy_to_user_overflow();
12733 + else if (access_ok(VERIFY_WRITE, to, n))
12734 + n = __copy_to_user(to, from, n);
12735 + return n;
12736 +}
12737 +
12738 +/**
12739 + * copy_from_user: - Copy a block of data from user space.
12740 + * @to: Destination address, in kernel space.
12741 + * @from: Source address, in user space.
12742 + * @n: Number of bytes to copy.
12743 + *
12744 + * Context: User context only. This function may sleep.
12745 + *
12746 + * Copy data from user space to kernel space.
12747 + *
12748 + * Returns number of bytes that could not be copied.
12749 + * On success, this will be zero.
12750 + *
12751 + * If some data could not be copied, this function will pad the copied
12752 + * data to the requested size using zero bytes.
12753 + */
12754 +static inline unsigned long __must_check
12755 +copy_from_user(void *to, const void __user *from, unsigned long n) __size_overflow(3);
12756 +static inline unsigned long __must_check
12757 +copy_from_user(void *to, const void __user *from, unsigned long n)
12758 {
12759 int sz = __compiletime_object_size(to);
12760
12761 - if (likely(sz == -1 || sz >= n))
12762 - n = _copy_from_user(to, from, n);
12763 - else
12764 + if (unlikely(sz != -1 && sz < n))
12765 copy_from_user_overflow();
12766 -
12767 + else if (access_ok(VERIFY_READ, from, n))
12768 + n = __copy_from_user(to, from, n);
12769 + else if ((long)n > 0) {
12770 + if (!__builtin_constant_p(n))
12771 + check_object_size(to, n, false);
12772 + memset(to, 0, n);
12773 + }
12774 return n;
12775 }
12776
12777 @@ -235,7 +319,7 @@ long __must_check __strncpy_from_user(char *dst,
12778 #define strlen_user(str) strnlen_user(str, LONG_MAX)
12779
12780 long strnlen_user(const char __user *str, long n);
12781 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
12782 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
12783 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12784 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
12785
12786 #endif /* _ASM_X86_UACCESS_32_H */
12787 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12788 index 1c66d30..e294b5f 100644
12789 --- a/arch/x86/include/asm/uaccess_64.h
12790 +++ b/arch/x86/include/asm/uaccess_64.h
12791 @@ -10,6 +10,9 @@
12792 #include <asm/alternative.h>
12793 #include <asm/cpufeature.h>
12794 #include <asm/page.h>
12795 +#include <asm/pgtable.h>
12796 +
12797 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
12798
12799 /*
12800 * Copy To/From Userspace
12801 @@ -17,12 +20,14 @@
12802
12803 /* Handles exceptions in both to and from, but doesn't do access_ok */
12804 __must_check unsigned long
12805 -copy_user_generic_string(void *to, const void *from, unsigned len);
12806 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
12807 __must_check unsigned long
12808 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
12809 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
12810
12811 static __always_inline __must_check unsigned long
12812 -copy_user_generic(void *to, const void *from, unsigned len)
12813 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
12814 +static __always_inline __must_check unsigned long
12815 +copy_user_generic(void *to, const void *from, unsigned long len)
12816 {
12817 unsigned ret;
12818
12819 @@ -32,142 +37,237 @@ copy_user_generic(void *to, const void *from, unsigned len)
12820 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
12821 "=d" (len)),
12822 "1" (to), "2" (from), "3" (len)
12823 - : "memory", "rcx", "r8", "r9", "r10", "r11");
12824 + : "memory", "rcx", "r8", "r9", "r11");
12825 return ret;
12826 }
12827
12828 +static __always_inline __must_check unsigned long
12829 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
12830 +static __always_inline __must_check unsigned long
12831 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
12832 __must_check unsigned long
12833 -_copy_to_user(void __user *to, const void *from, unsigned len);
12834 -__must_check unsigned long
12835 -_copy_from_user(void *to, const void __user *from, unsigned len);
12836 -__must_check unsigned long
12837 -copy_in_user(void __user *to, const void __user *from, unsigned len);
12838 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
12839
12840 static inline unsigned long __must_check copy_from_user(void *to,
12841 const void __user *from,
12842 + unsigned long n) __size_overflow(3);
12843 +static inline unsigned long __must_check copy_from_user(void *to,
12844 + const void __user *from,
12845 unsigned long n)
12846 {
12847 - int sz = __compiletime_object_size(to);
12848 -
12849 might_fault();
12850 - if (likely(sz == -1 || sz >= n))
12851 - n = _copy_from_user(to, from, n);
12852 -#ifdef CONFIG_DEBUG_VM
12853 - else
12854 - WARN(1, "Buffer overflow detected!\n");
12855 -#endif
12856 +
12857 + if (access_ok(VERIFY_READ, from, n))
12858 + n = __copy_from_user(to, from, n);
12859 + else if (n < INT_MAX) {
12860 + if (!__builtin_constant_p(n))
12861 + check_object_size(to, n, false);
12862 + memset(to, 0, n);
12863 + }
12864 return n;
12865 }
12866
12867 static __always_inline __must_check
12868 -int copy_to_user(void __user *dst, const void *src, unsigned size)
12869 +int copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12870 +static __always_inline __must_check
12871 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
12872 {
12873 might_fault();
12874
12875 - return _copy_to_user(dst, src, size);
12876 + if (access_ok(VERIFY_WRITE, dst, size))
12877 + size = __copy_to_user(dst, src, size);
12878 + return size;
12879 }
12880
12881 static __always_inline __must_check
12882 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
12883 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
12884 +static __always_inline __must_check
12885 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12886 {
12887 - int ret = 0;
12888 + int sz = __compiletime_object_size(dst);
12889 + unsigned ret = 0;
12890
12891 might_fault();
12892 - if (!__builtin_constant_p(size))
12893 - return copy_user_generic(dst, (__force void *)src, size);
12894 +
12895 + if (size > INT_MAX)
12896 + return size;
12897 +
12898 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12899 + if (!__access_ok(VERIFY_READ, src, size))
12900 + return size;
12901 +#endif
12902 +
12903 + if (unlikely(sz != -1 && sz < size)) {
12904 +#ifdef CONFIG_DEBUG_VM
12905 + WARN(1, "Buffer overflow detected!\n");
12906 +#endif
12907 + return size;
12908 + }
12909 +
12910 + if (!__builtin_constant_p(size)) {
12911 + check_object_size(dst, size, false);
12912 +
12913 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12914 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12915 + src += PAX_USER_SHADOW_BASE;
12916 +#endif
12917 +
12918 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12919 + }
12920 switch (size) {
12921 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12922 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12923 ret, "b", "b", "=q", 1);
12924 return ret;
12925 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12926 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12927 ret, "w", "w", "=r", 2);
12928 return ret;
12929 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12930 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12931 ret, "l", "k", "=r", 4);
12932 return ret;
12933 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12934 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12935 ret, "q", "", "=r", 8);
12936 return ret;
12937 case 10:
12938 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12939 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12940 ret, "q", "", "=r", 10);
12941 if (unlikely(ret))
12942 return ret;
12943 __get_user_asm(*(u16 *)(8 + (char *)dst),
12944 - (u16 __user *)(8 + (char __user *)src),
12945 + (const u16 __user *)(8 + (const char __user *)src),
12946 ret, "w", "w", "=r", 2);
12947 return ret;
12948 case 16:
12949 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12950 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12951 ret, "q", "", "=r", 16);
12952 if (unlikely(ret))
12953 return ret;
12954 __get_user_asm(*(u64 *)(8 + (char *)dst),
12955 - (u64 __user *)(8 + (char __user *)src),
12956 + (const u64 __user *)(8 + (const char __user *)src),
12957 ret, "q", "", "=r", 8);
12958 return ret;
12959 default:
12960 - return copy_user_generic(dst, (__force void *)src, size);
12961 +
12962 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12963 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12964 + src += PAX_USER_SHADOW_BASE;
12965 +#endif
12966 +
12967 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
12968 }
12969 }
12970
12971 static __always_inline __must_check
12972 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
12973 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
12974 +static __always_inline __must_check
12975 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12976 {
12977 - int ret = 0;
12978 + int sz = __compiletime_object_size(src);
12979 + unsigned ret = 0;
12980
12981 might_fault();
12982 - if (!__builtin_constant_p(size))
12983 - return copy_user_generic((__force void *)dst, src, size);
12984 +
12985 + if (size > INT_MAX)
12986 + return size;
12987 +
12988 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12989 + if (!__access_ok(VERIFY_WRITE, dst, size))
12990 + return size;
12991 +#endif
12992 +
12993 + if (unlikely(sz != -1 && sz < size)) {
12994 +#ifdef CONFIG_DEBUG_VM
12995 + WARN(1, "Buffer overflow detected!\n");
12996 +#endif
12997 + return size;
12998 + }
12999 +
13000 + if (!__builtin_constant_p(size)) {
13001 + check_object_size(src, size, true);
13002 +
13003 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13004 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13005 + dst += PAX_USER_SHADOW_BASE;
13006 +#endif
13007 +
13008 + return copy_user_generic((__force_kernel void *)dst, src, size);
13009 + }
13010 switch (size) {
13011 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13012 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13013 ret, "b", "b", "iq", 1);
13014 return ret;
13015 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13016 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13017 ret, "w", "w", "ir", 2);
13018 return ret;
13019 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13020 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13021 ret, "l", "k", "ir", 4);
13022 return ret;
13023 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13024 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13025 ret, "q", "", "er", 8);
13026 return ret;
13027 case 10:
13028 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13029 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13030 ret, "q", "", "er", 10);
13031 if (unlikely(ret))
13032 return ret;
13033 asm("":::"memory");
13034 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13035 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13036 ret, "w", "w", "ir", 2);
13037 return ret;
13038 case 16:
13039 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13040 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13041 ret, "q", "", "er", 16);
13042 if (unlikely(ret))
13043 return ret;
13044 asm("":::"memory");
13045 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13046 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13047 ret, "q", "", "er", 8);
13048 return ret;
13049 default:
13050 - return copy_user_generic((__force void *)dst, src, size);
13051 +
13052 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13053 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13054 + dst += PAX_USER_SHADOW_BASE;
13055 +#endif
13056 +
13057 + return copy_user_generic((__force_kernel void *)dst, src, size);
13058 }
13059 }
13060
13061 static __always_inline __must_check
13062 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13063 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size) __size_overflow(3);
13064 +static __always_inline __must_check
13065 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13066 {
13067 - int ret = 0;
13068 + unsigned ret = 0;
13069
13070 might_fault();
13071 - if (!__builtin_constant_p(size))
13072 - return copy_user_generic((__force void *)dst,
13073 - (__force void *)src, size);
13074 +
13075 + if (size > INT_MAX)
13076 + return size;
13077 +
13078 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13079 + if (!__access_ok(VERIFY_READ, src, size))
13080 + return size;
13081 + if (!__access_ok(VERIFY_WRITE, dst, size))
13082 + return size;
13083 +#endif
13084 +
13085 + if (!__builtin_constant_p(size)) {
13086 +
13087 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13088 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13089 + src += PAX_USER_SHADOW_BASE;
13090 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13091 + dst += PAX_USER_SHADOW_BASE;
13092 +#endif
13093 +
13094 + return copy_user_generic((__force_kernel void *)dst,
13095 + (__force_kernel const void *)src, size);
13096 + }
13097 switch (size) {
13098 case 1: {
13099 u8 tmp;
13100 - __get_user_asm(tmp, (u8 __user *)src,
13101 + __get_user_asm(tmp, (const u8 __user *)src,
13102 ret, "b", "b", "=q", 1);
13103 if (likely(!ret))
13104 __put_user_asm(tmp, (u8 __user *)dst,
13105 @@ -176,7 +276,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13106 }
13107 case 2: {
13108 u16 tmp;
13109 - __get_user_asm(tmp, (u16 __user *)src,
13110 + __get_user_asm(tmp, (const u16 __user *)src,
13111 ret, "w", "w", "=r", 2);
13112 if (likely(!ret))
13113 __put_user_asm(tmp, (u16 __user *)dst,
13114 @@ -186,7 +286,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13115
13116 case 4: {
13117 u32 tmp;
13118 - __get_user_asm(tmp, (u32 __user *)src,
13119 + __get_user_asm(tmp, (const u32 __user *)src,
13120 ret, "l", "k", "=r", 4);
13121 if (likely(!ret))
13122 __put_user_asm(tmp, (u32 __user *)dst,
13123 @@ -195,7 +295,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13124 }
13125 case 8: {
13126 u64 tmp;
13127 - __get_user_asm(tmp, (u64 __user *)src,
13128 + __get_user_asm(tmp, (const u64 __user *)src,
13129 ret, "q", "", "=r", 8);
13130 if (likely(!ret))
13131 __put_user_asm(tmp, (u64 __user *)dst,
13132 @@ -203,8 +303,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13133 return ret;
13134 }
13135 default:
13136 - return copy_user_generic((__force void *)dst,
13137 - (__force void *)src, size);
13138 +
13139 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13140 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13141 + src += PAX_USER_SHADOW_BASE;
13142 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13143 + dst += PAX_USER_SHADOW_BASE;
13144 +#endif
13145 +
13146 + return copy_user_generic((__force_kernel void *)dst,
13147 + (__force_kernel const void *)src, size);
13148 }
13149 }
13150
13151 @@ -215,39 +323,83 @@ __strncpy_from_user(char *dst, const char __user *src, long count);
13152 __must_check long strnlen_user(const char __user *str, long n);
13153 __must_check long __strnlen_user(const char __user *str, long n);
13154 __must_check long strlen_user(const char __user *str);
13155 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13156 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13157 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13158 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13159
13160 static __must_check __always_inline int
13161 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13162 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13163 +static __must_check __always_inline int
13164 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13165 {
13166 - return copy_user_generic(dst, (__force const void *)src, size);
13167 + if (size > INT_MAX)
13168 + return size;
13169 +
13170 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13171 + if (!__access_ok(VERIFY_READ, src, size))
13172 + return size;
13173 +
13174 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13175 + src += PAX_USER_SHADOW_BASE;
13176 +#endif
13177 +
13178 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13179 }
13180
13181 -static __must_check __always_inline int
13182 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13183 +static __must_check __always_inline unsigned long
13184 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size) __size_overflow(3);
13185 +static __must_check __always_inline unsigned long
13186 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13187 {
13188 - return copy_user_generic((__force void *)dst, src, size);
13189 + if (size > INT_MAX)
13190 + return size;
13191 +
13192 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13193 + if (!__access_ok(VERIFY_WRITE, dst, size))
13194 + return size;
13195 +
13196 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13197 + dst += PAX_USER_SHADOW_BASE;
13198 +#endif
13199 +
13200 + return copy_user_generic((__force_kernel void *)dst, src, size);
13201 }
13202
13203 -extern long __copy_user_nocache(void *dst, const void __user *src,
13204 - unsigned size, int zerorest);
13205 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13206 + unsigned long size, int zerorest) __size_overflow(3);
13207
13208 -static inline int
13209 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13210 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size) __size_overflow(3);
13211 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13212 {
13213 might_sleep();
13214 +
13215 + if (size > INT_MAX)
13216 + return size;
13217 +
13218 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13219 + if (!__access_ok(VERIFY_READ, src, size))
13220 + return size;
13221 +#endif
13222 +
13223 return __copy_user_nocache(dst, src, size, 1);
13224 }
13225
13226 -static inline int
13227 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13228 - unsigned size)
13229 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13230 + unsigned long size) __size_overflow(3);
13231 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13232 + unsigned long size)
13233 {
13234 + if (size > INT_MAX)
13235 + return size;
13236 +
13237 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13238 + if (!__access_ok(VERIFY_READ, src, size))
13239 + return size;
13240 +#endif
13241 +
13242 return __copy_user_nocache(dst, src, size, 0);
13243 }
13244
13245 -unsigned long
13246 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13247 +extern unsigned long
13248 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13249
13250 #endif /* _ASM_X86_UACCESS_64_H */
13251 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13252 index bb05228..d763d5b 100644
13253 --- a/arch/x86/include/asm/vdso.h
13254 +++ b/arch/x86/include/asm/vdso.h
13255 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13256 #define VDSO32_SYMBOL(base, name) \
13257 ({ \
13258 extern const char VDSO32_##name[]; \
13259 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13260 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13261 })
13262 #endif
13263
13264 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13265 index a609c39..7a68dc7 100644
13266 --- a/arch/x86/include/asm/x86_init.h
13267 +++ b/arch/x86/include/asm/x86_init.h
13268 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13269 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13270 void (*find_smp_config)(void);
13271 void (*get_smp_config)(unsigned int early);
13272 -};
13273 +} __no_const;
13274
13275 /**
13276 * struct x86_init_resources - platform specific resource related ops
13277 @@ -43,7 +43,7 @@ struct x86_init_resources {
13278 void (*probe_roms)(void);
13279 void (*reserve_resources)(void);
13280 char *(*memory_setup)(void);
13281 -};
13282 +} __no_const;
13283
13284 /**
13285 * struct x86_init_irqs - platform specific interrupt setup
13286 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13287 void (*pre_vector_init)(void);
13288 void (*intr_init)(void);
13289 void (*trap_init)(void);
13290 -};
13291 +} __no_const;
13292
13293 /**
13294 * struct x86_init_oem - oem platform specific customizing functions
13295 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13296 struct x86_init_oem {
13297 void (*arch_setup)(void);
13298 void (*banner)(void);
13299 -};
13300 +} __no_const;
13301
13302 /**
13303 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13304 @@ -77,7 +77,7 @@ struct x86_init_oem {
13305 */
13306 struct x86_init_mapping {
13307 void (*pagetable_reserve)(u64 start, u64 end);
13308 -};
13309 +} __no_const;
13310
13311 /**
13312 * struct x86_init_paging - platform specific paging functions
13313 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13314 struct x86_init_paging {
13315 void (*pagetable_setup_start)(pgd_t *base);
13316 void (*pagetable_setup_done)(pgd_t *base);
13317 -};
13318 +} __no_const;
13319
13320 /**
13321 * struct x86_init_timers - platform specific timer setup
13322 @@ -102,7 +102,7 @@ struct x86_init_timers {
13323 void (*tsc_pre_init)(void);
13324 void (*timer_init)(void);
13325 void (*wallclock_init)(void);
13326 -};
13327 +} __no_const;
13328
13329 /**
13330 * struct x86_init_iommu - platform specific iommu setup
13331 @@ -110,7 +110,7 @@ struct x86_init_timers {
13332 */
13333 struct x86_init_iommu {
13334 int (*iommu_init)(void);
13335 -};
13336 +} __no_const;
13337
13338 /**
13339 * struct x86_init_pci - platform specific pci init functions
13340 @@ -124,7 +124,7 @@ struct x86_init_pci {
13341 int (*init)(void);
13342 void (*init_irq)(void);
13343 void (*fixup_irqs)(void);
13344 -};
13345 +} __no_const;
13346
13347 /**
13348 * struct x86_init_ops - functions for platform specific setup
13349 @@ -140,7 +140,7 @@ struct x86_init_ops {
13350 struct x86_init_timers timers;
13351 struct x86_init_iommu iommu;
13352 struct x86_init_pci pci;
13353 -};
13354 +} __no_const;
13355
13356 /**
13357 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13358 @@ -149,7 +149,7 @@ struct x86_init_ops {
13359 struct x86_cpuinit_ops {
13360 void (*setup_percpu_clockev)(void);
13361 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13362 -};
13363 +} __no_const;
13364
13365 /**
13366 * struct x86_platform_ops - platform specific runtime functions
13367 @@ -171,7 +171,7 @@ struct x86_platform_ops {
13368 void (*nmi_init)(void);
13369 unsigned char (*get_nmi_reason)(void);
13370 int (*i8042_detect)(void);
13371 -};
13372 +} __no_const;
13373
13374 struct pci_dev;
13375
13376 @@ -180,7 +180,7 @@ struct x86_msi_ops {
13377 void (*teardown_msi_irq)(unsigned int irq);
13378 void (*teardown_msi_irqs)(struct pci_dev *dev);
13379 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13380 -};
13381 +} __no_const;
13382
13383 extern struct x86_init_ops x86_init;
13384 extern struct x86_cpuinit_ops x86_cpuinit;
13385 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13386 index c6ce245..ffbdab7 100644
13387 --- a/arch/x86/include/asm/xsave.h
13388 +++ b/arch/x86/include/asm/xsave.h
13389 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13390 {
13391 int err;
13392
13393 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13394 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13395 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13396 +#endif
13397 +
13398 /*
13399 * Clear the xsave header first, so that reserved fields are
13400 * initialized to zero.
13401 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13402 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13403 {
13404 int err;
13405 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13406 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13407 u32 lmask = mask;
13408 u32 hmask = mask >> 32;
13409
13410 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13411 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13412 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13413 +#endif
13414 +
13415 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13416 "2:\n"
13417 ".section .fixup,\"ax\"\n"
13418 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13419 index 6a564ac..9b1340c 100644
13420 --- a/arch/x86/kernel/acpi/realmode/Makefile
13421 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13422 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13423 $(call cc-option, -fno-stack-protector) \
13424 $(call cc-option, -mpreferred-stack-boundary=2)
13425 KBUILD_CFLAGS += $(call cc-option, -m32)
13426 +ifdef CONSTIFY_PLUGIN
13427 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13428 +endif
13429 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13430 GCOV_PROFILE := n
13431
13432 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13433 index b4fd836..4358fe3 100644
13434 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13435 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13436 @@ -108,6 +108,9 @@ wakeup_code:
13437 /* Do any other stuff... */
13438
13439 #ifndef CONFIG_64BIT
13440 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13441 + call verify_cpu
13442 +
13443 /* This could also be done in C code... */
13444 movl pmode_cr3, %eax
13445 movl %eax, %cr3
13446 @@ -131,6 +134,7 @@ wakeup_code:
13447 movl pmode_cr0, %eax
13448 movl %eax, %cr0
13449 jmp pmode_return
13450 +# include "../../verify_cpu.S"
13451 #else
13452 pushw $0
13453 pushw trampoline_segment
13454 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13455 index 103b6ab..2004d0a 100644
13456 --- a/arch/x86/kernel/acpi/sleep.c
13457 +++ b/arch/x86/kernel/acpi/sleep.c
13458 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
13459 header->trampoline_segment = trampoline_address() >> 4;
13460 #ifdef CONFIG_SMP
13461 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13462 +
13463 + pax_open_kernel();
13464 early_gdt_descr.address =
13465 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13466 + pax_close_kernel();
13467 +
13468 initial_gs = per_cpu_offset(smp_processor_id());
13469 #endif
13470 initial_code = (unsigned long)wakeup_long64;
13471 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13472 index 13ab720..95d5442 100644
13473 --- a/arch/x86/kernel/acpi/wakeup_32.S
13474 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13475 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13476 # and restore the stack ... but you need gdt for this to work
13477 movl saved_context_esp, %esp
13478
13479 - movl %cs:saved_magic, %eax
13480 - cmpl $0x12345678, %eax
13481 + cmpl $0x12345678, saved_magic
13482 jne bogus_magic
13483
13484 # jump to place where we left off
13485 - movl saved_eip, %eax
13486 - jmp *%eax
13487 + jmp *(saved_eip)
13488
13489 bogus_magic:
13490 jmp bogus_magic
13491 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13492 index 1f84794..e23f862 100644
13493 --- a/arch/x86/kernel/alternative.c
13494 +++ b/arch/x86/kernel/alternative.c
13495 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13496 */
13497 for (a = start; a < end; a++) {
13498 instr = (u8 *)&a->instr_offset + a->instr_offset;
13499 +
13500 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13501 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13502 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13503 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13504 +#endif
13505 +
13506 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13507 BUG_ON(a->replacementlen > a->instrlen);
13508 BUG_ON(a->instrlen > sizeof(insnbuf));
13509 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13510 for (poff = start; poff < end; poff++) {
13511 u8 *ptr = (u8 *)poff + *poff;
13512
13513 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13514 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13515 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13516 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13517 +#endif
13518 +
13519 if (!*poff || ptr < text || ptr >= text_end)
13520 continue;
13521 /* turn DS segment override prefix into lock prefix */
13522 - if (*ptr == 0x3e)
13523 + if (*ktla_ktva(ptr) == 0x3e)
13524 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13525 };
13526 mutex_unlock(&text_mutex);
13527 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13528 for (poff = start; poff < end; poff++) {
13529 u8 *ptr = (u8 *)poff + *poff;
13530
13531 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13532 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13533 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13534 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13535 +#endif
13536 +
13537 if (!*poff || ptr < text || ptr >= text_end)
13538 continue;
13539 /* turn lock prefix into DS segment override prefix */
13540 - if (*ptr == 0xf0)
13541 + if (*ktla_ktva(ptr) == 0xf0)
13542 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13543 };
13544 mutex_unlock(&text_mutex);
13545 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13546
13547 BUG_ON(p->len > MAX_PATCH_LEN);
13548 /* prep the buffer with the original instructions */
13549 - memcpy(insnbuf, p->instr, p->len);
13550 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13551 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13552 (unsigned long)p->instr, p->len);
13553
13554 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13555 if (smp_alt_once)
13556 free_init_pages("SMP alternatives",
13557 (unsigned long)__smp_locks,
13558 - (unsigned long)__smp_locks_end);
13559 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13560
13561 restart_nmi();
13562 }
13563 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13564 * instructions. And on the local CPU you need to be protected again NMI or MCE
13565 * handlers seeing an inconsistent instruction while you patch.
13566 */
13567 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13568 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13569 size_t len)
13570 {
13571 unsigned long flags;
13572 local_irq_save(flags);
13573 - memcpy(addr, opcode, len);
13574 +
13575 + pax_open_kernel();
13576 + memcpy(ktla_ktva(addr), opcode, len);
13577 sync_core();
13578 + pax_close_kernel();
13579 +
13580 local_irq_restore(flags);
13581 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13582 that causes hangs on some VIA CPUs. */
13583 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13584 */
13585 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13586 {
13587 - unsigned long flags;
13588 - char *vaddr;
13589 + unsigned char *vaddr = ktla_ktva(addr);
13590 struct page *pages[2];
13591 - int i;
13592 + size_t i;
13593
13594 if (!core_kernel_text((unsigned long)addr)) {
13595 - pages[0] = vmalloc_to_page(addr);
13596 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13597 + pages[0] = vmalloc_to_page(vaddr);
13598 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13599 } else {
13600 - pages[0] = virt_to_page(addr);
13601 + pages[0] = virt_to_page(vaddr);
13602 WARN_ON(!PageReserved(pages[0]));
13603 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13604 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13605 }
13606 BUG_ON(!pages[0]);
13607 - local_irq_save(flags);
13608 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13609 - if (pages[1])
13610 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13611 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13612 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13613 - clear_fixmap(FIX_TEXT_POKE0);
13614 - if (pages[1])
13615 - clear_fixmap(FIX_TEXT_POKE1);
13616 - local_flush_tlb();
13617 - sync_core();
13618 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13619 - that causes hangs on some VIA CPUs. */
13620 + text_poke_early(addr, opcode, len);
13621 for (i = 0; i < len; i++)
13622 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13623 - local_irq_restore(flags);
13624 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13625 return addr;
13626 }
13627
13628 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13629 index 5b3f88e..61232b4 100644
13630 --- a/arch/x86/kernel/apic/apic.c
13631 +++ b/arch/x86/kernel/apic/apic.c
13632 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13633 /*
13634 * Debug level, exported for io_apic.c
13635 */
13636 -unsigned int apic_verbosity;
13637 +int apic_verbosity;
13638
13639 int pic_mode;
13640
13641 @@ -1912,7 +1912,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13642 apic_write(APIC_ESR, 0);
13643 v1 = apic_read(APIC_ESR);
13644 ack_APIC_irq();
13645 - atomic_inc(&irq_err_count);
13646 + atomic_inc_unchecked(&irq_err_count);
13647
13648 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13649 smp_processor_id(), v0 , v1);
13650 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13651 index fb07275..e06bb59 100644
13652 --- a/arch/x86/kernel/apic/io_apic.c
13653 +++ b/arch/x86/kernel/apic/io_apic.c
13654 @@ -1096,7 +1096,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13655 }
13656 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13657
13658 -void lock_vector_lock(void)
13659 +void lock_vector_lock(void) __acquires(vector_lock)
13660 {
13661 /* Used to the online set of cpus does not change
13662 * during assign_irq_vector.
13663 @@ -1104,7 +1104,7 @@ void lock_vector_lock(void)
13664 raw_spin_lock(&vector_lock);
13665 }
13666
13667 -void unlock_vector_lock(void)
13668 +void unlock_vector_lock(void) __releases(vector_lock)
13669 {
13670 raw_spin_unlock(&vector_lock);
13671 }
13672 @@ -2510,7 +2510,7 @@ static void ack_apic_edge(struct irq_data *data)
13673 ack_APIC_irq();
13674 }
13675
13676 -atomic_t irq_mis_count;
13677 +atomic_unchecked_t irq_mis_count;
13678
13679 static void ack_apic_level(struct irq_data *data)
13680 {
13681 @@ -2576,7 +2576,7 @@ static void ack_apic_level(struct irq_data *data)
13682 * at the cpu.
13683 */
13684 if (!(v & (1 << (i & 0x1f)))) {
13685 - atomic_inc(&irq_mis_count);
13686 + atomic_inc_unchecked(&irq_mis_count);
13687
13688 eoi_ioapic_irq(irq, cfg);
13689 }
13690 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13691 index f76623c..aab694f 100644
13692 --- a/arch/x86/kernel/apm_32.c
13693 +++ b/arch/x86/kernel/apm_32.c
13694 @@ -411,7 +411,7 @@ static DEFINE_MUTEX(apm_mutex);
13695 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13696 * even though they are called in protected mode.
13697 */
13698 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13699 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13700 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13701
13702 static const char driver_version[] = "1.16ac"; /* no spaces */
13703 @@ -589,7 +589,10 @@ static long __apm_bios_call(void *_call)
13704 BUG_ON(cpu != 0);
13705 gdt = get_cpu_gdt_table(cpu);
13706 save_desc_40 = gdt[0x40 / 8];
13707 +
13708 + pax_open_kernel();
13709 gdt[0x40 / 8] = bad_bios_desc;
13710 + pax_close_kernel();
13711
13712 apm_irq_save(flags);
13713 APM_DO_SAVE_SEGS;
13714 @@ -598,7 +601,11 @@ static long __apm_bios_call(void *_call)
13715 &call->esi);
13716 APM_DO_RESTORE_SEGS;
13717 apm_irq_restore(flags);
13718 +
13719 + pax_open_kernel();
13720 gdt[0x40 / 8] = save_desc_40;
13721 + pax_close_kernel();
13722 +
13723 put_cpu();
13724
13725 return call->eax & 0xff;
13726 @@ -665,7 +672,10 @@ static long __apm_bios_call_simple(void *_call)
13727 BUG_ON(cpu != 0);
13728 gdt = get_cpu_gdt_table(cpu);
13729 save_desc_40 = gdt[0x40 / 8];
13730 +
13731 + pax_open_kernel();
13732 gdt[0x40 / 8] = bad_bios_desc;
13733 + pax_close_kernel();
13734
13735 apm_irq_save(flags);
13736 APM_DO_SAVE_SEGS;
13737 @@ -673,7 +683,11 @@ static long __apm_bios_call_simple(void *_call)
13738 &call->eax);
13739 APM_DO_RESTORE_SEGS;
13740 apm_irq_restore(flags);
13741 +
13742 + pax_open_kernel();
13743 gdt[0x40 / 8] = save_desc_40;
13744 + pax_close_kernel();
13745 +
13746 put_cpu();
13747 return error;
13748 }
13749 @@ -2347,12 +2361,15 @@ static int __init apm_init(void)
13750 * code to that CPU.
13751 */
13752 gdt = get_cpu_gdt_table(0);
13753 +
13754 + pax_open_kernel();
13755 set_desc_base(&gdt[APM_CS >> 3],
13756 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13757 set_desc_base(&gdt[APM_CS_16 >> 3],
13758 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13759 set_desc_base(&gdt[APM_DS >> 3],
13760 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13761 + pax_close_kernel();
13762
13763 proc_create("apm", 0, NULL, &apm_file_ops);
13764
13765 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
13766 index 68de2dc..1f3c720 100644
13767 --- a/arch/x86/kernel/asm-offsets.c
13768 +++ b/arch/x86/kernel/asm-offsets.c
13769 @@ -33,6 +33,8 @@ void common(void) {
13770 OFFSET(TI_status, thread_info, status);
13771 OFFSET(TI_addr_limit, thread_info, addr_limit);
13772 OFFSET(TI_preempt_count, thread_info, preempt_count);
13773 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13774 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13775
13776 BLANK();
13777 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
13778 @@ -53,8 +55,26 @@ void common(void) {
13779 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13780 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13781 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13782 +
13783 +#ifdef CONFIG_PAX_KERNEXEC
13784 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13785 #endif
13786
13787 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13788 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13789 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13790 +#ifdef CONFIG_X86_64
13791 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13792 +#endif
13793 +#endif
13794 +
13795 +#endif
13796 +
13797 + BLANK();
13798 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13799 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13800 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13801 +
13802 #ifdef CONFIG_XEN
13803 BLANK();
13804 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13805 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13806 index 834e897..dacddc8 100644
13807 --- a/arch/x86/kernel/asm-offsets_64.c
13808 +++ b/arch/x86/kernel/asm-offsets_64.c
13809 @@ -70,6 +70,7 @@ int main(void)
13810 BLANK();
13811 #undef ENTRY
13812
13813 + DEFINE(TSS_size, sizeof(struct tss_struct));
13814 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
13815 BLANK();
13816
13817 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13818 index 25f24dc..4094a7f 100644
13819 --- a/arch/x86/kernel/cpu/Makefile
13820 +++ b/arch/x86/kernel/cpu/Makefile
13821 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
13822 CFLAGS_REMOVE_perf_event.o = -pg
13823 endif
13824
13825 -# Make sure load_percpu_segment has no stackprotector
13826 -nostackp := $(call cc-option, -fno-stack-protector)
13827 -CFLAGS_common.o := $(nostackp)
13828 -
13829 obj-y := intel_cacheinfo.o scattered.o topology.o
13830 obj-y += proc.o capflags.o powerflags.o common.o
13831 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
13832 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13833 index 80ab83d..0a7b34e 100644
13834 --- a/arch/x86/kernel/cpu/amd.c
13835 +++ b/arch/x86/kernel/cpu/amd.c
13836 @@ -670,7 +670,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13837 unsigned int size)
13838 {
13839 /* AMD errata T13 (order #21922) */
13840 - if ((c->x86 == 6)) {
13841 + if (c->x86 == 6) {
13842 /* Duron Rev A0 */
13843 if (c->x86_model == 3 && c->x86_mask == 0)
13844 size = 64;
13845 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13846 index 1a810e4..9fa8201 100644
13847 --- a/arch/x86/kernel/cpu/common.c
13848 +++ b/arch/x86/kernel/cpu/common.c
13849 @@ -84,60 +84,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13850
13851 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13852
13853 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13854 -#ifdef CONFIG_X86_64
13855 - /*
13856 - * We need valid kernel segments for data and code in long mode too
13857 - * IRET will check the segment types kkeil 2000/10/28
13858 - * Also sysret mandates a special GDT layout
13859 - *
13860 - * TLS descriptors are currently at a different place compared to i386.
13861 - * Hopefully nobody expects them at a fixed place (Wine?)
13862 - */
13863 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13864 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13865 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13866 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13867 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13868 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13869 -#else
13870 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13871 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13872 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13873 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13874 - /*
13875 - * Segments used for calling PnP BIOS have byte granularity.
13876 - * They code segments and data segments have fixed 64k limits,
13877 - * the transfer segment sizes are set at run time.
13878 - */
13879 - /* 32-bit code */
13880 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13881 - /* 16-bit code */
13882 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13883 - /* 16-bit data */
13884 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13885 - /* 16-bit data */
13886 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13887 - /* 16-bit data */
13888 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13889 - /*
13890 - * The APM segments have byte granularity and their bases
13891 - * are set at run time. All have 64k limits.
13892 - */
13893 - /* 32-bit code */
13894 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13895 - /* 16-bit code */
13896 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13897 - /* data */
13898 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13899 -
13900 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13901 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13902 - GDT_STACK_CANARY_INIT
13903 -#endif
13904 -} };
13905 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13906 -
13907 static int __init x86_xsave_setup(char *s)
13908 {
13909 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13910 @@ -372,7 +318,7 @@ void switch_to_new_gdt(int cpu)
13911 {
13912 struct desc_ptr gdt_descr;
13913
13914 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13915 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13916 gdt_descr.size = GDT_SIZE - 1;
13917 load_gdt(&gdt_descr);
13918 /* Reload the per-cpu base */
13919 @@ -839,6 +785,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13920 /* Filter out anything that depends on CPUID levels we don't have */
13921 filter_cpuid_features(c, true);
13922
13923 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13924 + setup_clear_cpu_cap(X86_FEATURE_SEP);
13925 +#endif
13926 +
13927 /* If the model name is still unset, do table lookup. */
13928 if (!c->x86_model_id[0]) {
13929 const char *p;
13930 @@ -1019,10 +969,12 @@ static __init int setup_disablecpuid(char *arg)
13931 }
13932 __setup("clearcpuid=", setup_disablecpuid);
13933
13934 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13935 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
13936 +
13937 #ifdef CONFIG_X86_64
13938 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13939 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
13940 - (unsigned long) nmi_idt_table };
13941 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
13942
13943 DEFINE_PER_CPU_FIRST(union irq_stack_union,
13944 irq_stack_union) __aligned(PAGE_SIZE);
13945 @@ -1036,7 +988,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13946 EXPORT_PER_CPU_SYMBOL(current_task);
13947
13948 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13949 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13950 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13951 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13952
13953 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13954 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13955 {
13956 memset(regs, 0, sizeof(struct pt_regs));
13957 regs->fs = __KERNEL_PERCPU;
13958 - regs->gs = __KERNEL_STACK_CANARY;
13959 + savesegment(gs, regs->gs);
13960
13961 return regs;
13962 }
13963 @@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
13964 int i;
13965
13966 cpu = stack_smp_processor_id();
13967 - t = &per_cpu(init_tss, cpu);
13968 + t = init_tss + cpu;
13969 oist = &per_cpu(orig_ist, cpu);
13970
13971 #ifdef CONFIG_NUMA
13972 @@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
13973 switch_to_new_gdt(cpu);
13974 loadsegment(fs, 0);
13975
13976 - load_idt((const struct desc_ptr *)&idt_descr);
13977 + load_idt(&idt_descr);
13978
13979 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13980 syscall_init();
13981 @@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
13982 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13983 barrier();
13984
13985 - x86_configure_nx();
13986 if (cpu != 0)
13987 enable_x2apic();
13988
13989 @@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
13990 {
13991 int cpu = smp_processor_id();
13992 struct task_struct *curr = current;
13993 - struct tss_struct *t = &per_cpu(init_tss, cpu);
13994 + struct tss_struct *t = init_tss + cpu;
13995 struct thread_struct *thread = &curr->thread;
13996
13997 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13998 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13999 index 3e6ff6c..54b4992 100644
14000 --- a/arch/x86/kernel/cpu/intel.c
14001 +++ b/arch/x86/kernel/cpu/intel.c
14002 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14003 * Update the IDT descriptor and reload the IDT so that
14004 * it uses the read-only mapped virtual address.
14005 */
14006 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14007 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14008 load_idt(&idt_descr);
14009 }
14010 #endif
14011 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14012 index fc4beb3..f20a5a7 100644
14013 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
14014 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
14015 @@ -199,6 +199,8 @@ static void raise_mce(struct mce *m)
14016
14017 /* Error injection interface */
14018 static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14019 + size_t usize, loff_t *off) __size_overflow(3);
14020 +static ssize_t mce_write(struct file *filp, const char __user *ubuf,
14021 size_t usize, loff_t *off)
14022 {
14023 struct mce m;
14024 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14025 index 5a11ae2..a1a1c8a 100644
14026 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14027 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14028 @@ -42,6 +42,7 @@
14029 #include <asm/processor.h>
14030 #include <asm/mce.h>
14031 #include <asm/msr.h>
14032 +#include <asm/local.h>
14033
14034 #include "mce-internal.h"
14035
14036 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14037 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14038 m->cs, m->ip);
14039
14040 - if (m->cs == __KERNEL_CS)
14041 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14042 print_symbol("{%s}", m->ip);
14043 pr_cont("\n");
14044 }
14045 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14046
14047 #define PANIC_TIMEOUT 5 /* 5 seconds */
14048
14049 -static atomic_t mce_paniced;
14050 +static atomic_unchecked_t mce_paniced;
14051
14052 static int fake_panic;
14053 -static atomic_t mce_fake_paniced;
14054 +static atomic_unchecked_t mce_fake_paniced;
14055
14056 /* Panic in progress. Enable interrupts and wait for final IPI */
14057 static void wait_for_panic(void)
14058 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14059 /*
14060 * Make sure only one CPU runs in machine check panic
14061 */
14062 - if (atomic_inc_return(&mce_paniced) > 1)
14063 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14064 wait_for_panic();
14065 barrier();
14066
14067 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14068 console_verbose();
14069 } else {
14070 /* Don't log too much for fake panic */
14071 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14072 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14073 return;
14074 }
14075 /* First print corrected ones that are still unlogged */
14076 @@ -658,7 +659,7 @@ static int mce_timed_out(u64 *t)
14077 * might have been modified by someone else.
14078 */
14079 rmb();
14080 - if (atomic_read(&mce_paniced))
14081 + if (atomic_read_unchecked(&mce_paniced))
14082 wait_for_panic();
14083 if (!monarch_timeout)
14084 goto out;
14085 @@ -1446,7 +1447,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14086 }
14087
14088 /* Call the installed machine check handler for this CPU setup. */
14089 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14090 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14091 unexpected_machine_check;
14092
14093 /*
14094 @@ -1469,7 +1470,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14095 return;
14096 }
14097
14098 + pax_open_kernel();
14099 machine_check_vector = do_machine_check;
14100 + pax_close_kernel();
14101
14102 __mcheck_cpu_init_generic();
14103 __mcheck_cpu_init_vendor(c);
14104 @@ -1483,7 +1486,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14105 */
14106
14107 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14108 -static int mce_chrdev_open_count; /* #times opened */
14109 +static local_t mce_chrdev_open_count; /* #times opened */
14110 static int mce_chrdev_open_exclu; /* already open exclusive? */
14111
14112 static int mce_chrdev_open(struct inode *inode, struct file *file)
14113 @@ -1491,7 +1494,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14114 spin_lock(&mce_chrdev_state_lock);
14115
14116 if (mce_chrdev_open_exclu ||
14117 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14118 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14119 spin_unlock(&mce_chrdev_state_lock);
14120
14121 return -EBUSY;
14122 @@ -1499,7 +1502,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14123
14124 if (file->f_flags & O_EXCL)
14125 mce_chrdev_open_exclu = 1;
14126 - mce_chrdev_open_count++;
14127 + local_inc(&mce_chrdev_open_count);
14128
14129 spin_unlock(&mce_chrdev_state_lock);
14130
14131 @@ -1510,7 +1513,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14132 {
14133 spin_lock(&mce_chrdev_state_lock);
14134
14135 - mce_chrdev_open_count--;
14136 + local_dec(&mce_chrdev_open_count);
14137 mce_chrdev_open_exclu = 0;
14138
14139 spin_unlock(&mce_chrdev_state_lock);
14140 @@ -2229,7 +2232,7 @@ struct dentry *mce_get_debugfs_dir(void)
14141 static void mce_reset(void)
14142 {
14143 cpu_missing = 0;
14144 - atomic_set(&mce_fake_paniced, 0);
14145 + atomic_set_unchecked(&mce_fake_paniced, 0);
14146 atomic_set(&mce_executing, 0);
14147 atomic_set(&mce_callin, 0);
14148 atomic_set(&global_nwo, 0);
14149 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14150 index 5c0e653..0882b0a 100644
14151 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14152 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14153 @@ -12,6 +12,7 @@
14154 #include <asm/system.h>
14155 #include <asm/mce.h>
14156 #include <asm/msr.h>
14157 +#include <asm/pgtable.h>
14158
14159 /* By default disabled */
14160 int mce_p5_enabled __read_mostly;
14161 @@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14162 if (!cpu_has(c, X86_FEATURE_MCE))
14163 return;
14164
14165 + pax_open_kernel();
14166 machine_check_vector = pentium_machine_check;
14167 + pax_close_kernel();
14168 /* Make sure the vector pointer is visible before we enable MCEs: */
14169 wmb();
14170
14171 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14172 index 54060f5..c1a7577 100644
14173 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14174 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14175 @@ -11,6 +11,7 @@
14176 #include <asm/system.h>
14177 #include <asm/mce.h>
14178 #include <asm/msr.h>
14179 +#include <asm/pgtable.h>
14180
14181 /* Machine check handler for WinChip C6: */
14182 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14183 @@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14184 {
14185 u32 lo, hi;
14186
14187 + pax_open_kernel();
14188 machine_check_vector = winchip_machine_check;
14189 + pax_close_kernel();
14190 /* Make sure the vector pointer is visible before we enable MCEs: */
14191 wmb();
14192
14193 diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
14194 index 7928963..1b16001 100644
14195 --- a/arch/x86/kernel/cpu/mtrr/if.c
14196 +++ b/arch/x86/kernel/cpu/mtrr/if.c
14197 @@ -91,6 +91,8 @@ mtrr_file_del(unsigned long base, unsigned long size,
14198 * "base=%Lx size=%Lx type=%s" or "disable=%d"
14199 */
14200 static ssize_t
14201 +mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) __size_overflow(3);
14202 +static ssize_t
14203 mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
14204 {
14205 int i, err;
14206 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14207 index 6b96110..0da73eb 100644
14208 --- a/arch/x86/kernel/cpu/mtrr/main.c
14209 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14210 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14211 u64 size_or_mask, size_and_mask;
14212 static bool mtrr_aps_delayed_init;
14213
14214 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14215 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14216
14217 const struct mtrr_ops *mtrr_if;
14218
14219 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14220 index df5e41f..816c719 100644
14221 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14222 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14223 @@ -25,7 +25,7 @@ struct mtrr_ops {
14224 int (*validate_add_page)(unsigned long base, unsigned long size,
14225 unsigned int type);
14226 int (*have_wrcomb)(void);
14227 -};
14228 +} __do_const;
14229
14230 extern int generic_get_free_region(unsigned long base, unsigned long size,
14231 int replace_reg);
14232 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14233 index 5adce10..99284ec 100644
14234 --- a/arch/x86/kernel/cpu/perf_event.c
14235 +++ b/arch/x86/kernel/cpu/perf_event.c
14236 @@ -1665,7 +1665,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14237 break;
14238
14239 perf_callchain_store(entry, frame.return_address);
14240 - fp = frame.next_frame;
14241 + fp = (const void __force_user *)frame.next_frame;
14242 }
14243 }
14244
14245 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14246 index 13ad899..f642b9a 100644
14247 --- a/arch/x86/kernel/crash.c
14248 +++ b/arch/x86/kernel/crash.c
14249 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14250 {
14251 #ifdef CONFIG_X86_32
14252 struct pt_regs fixed_regs;
14253 -#endif
14254
14255 -#ifdef CONFIG_X86_32
14256 - if (!user_mode_vm(regs)) {
14257 + if (!user_mode(regs)) {
14258 crash_fixup_ss_esp(&fixed_regs, regs);
14259 regs = &fixed_regs;
14260 }
14261 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14262 index 37250fe..bf2ec74 100644
14263 --- a/arch/x86/kernel/doublefault_32.c
14264 +++ b/arch/x86/kernel/doublefault_32.c
14265 @@ -11,7 +11,7 @@
14266
14267 #define DOUBLEFAULT_STACKSIZE (1024)
14268 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14269 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14270 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14271
14272 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14273
14274 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14275 unsigned long gdt, tss;
14276
14277 store_gdt(&gdt_desc);
14278 - gdt = gdt_desc.address;
14279 + gdt = (unsigned long)gdt_desc.address;
14280
14281 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14282
14283 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14284 /* 0x2 bit is always set */
14285 .flags = X86_EFLAGS_SF | 0x2,
14286 .sp = STACK_START,
14287 - .es = __USER_DS,
14288 + .es = __KERNEL_DS,
14289 .cs = __KERNEL_CS,
14290 .ss = __KERNEL_DS,
14291 - .ds = __USER_DS,
14292 + .ds = __KERNEL_DS,
14293 .fs = __KERNEL_PERCPU,
14294
14295 .__cr3 = __pa_nodebug(swapper_pg_dir),
14296 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14297 index 4025fe4..d8451c6 100644
14298 --- a/arch/x86/kernel/dumpstack.c
14299 +++ b/arch/x86/kernel/dumpstack.c
14300 @@ -2,6 +2,9 @@
14301 * Copyright (C) 1991, 1992 Linus Torvalds
14302 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14303 */
14304 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14305 +#define __INCLUDED_BY_HIDESYM 1
14306 +#endif
14307 #include <linux/kallsyms.h>
14308 #include <linux/kprobes.h>
14309 #include <linux/uaccess.h>
14310 @@ -35,9 +38,8 @@ void printk_address(unsigned long address, int reliable)
14311 static void
14312 print_ftrace_graph_addr(unsigned long addr, void *data,
14313 const struct stacktrace_ops *ops,
14314 - struct thread_info *tinfo, int *graph)
14315 + struct task_struct *task, int *graph)
14316 {
14317 - struct task_struct *task = tinfo->task;
14318 unsigned long ret_addr;
14319 int index = task->curr_ret_stack;
14320
14321 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14322 static inline void
14323 print_ftrace_graph_addr(unsigned long addr, void *data,
14324 const struct stacktrace_ops *ops,
14325 - struct thread_info *tinfo, int *graph)
14326 + struct task_struct *task, int *graph)
14327 { }
14328 #endif
14329
14330 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14331 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14332 */
14333
14334 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14335 - void *p, unsigned int size, void *end)
14336 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14337 {
14338 - void *t = tinfo;
14339 if (end) {
14340 if (p < end && p >= (end-THREAD_SIZE))
14341 return 1;
14342 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14343 }
14344
14345 unsigned long
14346 -print_context_stack(struct thread_info *tinfo,
14347 +print_context_stack(struct task_struct *task, void *stack_start,
14348 unsigned long *stack, unsigned long bp,
14349 const struct stacktrace_ops *ops, void *data,
14350 unsigned long *end, int *graph)
14351 {
14352 struct stack_frame *frame = (struct stack_frame *)bp;
14353
14354 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14355 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14356 unsigned long addr;
14357
14358 addr = *stack;
14359 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *tinfo,
14360 } else {
14361 ops->address(data, addr, 0);
14362 }
14363 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14364 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14365 }
14366 stack++;
14367 }
14368 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *tinfo,
14369 EXPORT_SYMBOL_GPL(print_context_stack);
14370
14371 unsigned long
14372 -print_context_stack_bp(struct thread_info *tinfo,
14373 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14374 unsigned long *stack, unsigned long bp,
14375 const struct stacktrace_ops *ops, void *data,
14376 unsigned long *end, int *graph)
14377 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14378 struct stack_frame *frame = (struct stack_frame *)bp;
14379 unsigned long *ret_addr = &frame->return_address;
14380
14381 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14382 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14383 unsigned long addr = *ret_addr;
14384
14385 if (!__kernel_text_address(addr))
14386 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14387 ops->address(data, addr, 1);
14388 frame = frame->next_frame;
14389 ret_addr = &frame->return_address;
14390 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14391 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14392 }
14393
14394 return (unsigned long)frame;
14395 @@ -186,7 +186,7 @@ void dump_stack(void)
14396
14397 bp = stack_frame(current, NULL);
14398 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14399 - current->pid, current->comm, print_tainted(),
14400 + task_pid_nr(current), current->comm, print_tainted(),
14401 init_utsname()->release,
14402 (int)strcspn(init_utsname()->version, " "),
14403 init_utsname()->version);
14404 @@ -222,6 +222,8 @@ unsigned __kprobes long oops_begin(void)
14405 }
14406 EXPORT_SYMBOL_GPL(oops_begin);
14407
14408 +extern void gr_handle_kernel_exploit(void);
14409 +
14410 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14411 {
14412 if (regs && kexec_should_crash(current))
14413 @@ -243,7 +245,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14414 panic("Fatal exception in interrupt");
14415 if (panic_on_oops)
14416 panic("Fatal exception");
14417 - do_exit(signr);
14418 +
14419 + gr_handle_kernel_exploit();
14420 +
14421 + do_group_exit(signr);
14422 }
14423
14424 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14425 @@ -270,7 +275,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14426
14427 show_registers(regs);
14428 #ifdef CONFIG_X86_32
14429 - if (user_mode_vm(regs)) {
14430 + if (user_mode(regs)) {
14431 sp = regs->sp;
14432 ss = regs->ss & 0xffff;
14433 } else {
14434 @@ -298,7 +303,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14435 unsigned long flags = oops_begin();
14436 int sig = SIGSEGV;
14437
14438 - if (!user_mode_vm(regs))
14439 + if (!user_mode(regs))
14440 report_bug(regs->ip, regs);
14441
14442 if (__die(str, regs, err))
14443 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14444 index c99f9ed..2a15d80 100644
14445 --- a/arch/x86/kernel/dumpstack_32.c
14446 +++ b/arch/x86/kernel/dumpstack_32.c
14447 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14448 bp = stack_frame(task, regs);
14449
14450 for (;;) {
14451 - struct thread_info *context;
14452 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14453
14454 - context = (struct thread_info *)
14455 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14456 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14457 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14458
14459 - stack = (unsigned long *)context->previous_esp;
14460 - if (!stack)
14461 + if (stack_start == task_stack_page(task))
14462 break;
14463 + stack = *(unsigned long **)stack_start;
14464 if (ops->stack(data, "IRQ") < 0)
14465 break;
14466 touch_nmi_watchdog();
14467 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14468 * When in-kernel, we also print out the stack and code at the
14469 * time of the fault..
14470 */
14471 - if (!user_mode_vm(regs)) {
14472 + if (!user_mode(regs)) {
14473 unsigned int code_prologue = code_bytes * 43 / 64;
14474 unsigned int code_len = code_bytes;
14475 unsigned char c;
14476 u8 *ip;
14477 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14478
14479 printk(KERN_EMERG "Stack:\n");
14480 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14481
14482 printk(KERN_EMERG "Code: ");
14483
14484 - ip = (u8 *)regs->ip - code_prologue;
14485 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14486 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14487 /* try starting at IP */
14488 - ip = (u8 *)regs->ip;
14489 + ip = (u8 *)regs->ip + cs_base;
14490 code_len = code_len - code_prologue + 1;
14491 }
14492 for (i = 0; i < code_len; i++, ip++) {
14493 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14494 printk(KERN_CONT " Bad EIP value.");
14495 break;
14496 }
14497 - if (ip == (u8 *)regs->ip)
14498 + if (ip == (u8 *)regs->ip + cs_base)
14499 printk(KERN_CONT "<%02x> ", c);
14500 else
14501 printk(KERN_CONT "%02x ", c);
14502 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14503 {
14504 unsigned short ud2;
14505
14506 + ip = ktla_ktva(ip);
14507 if (ip < PAGE_OFFSET)
14508 return 0;
14509 if (probe_kernel_address((unsigned short *)ip, ud2))
14510 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14511
14512 return ud2 == 0x0b0f;
14513 }
14514 +
14515 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14516 +void pax_check_alloca(unsigned long size)
14517 +{
14518 + unsigned long sp = (unsigned long)&sp, stack_left;
14519 +
14520 + /* all kernel stacks are of the same size */
14521 + stack_left = sp & (THREAD_SIZE - 1);
14522 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14523 +}
14524 +EXPORT_SYMBOL(pax_check_alloca);
14525 +#endif
14526 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14527 index 17107bd..b2deecf 100644
14528 --- a/arch/x86/kernel/dumpstack_64.c
14529 +++ b/arch/x86/kernel/dumpstack_64.c
14530 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14531 unsigned long *irq_stack_end =
14532 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14533 unsigned used = 0;
14534 - struct thread_info *tinfo;
14535 int graph = 0;
14536 unsigned long dummy;
14537 + void *stack_start;
14538
14539 if (!task)
14540 task = current;
14541 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14542 * current stack address. If the stacks consist of nested
14543 * exceptions
14544 */
14545 - tinfo = task_thread_info(task);
14546 for (;;) {
14547 char *id;
14548 unsigned long *estack_end;
14549 +
14550 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14551 &used, &id);
14552
14553 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14554 if (ops->stack(data, id) < 0)
14555 break;
14556
14557 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14558 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14559 data, estack_end, &graph);
14560 ops->stack(data, "<EOE>");
14561 /*
14562 @@ -172,7 +172,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14563 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14564 if (ops->stack(data, "IRQ") < 0)
14565 break;
14566 - bp = ops->walk_stack(tinfo, stack, bp,
14567 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14568 ops, data, irq_stack_end, &graph);
14569 /*
14570 * We link to the next stack (which would be
14571 @@ -191,7 +191,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14572 /*
14573 * This handles the process stack:
14574 */
14575 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14576 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14577 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14578 put_cpu();
14579 }
14580 EXPORT_SYMBOL(dump_trace);
14581 @@ -305,3 +306,50 @@ int is_valid_bugaddr(unsigned long ip)
14582
14583 return ud2 == 0x0b0f;
14584 }
14585 +
14586 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14587 +void pax_check_alloca(unsigned long size)
14588 +{
14589 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14590 + unsigned cpu, used;
14591 + char *id;
14592 +
14593 + /* check the process stack first */
14594 + stack_start = (unsigned long)task_stack_page(current);
14595 + stack_end = stack_start + THREAD_SIZE;
14596 + if (likely(stack_start <= sp && sp < stack_end)) {
14597 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14598 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14599 + return;
14600 + }
14601 +
14602 + cpu = get_cpu();
14603 +
14604 + /* check the irq stacks */
14605 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14606 + stack_start = stack_end - IRQ_STACK_SIZE;
14607 + if (stack_start <= sp && sp < stack_end) {
14608 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14609 + put_cpu();
14610 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14611 + return;
14612 + }
14613 +
14614 + /* check the exception stacks */
14615 + used = 0;
14616 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14617 + stack_start = stack_end - EXCEPTION_STKSZ;
14618 + if (stack_end && stack_start <= sp && sp < stack_end) {
14619 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14620 + put_cpu();
14621 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14622 + return;
14623 + }
14624 +
14625 + put_cpu();
14626 +
14627 + /* unknown stack */
14628 + BUG();
14629 +}
14630 +EXPORT_SYMBOL(pax_check_alloca);
14631 +#endif
14632 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14633 index 9b9f18b..9fcaa04 100644
14634 --- a/arch/x86/kernel/early_printk.c
14635 +++ b/arch/x86/kernel/early_printk.c
14636 @@ -7,6 +7,7 @@
14637 #include <linux/pci_regs.h>
14638 #include <linux/pci_ids.h>
14639 #include <linux/errno.h>
14640 +#include <linux/sched.h>
14641 #include <asm/io.h>
14642 #include <asm/processor.h>
14643 #include <asm/fcntl.h>
14644 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14645 index 7b784f4..db6b628 100644
14646 --- a/arch/x86/kernel/entry_32.S
14647 +++ b/arch/x86/kernel/entry_32.S
14648 @@ -179,13 +179,146 @@
14649 /*CFI_REL_OFFSET gs, PT_GS*/
14650 .endm
14651 .macro SET_KERNEL_GS reg
14652 +
14653 +#ifdef CONFIG_CC_STACKPROTECTOR
14654 movl $(__KERNEL_STACK_CANARY), \reg
14655 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14656 + movl $(__USER_DS), \reg
14657 +#else
14658 + xorl \reg, \reg
14659 +#endif
14660 +
14661 movl \reg, %gs
14662 .endm
14663
14664 #endif /* CONFIG_X86_32_LAZY_GS */
14665
14666 -.macro SAVE_ALL
14667 +.macro pax_enter_kernel
14668 +#ifdef CONFIG_PAX_KERNEXEC
14669 + call pax_enter_kernel
14670 +#endif
14671 +.endm
14672 +
14673 +.macro pax_exit_kernel
14674 +#ifdef CONFIG_PAX_KERNEXEC
14675 + call pax_exit_kernel
14676 +#endif
14677 +.endm
14678 +
14679 +#ifdef CONFIG_PAX_KERNEXEC
14680 +ENTRY(pax_enter_kernel)
14681 +#ifdef CONFIG_PARAVIRT
14682 + pushl %eax
14683 + pushl %ecx
14684 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14685 + mov %eax, %esi
14686 +#else
14687 + mov %cr0, %esi
14688 +#endif
14689 + bts $16, %esi
14690 + jnc 1f
14691 + mov %cs, %esi
14692 + cmp $__KERNEL_CS, %esi
14693 + jz 3f
14694 + ljmp $__KERNEL_CS, $3f
14695 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14696 +2:
14697 +#ifdef CONFIG_PARAVIRT
14698 + mov %esi, %eax
14699 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14700 +#else
14701 + mov %esi, %cr0
14702 +#endif
14703 +3:
14704 +#ifdef CONFIG_PARAVIRT
14705 + popl %ecx
14706 + popl %eax
14707 +#endif
14708 + ret
14709 +ENDPROC(pax_enter_kernel)
14710 +
14711 +ENTRY(pax_exit_kernel)
14712 +#ifdef CONFIG_PARAVIRT
14713 + pushl %eax
14714 + pushl %ecx
14715 +#endif
14716 + mov %cs, %esi
14717 + cmp $__KERNEXEC_KERNEL_CS, %esi
14718 + jnz 2f
14719 +#ifdef CONFIG_PARAVIRT
14720 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14721 + mov %eax, %esi
14722 +#else
14723 + mov %cr0, %esi
14724 +#endif
14725 + btr $16, %esi
14726 + ljmp $__KERNEL_CS, $1f
14727 +1:
14728 +#ifdef CONFIG_PARAVIRT
14729 + mov %esi, %eax
14730 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14731 +#else
14732 + mov %esi, %cr0
14733 +#endif
14734 +2:
14735 +#ifdef CONFIG_PARAVIRT
14736 + popl %ecx
14737 + popl %eax
14738 +#endif
14739 + ret
14740 +ENDPROC(pax_exit_kernel)
14741 +#endif
14742 +
14743 +.macro pax_erase_kstack
14744 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14745 + call pax_erase_kstack
14746 +#endif
14747 +.endm
14748 +
14749 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14750 +/*
14751 + * ebp: thread_info
14752 + * ecx, edx: can be clobbered
14753 + */
14754 +ENTRY(pax_erase_kstack)
14755 + pushl %edi
14756 + pushl %eax
14757 +
14758 + mov TI_lowest_stack(%ebp), %edi
14759 + mov $-0xBEEF, %eax
14760 + std
14761 +
14762 +1: mov %edi, %ecx
14763 + and $THREAD_SIZE_asm - 1, %ecx
14764 + shr $2, %ecx
14765 + repne scasl
14766 + jecxz 2f
14767 +
14768 + cmp $2*16, %ecx
14769 + jc 2f
14770 +
14771 + mov $2*16, %ecx
14772 + repe scasl
14773 + jecxz 2f
14774 + jne 1b
14775 +
14776 +2: cld
14777 + mov %esp, %ecx
14778 + sub %edi, %ecx
14779 + shr $2, %ecx
14780 + rep stosl
14781 +
14782 + mov TI_task_thread_sp0(%ebp), %edi
14783 + sub $128, %edi
14784 + mov %edi, TI_lowest_stack(%ebp)
14785 +
14786 + popl %eax
14787 + popl %edi
14788 + ret
14789 +ENDPROC(pax_erase_kstack)
14790 +#endif
14791 +
14792 +.macro __SAVE_ALL _DS
14793 cld
14794 PUSH_GS
14795 pushl_cfi %fs
14796 @@ -208,7 +341,7 @@
14797 CFI_REL_OFFSET ecx, 0
14798 pushl_cfi %ebx
14799 CFI_REL_OFFSET ebx, 0
14800 - movl $(__USER_DS), %edx
14801 + movl $\_DS, %edx
14802 movl %edx, %ds
14803 movl %edx, %es
14804 movl $(__KERNEL_PERCPU), %edx
14805 @@ -216,6 +349,15 @@
14806 SET_KERNEL_GS %edx
14807 .endm
14808
14809 +.macro SAVE_ALL
14810 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
14811 + __SAVE_ALL __KERNEL_DS
14812 + pax_enter_kernel
14813 +#else
14814 + __SAVE_ALL __USER_DS
14815 +#endif
14816 +.endm
14817 +
14818 .macro RESTORE_INT_REGS
14819 popl_cfi %ebx
14820 CFI_RESTORE ebx
14821 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
14822 popfl_cfi
14823 jmp syscall_exit
14824 CFI_ENDPROC
14825 -END(ret_from_fork)
14826 +ENDPROC(ret_from_fork)
14827
14828 /*
14829 * Interrupt exit functions should be protected against kprobes
14830 @@ -335,7 +477,15 @@ resume_userspace_sig:
14831 andl $SEGMENT_RPL_MASK, %eax
14832 #endif
14833 cmpl $USER_RPL, %eax
14834 +
14835 +#ifdef CONFIG_PAX_KERNEXEC
14836 + jae resume_userspace
14837 +
14838 + pax_exit_kernel
14839 + jmp resume_kernel
14840 +#else
14841 jb resume_kernel # not returning to v8086 or userspace
14842 +#endif
14843
14844 ENTRY(resume_userspace)
14845 LOCKDEP_SYS_EXIT
14846 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
14847 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
14848 # int/exception return?
14849 jne work_pending
14850 - jmp restore_all
14851 -END(ret_from_exception)
14852 + jmp restore_all_pax
14853 +ENDPROC(ret_from_exception)
14854
14855 #ifdef CONFIG_PREEMPT
14856 ENTRY(resume_kernel)
14857 @@ -363,7 +513,7 @@ need_resched:
14858 jz restore_all
14859 call preempt_schedule_irq
14860 jmp need_resched
14861 -END(resume_kernel)
14862 +ENDPROC(resume_kernel)
14863 #endif
14864 CFI_ENDPROC
14865 /*
14866 @@ -397,23 +547,34 @@ sysenter_past_esp:
14867 /*CFI_REL_OFFSET cs, 0*/
14868 /*
14869 * Push current_thread_info()->sysenter_return to the stack.
14870 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
14871 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
14872 */
14873 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
14874 + pushl_cfi $0
14875 CFI_REL_OFFSET eip, 0
14876
14877 pushl_cfi %eax
14878 SAVE_ALL
14879 + GET_THREAD_INFO(%ebp)
14880 + movl TI_sysenter_return(%ebp),%ebp
14881 + movl %ebp,PT_EIP(%esp)
14882 ENABLE_INTERRUPTS(CLBR_NONE)
14883
14884 /*
14885 * Load the potential sixth argument from user stack.
14886 * Careful about security.
14887 */
14888 + movl PT_OLDESP(%esp),%ebp
14889 +
14890 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14891 + mov PT_OLDSS(%esp),%ds
14892 +1: movl %ds:(%ebp),%ebp
14893 + push %ss
14894 + pop %ds
14895 +#else
14896 cmpl $__PAGE_OFFSET-3,%ebp
14897 jae syscall_fault
14898 1: movl (%ebp),%ebp
14899 +#endif
14900 +
14901 movl %ebp,PT_EBP(%esp)
14902 .section __ex_table,"a"
14903 .align 4
14904 @@ -436,12 +597,24 @@ sysenter_do_call:
14905 testl $_TIF_ALLWORK_MASK, %ecx
14906 jne sysexit_audit
14907 sysenter_exit:
14908 +
14909 +#ifdef CONFIG_PAX_RANDKSTACK
14910 + pushl_cfi %eax
14911 + movl %esp, %eax
14912 + call pax_randomize_kstack
14913 + popl_cfi %eax
14914 +#endif
14915 +
14916 + pax_erase_kstack
14917 +
14918 /* if something modifies registers it must also disable sysexit */
14919 movl PT_EIP(%esp), %edx
14920 movl PT_OLDESP(%esp), %ecx
14921 xorl %ebp,%ebp
14922 TRACE_IRQS_ON
14923 1: mov PT_FS(%esp), %fs
14924 +2: mov PT_DS(%esp), %ds
14925 +3: mov PT_ES(%esp), %es
14926 PTGS_TO_GS
14927 ENABLE_INTERRUPTS_SYSEXIT
14928
14929 @@ -458,6 +631,9 @@ sysenter_audit:
14930 movl %eax,%edx /* 2nd arg: syscall number */
14931 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
14932 call __audit_syscall_entry
14933 +
14934 + pax_erase_kstack
14935 +
14936 pushl_cfi %ebx
14937 movl PT_EAX(%esp),%eax /* reload syscall number */
14938 jmp sysenter_do_call
14939 @@ -483,11 +659,17 @@ sysexit_audit:
14940
14941 CFI_ENDPROC
14942 .pushsection .fixup,"ax"
14943 -2: movl $0,PT_FS(%esp)
14944 +4: movl $0,PT_FS(%esp)
14945 + jmp 1b
14946 +5: movl $0,PT_DS(%esp)
14947 + jmp 1b
14948 +6: movl $0,PT_ES(%esp)
14949 jmp 1b
14950 .section __ex_table,"a"
14951 .align 4
14952 - .long 1b,2b
14953 + .long 1b,4b
14954 + .long 2b,5b
14955 + .long 3b,6b
14956 .popsection
14957 PTGS_TO_GS_EX
14958 ENDPROC(ia32_sysenter_target)
14959 @@ -520,6 +702,15 @@ syscall_exit:
14960 testl $_TIF_ALLWORK_MASK, %ecx # current->work
14961 jne syscall_exit_work
14962
14963 +restore_all_pax:
14964 +
14965 +#ifdef CONFIG_PAX_RANDKSTACK
14966 + movl %esp, %eax
14967 + call pax_randomize_kstack
14968 +#endif
14969 +
14970 + pax_erase_kstack
14971 +
14972 restore_all:
14973 TRACE_IRQS_IRET
14974 restore_all_notrace:
14975 @@ -579,14 +770,34 @@ ldt_ss:
14976 * compensating for the offset by changing to the ESPFIX segment with
14977 * a base address that matches for the difference.
14978 */
14979 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
14980 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
14981 mov %esp, %edx /* load kernel esp */
14982 mov PT_OLDESP(%esp), %eax /* load userspace esp */
14983 mov %dx, %ax /* eax: new kernel esp */
14984 sub %eax, %edx /* offset (low word is 0) */
14985 +#ifdef CONFIG_SMP
14986 + movl PER_CPU_VAR(cpu_number), %ebx
14987 + shll $PAGE_SHIFT_asm, %ebx
14988 + addl $cpu_gdt_table, %ebx
14989 +#else
14990 + movl $cpu_gdt_table, %ebx
14991 +#endif
14992 shr $16, %edx
14993 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
14994 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
14995 +
14996 +#ifdef CONFIG_PAX_KERNEXEC
14997 + mov %cr0, %esi
14998 + btr $16, %esi
14999 + mov %esi, %cr0
15000 +#endif
15001 +
15002 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15003 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15004 +
15005 +#ifdef CONFIG_PAX_KERNEXEC
15006 + bts $16, %esi
15007 + mov %esi, %cr0
15008 +#endif
15009 +
15010 pushl_cfi $__ESPFIX_SS
15011 pushl_cfi %eax /* new kernel esp */
15012 /* Disable interrupts, but do not irqtrace this section: we
15013 @@ -615,38 +826,30 @@ work_resched:
15014 movl TI_flags(%ebp), %ecx
15015 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15016 # than syscall tracing?
15017 - jz restore_all
15018 + jz restore_all_pax
15019 testb $_TIF_NEED_RESCHED, %cl
15020 jnz work_resched
15021
15022 work_notifysig: # deal with pending signals and
15023 # notify-resume requests
15024 + movl %esp, %eax
15025 #ifdef CONFIG_VM86
15026 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15027 - movl %esp, %eax
15028 - jne work_notifysig_v86 # returning to kernel-space or
15029 + jz 1f # returning to kernel-space or
15030 # vm86-space
15031 - TRACE_IRQS_ON
15032 - ENABLE_INTERRUPTS(CLBR_NONE)
15033 - xorl %edx, %edx
15034 - call do_notify_resume
15035 - jmp resume_userspace_sig
15036
15037 - ALIGN
15038 -work_notifysig_v86:
15039 pushl_cfi %ecx # save ti_flags for do_notify_resume
15040 call save_v86_state # %eax contains pt_regs pointer
15041 popl_cfi %ecx
15042 movl %eax, %esp
15043 -#else
15044 - movl %esp, %eax
15045 +1:
15046 #endif
15047 TRACE_IRQS_ON
15048 ENABLE_INTERRUPTS(CLBR_NONE)
15049 xorl %edx, %edx
15050 call do_notify_resume
15051 jmp resume_userspace_sig
15052 -END(work_pending)
15053 +ENDPROC(work_pending)
15054
15055 # perform syscall exit tracing
15056 ALIGN
15057 @@ -654,11 +857,14 @@ syscall_trace_entry:
15058 movl $-ENOSYS,PT_EAX(%esp)
15059 movl %esp, %eax
15060 call syscall_trace_enter
15061 +
15062 + pax_erase_kstack
15063 +
15064 /* What it returned is what we'll actually use. */
15065 cmpl $(NR_syscalls), %eax
15066 jnae syscall_call
15067 jmp syscall_exit
15068 -END(syscall_trace_entry)
15069 +ENDPROC(syscall_trace_entry)
15070
15071 # perform syscall exit tracing
15072 ALIGN
15073 @@ -671,20 +877,24 @@ syscall_exit_work:
15074 movl %esp, %eax
15075 call syscall_trace_leave
15076 jmp resume_userspace
15077 -END(syscall_exit_work)
15078 +ENDPROC(syscall_exit_work)
15079 CFI_ENDPROC
15080
15081 RING0_INT_FRAME # can't unwind into user space anyway
15082 syscall_fault:
15083 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15084 + push %ss
15085 + pop %ds
15086 +#endif
15087 GET_THREAD_INFO(%ebp)
15088 movl $-EFAULT,PT_EAX(%esp)
15089 jmp resume_userspace
15090 -END(syscall_fault)
15091 +ENDPROC(syscall_fault)
15092
15093 syscall_badsys:
15094 movl $-ENOSYS,PT_EAX(%esp)
15095 jmp resume_userspace
15096 -END(syscall_badsys)
15097 +ENDPROC(syscall_badsys)
15098 CFI_ENDPROC
15099 /*
15100 * End of kprobes section
15101 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15102 CFI_ENDPROC
15103 ENDPROC(ptregs_clone)
15104
15105 + ALIGN;
15106 +ENTRY(kernel_execve)
15107 + CFI_STARTPROC
15108 + pushl_cfi %ebp
15109 + sub $PT_OLDSS+4,%esp
15110 + pushl_cfi %edi
15111 + pushl_cfi %ecx
15112 + pushl_cfi %eax
15113 + lea 3*4(%esp),%edi
15114 + mov $PT_OLDSS/4+1,%ecx
15115 + xorl %eax,%eax
15116 + rep stosl
15117 + popl_cfi %eax
15118 + popl_cfi %ecx
15119 + popl_cfi %edi
15120 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15121 + pushl_cfi %esp
15122 + call sys_execve
15123 + add $4,%esp
15124 + CFI_ADJUST_CFA_OFFSET -4
15125 + GET_THREAD_INFO(%ebp)
15126 + test %eax,%eax
15127 + jz syscall_exit
15128 + add $PT_OLDSS+4,%esp
15129 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15130 + popl_cfi %ebp
15131 + ret
15132 + CFI_ENDPROC
15133 +ENDPROC(kernel_execve)
15134 +
15135 .macro FIXUP_ESPFIX_STACK
15136 /*
15137 * Switch back for ESPFIX stack to the normal zerobased stack
15138 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15139 * normal stack and adjusts ESP with the matching offset.
15140 */
15141 /* fixup the stack */
15142 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15143 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15144 +#ifdef CONFIG_SMP
15145 + movl PER_CPU_VAR(cpu_number), %ebx
15146 + shll $PAGE_SHIFT_asm, %ebx
15147 + addl $cpu_gdt_table, %ebx
15148 +#else
15149 + movl $cpu_gdt_table, %ebx
15150 +#endif
15151 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15152 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15153 shl $16, %eax
15154 addl %esp, %eax /* the adjusted stack pointer */
15155 pushl_cfi $__KERNEL_DS
15156 @@ -819,7 +1066,7 @@ vector=vector+1
15157 .endr
15158 2: jmp common_interrupt
15159 .endr
15160 -END(irq_entries_start)
15161 +ENDPROC(irq_entries_start)
15162
15163 .previous
15164 END(interrupt)
15165 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15166 pushl_cfi $do_coprocessor_error
15167 jmp error_code
15168 CFI_ENDPROC
15169 -END(coprocessor_error)
15170 +ENDPROC(coprocessor_error)
15171
15172 ENTRY(simd_coprocessor_error)
15173 RING0_INT_FRAME
15174 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15175 #endif
15176 jmp error_code
15177 CFI_ENDPROC
15178 -END(simd_coprocessor_error)
15179 +ENDPROC(simd_coprocessor_error)
15180
15181 ENTRY(device_not_available)
15182 RING0_INT_FRAME
15183 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15184 pushl_cfi $do_device_not_available
15185 jmp error_code
15186 CFI_ENDPROC
15187 -END(device_not_available)
15188 +ENDPROC(device_not_available)
15189
15190 #ifdef CONFIG_PARAVIRT
15191 ENTRY(native_iret)
15192 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
15193 .align 4
15194 .long native_iret, iret_exc
15195 .previous
15196 -END(native_iret)
15197 +ENDPROC(native_iret)
15198
15199 ENTRY(native_irq_enable_sysexit)
15200 sti
15201 sysexit
15202 -END(native_irq_enable_sysexit)
15203 +ENDPROC(native_irq_enable_sysexit)
15204 #endif
15205
15206 ENTRY(overflow)
15207 @@ -919,7 +1166,7 @@ ENTRY(overflow)
15208 pushl_cfi $do_overflow
15209 jmp error_code
15210 CFI_ENDPROC
15211 -END(overflow)
15212 +ENDPROC(overflow)
15213
15214 ENTRY(bounds)
15215 RING0_INT_FRAME
15216 @@ -927,7 +1174,7 @@ ENTRY(bounds)
15217 pushl_cfi $do_bounds
15218 jmp error_code
15219 CFI_ENDPROC
15220 -END(bounds)
15221 +ENDPROC(bounds)
15222
15223 ENTRY(invalid_op)
15224 RING0_INT_FRAME
15225 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15226 pushl_cfi $do_invalid_op
15227 jmp error_code
15228 CFI_ENDPROC
15229 -END(invalid_op)
15230 +ENDPROC(invalid_op)
15231
15232 ENTRY(coprocessor_segment_overrun)
15233 RING0_INT_FRAME
15234 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15235 pushl_cfi $do_coprocessor_segment_overrun
15236 jmp error_code
15237 CFI_ENDPROC
15238 -END(coprocessor_segment_overrun)
15239 +ENDPROC(coprocessor_segment_overrun)
15240
15241 ENTRY(invalid_TSS)
15242 RING0_EC_FRAME
15243 pushl_cfi $do_invalid_TSS
15244 jmp error_code
15245 CFI_ENDPROC
15246 -END(invalid_TSS)
15247 +ENDPROC(invalid_TSS)
15248
15249 ENTRY(segment_not_present)
15250 RING0_EC_FRAME
15251 pushl_cfi $do_segment_not_present
15252 jmp error_code
15253 CFI_ENDPROC
15254 -END(segment_not_present)
15255 +ENDPROC(segment_not_present)
15256
15257 ENTRY(stack_segment)
15258 RING0_EC_FRAME
15259 pushl_cfi $do_stack_segment
15260 jmp error_code
15261 CFI_ENDPROC
15262 -END(stack_segment)
15263 +ENDPROC(stack_segment)
15264
15265 ENTRY(alignment_check)
15266 RING0_EC_FRAME
15267 pushl_cfi $do_alignment_check
15268 jmp error_code
15269 CFI_ENDPROC
15270 -END(alignment_check)
15271 +ENDPROC(alignment_check)
15272
15273 ENTRY(divide_error)
15274 RING0_INT_FRAME
15275 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15276 pushl_cfi $do_divide_error
15277 jmp error_code
15278 CFI_ENDPROC
15279 -END(divide_error)
15280 +ENDPROC(divide_error)
15281
15282 #ifdef CONFIG_X86_MCE
15283 ENTRY(machine_check)
15284 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15285 pushl_cfi machine_check_vector
15286 jmp error_code
15287 CFI_ENDPROC
15288 -END(machine_check)
15289 +ENDPROC(machine_check)
15290 #endif
15291
15292 ENTRY(spurious_interrupt_bug)
15293 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15294 pushl_cfi $do_spurious_interrupt_bug
15295 jmp error_code
15296 CFI_ENDPROC
15297 -END(spurious_interrupt_bug)
15298 +ENDPROC(spurious_interrupt_bug)
15299 /*
15300 * End of kprobes section
15301 */
15302 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15303
15304 ENTRY(mcount)
15305 ret
15306 -END(mcount)
15307 +ENDPROC(mcount)
15308
15309 ENTRY(ftrace_caller)
15310 cmpl $0, function_trace_stop
15311 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15312 .globl ftrace_stub
15313 ftrace_stub:
15314 ret
15315 -END(ftrace_caller)
15316 +ENDPROC(ftrace_caller)
15317
15318 #else /* ! CONFIG_DYNAMIC_FTRACE */
15319
15320 @@ -1177,7 +1424,7 @@ trace:
15321 popl %ecx
15322 popl %eax
15323 jmp ftrace_stub
15324 -END(mcount)
15325 +ENDPROC(mcount)
15326 #endif /* CONFIG_DYNAMIC_FTRACE */
15327 #endif /* CONFIG_FUNCTION_TRACER */
15328
15329 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15330 popl %ecx
15331 popl %eax
15332 ret
15333 -END(ftrace_graph_caller)
15334 +ENDPROC(ftrace_graph_caller)
15335
15336 .globl return_to_handler
15337 return_to_handler:
15338 @@ -1253,15 +1500,18 @@ error_code:
15339 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15340 REG_TO_PTGS %ecx
15341 SET_KERNEL_GS %ecx
15342 - movl $(__USER_DS), %ecx
15343 + movl $(__KERNEL_DS), %ecx
15344 movl %ecx, %ds
15345 movl %ecx, %es
15346 +
15347 + pax_enter_kernel
15348 +
15349 TRACE_IRQS_OFF
15350 movl %esp,%eax # pt_regs pointer
15351 call *%edi
15352 jmp ret_from_exception
15353 CFI_ENDPROC
15354 -END(page_fault)
15355 +ENDPROC(page_fault)
15356
15357 /*
15358 * Debug traps and NMI can happen at the one SYSENTER instruction
15359 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15360 call do_debug
15361 jmp ret_from_exception
15362 CFI_ENDPROC
15363 -END(debug)
15364 +ENDPROC(debug)
15365
15366 /*
15367 * NMI is doubly nasty. It can happen _while_ we're handling
15368 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15369 xorl %edx,%edx # zero error code
15370 movl %esp,%eax # pt_regs pointer
15371 call do_nmi
15372 +
15373 + pax_exit_kernel
15374 +
15375 jmp restore_all_notrace
15376 CFI_ENDPROC
15377
15378 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15379 FIXUP_ESPFIX_STACK # %eax == %esp
15380 xorl %edx,%edx # zero error code
15381 call do_nmi
15382 +
15383 + pax_exit_kernel
15384 +
15385 RESTORE_REGS
15386 lss 12+4(%esp), %esp # back to espfix stack
15387 CFI_ADJUST_CFA_OFFSET -24
15388 jmp irq_return
15389 CFI_ENDPROC
15390 -END(nmi)
15391 +ENDPROC(nmi)
15392
15393 ENTRY(int3)
15394 RING0_INT_FRAME
15395 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15396 call do_int3
15397 jmp ret_from_exception
15398 CFI_ENDPROC
15399 -END(int3)
15400 +ENDPROC(int3)
15401
15402 ENTRY(general_protection)
15403 RING0_EC_FRAME
15404 pushl_cfi $do_general_protection
15405 jmp error_code
15406 CFI_ENDPROC
15407 -END(general_protection)
15408 +ENDPROC(general_protection)
15409
15410 #ifdef CONFIG_KVM_GUEST
15411 ENTRY(async_page_fault)
15412 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15413 pushl_cfi $do_async_page_fault
15414 jmp error_code
15415 CFI_ENDPROC
15416 -END(async_page_fault)
15417 +ENDPROC(async_page_fault)
15418 #endif
15419
15420 /*
15421 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15422 index 1333d98..b340ca2 100644
15423 --- a/arch/x86/kernel/entry_64.S
15424 +++ b/arch/x86/kernel/entry_64.S
15425 @@ -56,6 +56,8 @@
15426 #include <asm/ftrace.h>
15427 #include <asm/percpu.h>
15428 #include <linux/err.h>
15429 +#include <asm/pgtable.h>
15430 +#include <asm/alternative-asm.h>
15431
15432 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15433 #include <linux/elf-em.h>
15434 @@ -69,8 +71,9 @@
15435 #ifdef CONFIG_FUNCTION_TRACER
15436 #ifdef CONFIG_DYNAMIC_FTRACE
15437 ENTRY(mcount)
15438 + pax_force_retaddr
15439 retq
15440 -END(mcount)
15441 +ENDPROC(mcount)
15442
15443 ENTRY(ftrace_caller)
15444 cmpl $0, function_trace_stop
15445 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15446 #endif
15447
15448 GLOBAL(ftrace_stub)
15449 + pax_force_retaddr
15450 retq
15451 -END(ftrace_caller)
15452 +ENDPROC(ftrace_caller)
15453
15454 #else /* ! CONFIG_DYNAMIC_FTRACE */
15455 ENTRY(mcount)
15456 @@ -113,6 +117,7 @@ ENTRY(mcount)
15457 #endif
15458
15459 GLOBAL(ftrace_stub)
15460 + pax_force_retaddr
15461 retq
15462
15463 trace:
15464 @@ -122,12 +127,13 @@ trace:
15465 movq 8(%rbp), %rsi
15466 subq $MCOUNT_INSN_SIZE, %rdi
15467
15468 + pax_force_fptr ftrace_trace_function
15469 call *ftrace_trace_function
15470
15471 MCOUNT_RESTORE_FRAME
15472
15473 jmp ftrace_stub
15474 -END(mcount)
15475 +ENDPROC(mcount)
15476 #endif /* CONFIG_DYNAMIC_FTRACE */
15477 #endif /* CONFIG_FUNCTION_TRACER */
15478
15479 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15480
15481 MCOUNT_RESTORE_FRAME
15482
15483 + pax_force_retaddr
15484 retq
15485 -END(ftrace_graph_caller)
15486 +ENDPROC(ftrace_graph_caller)
15487
15488 GLOBAL(return_to_handler)
15489 subq $24, %rsp
15490 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15491 movq 8(%rsp), %rdx
15492 movq (%rsp), %rax
15493 addq $24, %rsp
15494 + pax_force_fptr %rdi
15495 jmp *%rdi
15496 #endif
15497
15498 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15499 ENDPROC(native_usergs_sysret64)
15500 #endif /* CONFIG_PARAVIRT */
15501
15502 + .macro ljmpq sel, off
15503 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15504 + .byte 0x48; ljmp *1234f(%rip)
15505 + .pushsection .rodata
15506 + .align 16
15507 + 1234: .quad \off; .word \sel
15508 + .popsection
15509 +#else
15510 + pushq $\sel
15511 + pushq $\off
15512 + lretq
15513 +#endif
15514 + .endm
15515 +
15516 + .macro pax_enter_kernel
15517 + pax_set_fptr_mask
15518 +#ifdef CONFIG_PAX_KERNEXEC
15519 + call pax_enter_kernel
15520 +#endif
15521 + .endm
15522 +
15523 + .macro pax_exit_kernel
15524 +#ifdef CONFIG_PAX_KERNEXEC
15525 + call pax_exit_kernel
15526 +#endif
15527 + .endm
15528 +
15529 +#ifdef CONFIG_PAX_KERNEXEC
15530 +ENTRY(pax_enter_kernel)
15531 + pushq %rdi
15532 +
15533 +#ifdef CONFIG_PARAVIRT
15534 + PV_SAVE_REGS(CLBR_RDI)
15535 +#endif
15536 +
15537 + GET_CR0_INTO_RDI
15538 + bts $16,%rdi
15539 + jnc 3f
15540 + mov %cs,%edi
15541 + cmp $__KERNEL_CS,%edi
15542 + jnz 2f
15543 +1:
15544 +
15545 +#ifdef CONFIG_PARAVIRT
15546 + PV_RESTORE_REGS(CLBR_RDI)
15547 +#endif
15548 +
15549 + popq %rdi
15550 + pax_force_retaddr
15551 + retq
15552 +
15553 +2: ljmpq __KERNEL_CS,1f
15554 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15555 +4: SET_RDI_INTO_CR0
15556 + jmp 1b
15557 +ENDPROC(pax_enter_kernel)
15558 +
15559 +ENTRY(pax_exit_kernel)
15560 + pushq %rdi
15561 +
15562 +#ifdef CONFIG_PARAVIRT
15563 + PV_SAVE_REGS(CLBR_RDI)
15564 +#endif
15565 +
15566 + mov %cs,%rdi
15567 + cmp $__KERNEXEC_KERNEL_CS,%edi
15568 + jz 2f
15569 +1:
15570 +
15571 +#ifdef CONFIG_PARAVIRT
15572 + PV_RESTORE_REGS(CLBR_RDI);
15573 +#endif
15574 +
15575 + popq %rdi
15576 + pax_force_retaddr
15577 + retq
15578 +
15579 +2: GET_CR0_INTO_RDI
15580 + btr $16,%rdi
15581 + ljmpq __KERNEL_CS,3f
15582 +3: SET_RDI_INTO_CR0
15583 + jmp 1b
15584 +#ifdef CONFIG_PARAVIRT
15585 + PV_RESTORE_REGS(CLBR_RDI);
15586 +#endif
15587 +
15588 + popq %rdi
15589 + pax_force_retaddr
15590 + retq
15591 +ENDPROC(pax_exit_kernel)
15592 +#endif
15593 +
15594 + .macro pax_enter_kernel_user
15595 + pax_set_fptr_mask
15596 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15597 + call pax_enter_kernel_user
15598 +#endif
15599 + .endm
15600 +
15601 + .macro pax_exit_kernel_user
15602 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15603 + call pax_exit_kernel_user
15604 +#endif
15605 +#ifdef CONFIG_PAX_RANDKSTACK
15606 + pushq %rax
15607 + call pax_randomize_kstack
15608 + popq %rax
15609 +#endif
15610 + .endm
15611 +
15612 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15613 +ENTRY(pax_enter_kernel_user)
15614 + pushq %rdi
15615 + pushq %rbx
15616 +
15617 +#ifdef CONFIG_PARAVIRT
15618 + PV_SAVE_REGS(CLBR_RDI)
15619 +#endif
15620 +
15621 + GET_CR3_INTO_RDI
15622 + mov %rdi,%rbx
15623 + add $__START_KERNEL_map,%rbx
15624 + sub phys_base(%rip),%rbx
15625 +
15626 +#ifdef CONFIG_PARAVIRT
15627 + pushq %rdi
15628 + cmpl $0, pv_info+PARAVIRT_enabled
15629 + jz 1f
15630 + i = 0
15631 + .rept USER_PGD_PTRS
15632 + mov i*8(%rbx),%rsi
15633 + mov $0,%sil
15634 + lea i*8(%rbx),%rdi
15635 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15636 + i = i + 1
15637 + .endr
15638 + jmp 2f
15639 +1:
15640 +#endif
15641 +
15642 + i = 0
15643 + .rept USER_PGD_PTRS
15644 + movb $0,i*8(%rbx)
15645 + i = i + 1
15646 + .endr
15647 +
15648 +#ifdef CONFIG_PARAVIRT
15649 +2: popq %rdi
15650 +#endif
15651 + SET_RDI_INTO_CR3
15652 +
15653 +#ifdef CONFIG_PAX_KERNEXEC
15654 + GET_CR0_INTO_RDI
15655 + bts $16,%rdi
15656 + SET_RDI_INTO_CR0
15657 +#endif
15658 +
15659 +#ifdef CONFIG_PARAVIRT
15660 + PV_RESTORE_REGS(CLBR_RDI)
15661 +#endif
15662 +
15663 + popq %rbx
15664 + popq %rdi
15665 + pax_force_retaddr
15666 + retq
15667 +ENDPROC(pax_enter_kernel_user)
15668 +
15669 +ENTRY(pax_exit_kernel_user)
15670 + push %rdi
15671 +
15672 +#ifdef CONFIG_PARAVIRT
15673 + pushq %rbx
15674 + PV_SAVE_REGS(CLBR_RDI)
15675 +#endif
15676 +
15677 +#ifdef CONFIG_PAX_KERNEXEC
15678 + GET_CR0_INTO_RDI
15679 + btr $16,%rdi
15680 + SET_RDI_INTO_CR0
15681 +#endif
15682 +
15683 + GET_CR3_INTO_RDI
15684 + add $__START_KERNEL_map,%rdi
15685 + sub phys_base(%rip),%rdi
15686 +
15687 +#ifdef CONFIG_PARAVIRT
15688 + cmpl $0, pv_info+PARAVIRT_enabled
15689 + jz 1f
15690 + mov %rdi,%rbx
15691 + i = 0
15692 + .rept USER_PGD_PTRS
15693 + mov i*8(%rbx),%rsi
15694 + mov $0x67,%sil
15695 + lea i*8(%rbx),%rdi
15696 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15697 + i = i + 1
15698 + .endr
15699 + jmp 2f
15700 +1:
15701 +#endif
15702 +
15703 + i = 0
15704 + .rept USER_PGD_PTRS
15705 + movb $0x67,i*8(%rdi)
15706 + i = i + 1
15707 + .endr
15708 +
15709 +#ifdef CONFIG_PARAVIRT
15710 +2: PV_RESTORE_REGS(CLBR_RDI)
15711 + popq %rbx
15712 +#endif
15713 +
15714 + popq %rdi
15715 + pax_force_retaddr
15716 + retq
15717 +ENDPROC(pax_exit_kernel_user)
15718 +#endif
15719 +
15720 +.macro pax_erase_kstack
15721 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15722 + call pax_erase_kstack
15723 +#endif
15724 +.endm
15725 +
15726 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15727 +/*
15728 + * r11: thread_info
15729 + * rcx, rdx: can be clobbered
15730 + */
15731 +ENTRY(pax_erase_kstack)
15732 + pushq %rdi
15733 + pushq %rax
15734 + pushq %r11
15735 +
15736 + GET_THREAD_INFO(%r11)
15737 + mov TI_lowest_stack(%r11), %rdi
15738 + mov $-0xBEEF, %rax
15739 + std
15740 +
15741 +1: mov %edi, %ecx
15742 + and $THREAD_SIZE_asm - 1, %ecx
15743 + shr $3, %ecx
15744 + repne scasq
15745 + jecxz 2f
15746 +
15747 + cmp $2*8, %ecx
15748 + jc 2f
15749 +
15750 + mov $2*8, %ecx
15751 + repe scasq
15752 + jecxz 2f
15753 + jne 1b
15754 +
15755 +2: cld
15756 + mov %esp, %ecx
15757 + sub %edi, %ecx
15758 +
15759 + cmp $THREAD_SIZE_asm, %rcx
15760 + jb 3f
15761 + ud2
15762 +3:
15763 +
15764 + shr $3, %ecx
15765 + rep stosq
15766 +
15767 + mov TI_task_thread_sp0(%r11), %rdi
15768 + sub $256, %rdi
15769 + mov %rdi, TI_lowest_stack(%r11)
15770 +
15771 + popq %r11
15772 + popq %rax
15773 + popq %rdi
15774 + pax_force_retaddr
15775 + ret
15776 +ENDPROC(pax_erase_kstack)
15777 +#endif
15778
15779 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15780 #ifdef CONFIG_TRACE_IRQFLAGS
15781 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
15782 .endm
15783
15784 .macro UNFAKE_STACK_FRAME
15785 - addq $8*6, %rsp
15786 - CFI_ADJUST_CFA_OFFSET -(6*8)
15787 + addq $8*6 + ARG_SKIP, %rsp
15788 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
15789 .endm
15790
15791 /*
15792 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
15793 movq %rsp, %rsi
15794
15795 leaq -RBP(%rsp),%rdi /* arg1 for handler */
15796 - testl $3, CS(%rdi)
15797 + testb $3, CS(%rdi)
15798 je 1f
15799 SWAPGS
15800 /*
15801 @@ -356,9 +640,10 @@ ENTRY(save_rest)
15802 movq_cfi r15, R15+16
15803 movq %r11, 8(%rsp) /* return address */
15804 FIXUP_TOP_OF_STACK %r11, 16
15805 + pax_force_retaddr
15806 ret
15807 CFI_ENDPROC
15808 -END(save_rest)
15809 +ENDPROC(save_rest)
15810
15811 /* save complete stack frame */
15812 .pushsection .kprobes.text, "ax"
15813 @@ -387,9 +672,10 @@ ENTRY(save_paranoid)
15814 js 1f /* negative -> in kernel */
15815 SWAPGS
15816 xorl %ebx,%ebx
15817 -1: ret
15818 +1: pax_force_retaddr_bts
15819 + ret
15820 CFI_ENDPROC
15821 -END(save_paranoid)
15822 +ENDPROC(save_paranoid)
15823 .popsection
15824
15825 /*
15826 @@ -411,7 +697,7 @@ ENTRY(ret_from_fork)
15827
15828 RESTORE_REST
15829
15830 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15831 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
15832 jz retint_restore_args
15833
15834 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
15835 @@ -421,7 +707,7 @@ ENTRY(ret_from_fork)
15836 jmp ret_from_sys_call # go to the SYSRET fastpath
15837
15838 CFI_ENDPROC
15839 -END(ret_from_fork)
15840 +ENDPROC(ret_from_fork)
15841
15842 /*
15843 * System call entry. Up to 6 arguments in registers are supported.
15844 @@ -457,7 +743,7 @@ END(ret_from_fork)
15845 ENTRY(system_call)
15846 CFI_STARTPROC simple
15847 CFI_SIGNAL_FRAME
15848 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
15849 + CFI_DEF_CFA rsp,0
15850 CFI_REGISTER rip,rcx
15851 /*CFI_REGISTER rflags,r11*/
15852 SWAPGS_UNSAFE_STACK
15853 @@ -470,21 +756,23 @@ GLOBAL(system_call_after_swapgs)
15854
15855 movq %rsp,PER_CPU_VAR(old_rsp)
15856 movq PER_CPU_VAR(kernel_stack),%rsp
15857 + SAVE_ARGS 8*6,0
15858 + pax_enter_kernel_user
15859 /*
15860 * No need to follow this irqs off/on section - it's straight
15861 * and short:
15862 */
15863 ENABLE_INTERRUPTS(CLBR_NONE)
15864 - SAVE_ARGS 8,0
15865 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
15866 movq %rcx,RIP-ARGOFFSET(%rsp)
15867 CFI_REL_OFFSET rip,RIP-ARGOFFSET
15868 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15869 + GET_THREAD_INFO(%rcx)
15870 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
15871 jnz tracesys
15872 system_call_fastpath:
15873 cmpq $__NR_syscall_max,%rax
15874 ja badsys
15875 - movq %r10,%rcx
15876 + movq R10-ARGOFFSET(%rsp),%rcx
15877 call *sys_call_table(,%rax,8) # XXX: rip relative
15878 movq %rax,RAX-ARGOFFSET(%rsp)
15879 /*
15880 @@ -498,10 +786,13 @@ sysret_check:
15881 LOCKDEP_SYS_EXIT
15882 DISABLE_INTERRUPTS(CLBR_NONE)
15883 TRACE_IRQS_OFF
15884 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
15885 + GET_THREAD_INFO(%rcx)
15886 + movl TI_flags(%rcx),%edx
15887 andl %edi,%edx
15888 jnz sysret_careful
15889 CFI_REMEMBER_STATE
15890 + pax_exit_kernel_user
15891 + pax_erase_kstack
15892 /*
15893 * sysretq will re-enable interrupts:
15894 */
15895 @@ -553,14 +844,18 @@ badsys:
15896 * jump back to the normal fast path.
15897 */
15898 auditsys:
15899 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
15900 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
15901 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
15902 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
15903 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
15904 movq %rax,%rsi /* 2nd arg: syscall number */
15905 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
15906 call __audit_syscall_entry
15907 +
15908 + pax_erase_kstack
15909 +
15910 LOAD_ARGS 0 /* reload call-clobbered registers */
15911 + pax_set_fptr_mask
15912 jmp system_call_fastpath
15913
15914 /*
15915 @@ -581,7 +876,7 @@ sysret_audit:
15916 /* Do syscall tracing */
15917 tracesys:
15918 #ifdef CONFIG_AUDITSYSCALL
15919 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
15920 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
15921 jz auditsys
15922 #endif
15923 SAVE_REST
15924 @@ -589,16 +884,20 @@ tracesys:
15925 FIXUP_TOP_OF_STACK %rdi
15926 movq %rsp,%rdi
15927 call syscall_trace_enter
15928 +
15929 + pax_erase_kstack
15930 +
15931 /*
15932 * Reload arg registers from stack in case ptrace changed them.
15933 * We don't reload %rax because syscall_trace_enter() returned
15934 * the value it wants us to use in the table lookup.
15935 */
15936 LOAD_ARGS ARGOFFSET, 1
15937 + pax_set_fptr_mask
15938 RESTORE_REST
15939 cmpq $__NR_syscall_max,%rax
15940 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
15941 - movq %r10,%rcx /* fixup for C */
15942 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
15943 call *sys_call_table(,%rax,8)
15944 movq %rax,RAX-ARGOFFSET(%rsp)
15945 /* Use IRET because user could have changed frame */
15946 @@ -619,6 +918,7 @@ GLOBAL(int_with_check)
15947 andl %edi,%edx
15948 jnz int_careful
15949 andl $~TS_COMPAT,TI_status(%rcx)
15950 + pax_erase_kstack
15951 jmp retint_swapgs
15952
15953 /* Either reschedule or signal or syscall exit tracking needed. */
15954 @@ -665,7 +965,7 @@ int_restore_rest:
15955 TRACE_IRQS_OFF
15956 jmp int_with_check
15957 CFI_ENDPROC
15958 -END(system_call)
15959 +ENDPROC(system_call)
15960
15961 /*
15962 * Certain special system calls that need to save a complete full stack frame.
15963 @@ -681,7 +981,7 @@ ENTRY(\label)
15964 call \func
15965 jmp ptregscall_common
15966 CFI_ENDPROC
15967 -END(\label)
15968 +ENDPROC(\label)
15969 .endm
15970
15971 PTREGSCALL stub_clone, sys_clone, %r8
15972 @@ -699,9 +999,10 @@ ENTRY(ptregscall_common)
15973 movq_cfi_restore R12+8, r12
15974 movq_cfi_restore RBP+8, rbp
15975 movq_cfi_restore RBX+8, rbx
15976 + pax_force_retaddr
15977 ret $REST_SKIP /* pop extended registers */
15978 CFI_ENDPROC
15979 -END(ptregscall_common)
15980 +ENDPROC(ptregscall_common)
15981
15982 ENTRY(stub_execve)
15983 CFI_STARTPROC
15984 @@ -716,7 +1017,7 @@ ENTRY(stub_execve)
15985 RESTORE_REST
15986 jmp int_ret_from_sys_call
15987 CFI_ENDPROC
15988 -END(stub_execve)
15989 +ENDPROC(stub_execve)
15990
15991 /*
15992 * sigreturn is special because it needs to restore all registers on return.
15993 @@ -734,7 +1035,7 @@ ENTRY(stub_rt_sigreturn)
15994 RESTORE_REST
15995 jmp int_ret_from_sys_call
15996 CFI_ENDPROC
15997 -END(stub_rt_sigreturn)
15998 +ENDPROC(stub_rt_sigreturn)
15999
16000 /*
16001 * Build the entry stubs and pointer table with some assembler magic.
16002 @@ -769,7 +1070,7 @@ vector=vector+1
16003 2: jmp common_interrupt
16004 .endr
16005 CFI_ENDPROC
16006 -END(irq_entries_start)
16007 +ENDPROC(irq_entries_start)
16008
16009 .previous
16010 END(interrupt)
16011 @@ -789,6 +1090,16 @@ END(interrupt)
16012 subq $ORIG_RAX-RBP, %rsp
16013 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16014 SAVE_ARGS_IRQ
16015 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16016 + testb $3, CS(%rdi)
16017 + jnz 1f
16018 + pax_enter_kernel
16019 + jmp 2f
16020 +1: pax_enter_kernel_user
16021 +2:
16022 +#else
16023 + pax_enter_kernel
16024 +#endif
16025 call \func
16026 .endm
16027
16028 @@ -820,7 +1131,7 @@ ret_from_intr:
16029
16030 exit_intr:
16031 GET_THREAD_INFO(%rcx)
16032 - testl $3,CS-ARGOFFSET(%rsp)
16033 + testb $3,CS-ARGOFFSET(%rsp)
16034 je retint_kernel
16035
16036 /* Interrupt came from user space */
16037 @@ -842,12 +1153,15 @@ retint_swapgs: /* return to user-space */
16038 * The iretq could re-enable interrupts:
16039 */
16040 DISABLE_INTERRUPTS(CLBR_ANY)
16041 + pax_exit_kernel_user
16042 TRACE_IRQS_IRETQ
16043 SWAPGS
16044 jmp restore_args
16045
16046 retint_restore_args: /* return to kernel space */
16047 DISABLE_INTERRUPTS(CLBR_ANY)
16048 + pax_exit_kernel
16049 + pax_force_retaddr RIP-ARGOFFSET
16050 /*
16051 * The iretq could re-enable interrupts:
16052 */
16053 @@ -936,7 +1250,7 @@ ENTRY(retint_kernel)
16054 #endif
16055
16056 CFI_ENDPROC
16057 -END(common_interrupt)
16058 +ENDPROC(common_interrupt)
16059 /*
16060 * End of kprobes section
16061 */
16062 @@ -953,7 +1267,7 @@ ENTRY(\sym)
16063 interrupt \do_sym
16064 jmp ret_from_intr
16065 CFI_ENDPROC
16066 -END(\sym)
16067 +ENDPROC(\sym)
16068 .endm
16069
16070 #ifdef CONFIG_SMP
16071 @@ -1026,12 +1340,22 @@ ENTRY(\sym)
16072 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16073 call error_entry
16074 DEFAULT_FRAME 0
16075 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16076 + testb $3, CS(%rsp)
16077 + jnz 1f
16078 + pax_enter_kernel
16079 + jmp 2f
16080 +1: pax_enter_kernel_user
16081 +2:
16082 +#else
16083 + pax_enter_kernel
16084 +#endif
16085 movq %rsp,%rdi /* pt_regs pointer */
16086 xorl %esi,%esi /* no error code */
16087 call \do_sym
16088 jmp error_exit /* %ebx: no swapgs flag */
16089 CFI_ENDPROC
16090 -END(\sym)
16091 +ENDPROC(\sym)
16092 .endm
16093
16094 .macro paranoidzeroentry sym do_sym
16095 @@ -1043,15 +1367,25 @@ ENTRY(\sym)
16096 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16097 call save_paranoid
16098 TRACE_IRQS_OFF
16099 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16100 + testb $3, CS(%rsp)
16101 + jnz 1f
16102 + pax_enter_kernel
16103 + jmp 2f
16104 +1: pax_enter_kernel_user
16105 +2:
16106 +#else
16107 + pax_enter_kernel
16108 +#endif
16109 movq %rsp,%rdi /* pt_regs pointer */
16110 xorl %esi,%esi /* no error code */
16111 call \do_sym
16112 jmp paranoid_exit /* %ebx: no swapgs flag */
16113 CFI_ENDPROC
16114 -END(\sym)
16115 +ENDPROC(\sym)
16116 .endm
16117
16118 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16119 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16120 .macro paranoidzeroentry_ist sym do_sym ist
16121 ENTRY(\sym)
16122 INTR_FRAME
16123 @@ -1061,14 +1395,30 @@ ENTRY(\sym)
16124 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16125 call save_paranoid
16126 TRACE_IRQS_OFF
16127 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16128 + testb $3, CS(%rsp)
16129 + jnz 1f
16130 + pax_enter_kernel
16131 + jmp 2f
16132 +1: pax_enter_kernel_user
16133 +2:
16134 +#else
16135 + pax_enter_kernel
16136 +#endif
16137 movq %rsp,%rdi /* pt_regs pointer */
16138 xorl %esi,%esi /* no error code */
16139 +#ifdef CONFIG_SMP
16140 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16141 + lea init_tss(%r12), %r12
16142 +#else
16143 + lea init_tss(%rip), %r12
16144 +#endif
16145 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16146 call \do_sym
16147 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16148 jmp paranoid_exit /* %ebx: no swapgs flag */
16149 CFI_ENDPROC
16150 -END(\sym)
16151 +ENDPROC(\sym)
16152 .endm
16153
16154 .macro errorentry sym do_sym
16155 @@ -1079,13 +1429,23 @@ ENTRY(\sym)
16156 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16157 call error_entry
16158 DEFAULT_FRAME 0
16159 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16160 + testb $3, CS(%rsp)
16161 + jnz 1f
16162 + pax_enter_kernel
16163 + jmp 2f
16164 +1: pax_enter_kernel_user
16165 +2:
16166 +#else
16167 + pax_enter_kernel
16168 +#endif
16169 movq %rsp,%rdi /* pt_regs pointer */
16170 movq ORIG_RAX(%rsp),%rsi /* get error code */
16171 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16172 call \do_sym
16173 jmp error_exit /* %ebx: no swapgs flag */
16174 CFI_ENDPROC
16175 -END(\sym)
16176 +ENDPROC(\sym)
16177 .endm
16178
16179 /* error code is on the stack already */
16180 @@ -1098,13 +1458,23 @@ ENTRY(\sym)
16181 call save_paranoid
16182 DEFAULT_FRAME 0
16183 TRACE_IRQS_OFF
16184 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16185 + testb $3, CS(%rsp)
16186 + jnz 1f
16187 + pax_enter_kernel
16188 + jmp 2f
16189 +1: pax_enter_kernel_user
16190 +2:
16191 +#else
16192 + pax_enter_kernel
16193 +#endif
16194 movq %rsp,%rdi /* pt_regs pointer */
16195 movq ORIG_RAX(%rsp),%rsi /* get error code */
16196 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16197 call \do_sym
16198 jmp paranoid_exit /* %ebx: no swapgs flag */
16199 CFI_ENDPROC
16200 -END(\sym)
16201 +ENDPROC(\sym)
16202 .endm
16203
16204 zeroentry divide_error do_divide_error
16205 @@ -1134,9 +1504,10 @@ gs_change:
16206 2: mfence /* workaround */
16207 SWAPGS
16208 popfq_cfi
16209 + pax_force_retaddr
16210 ret
16211 CFI_ENDPROC
16212 -END(native_load_gs_index)
16213 +ENDPROC(native_load_gs_index)
16214
16215 .section __ex_table,"a"
16216 .align 8
16217 @@ -1158,13 +1529,14 @@ ENTRY(kernel_thread_helper)
16218 * Here we are in the child and the registers are set as they were
16219 * at kernel_thread() invocation in the parent.
16220 */
16221 + pax_force_fptr %rsi
16222 call *%rsi
16223 # exit
16224 mov %eax, %edi
16225 call do_exit
16226 ud2 # padding for call trace
16227 CFI_ENDPROC
16228 -END(kernel_thread_helper)
16229 +ENDPROC(kernel_thread_helper)
16230
16231 /*
16232 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16233 @@ -1191,11 +1563,11 @@ ENTRY(kernel_execve)
16234 RESTORE_REST
16235 testq %rax,%rax
16236 je int_ret_from_sys_call
16237 - RESTORE_ARGS
16238 UNFAKE_STACK_FRAME
16239 + pax_force_retaddr
16240 ret
16241 CFI_ENDPROC
16242 -END(kernel_execve)
16243 +ENDPROC(kernel_execve)
16244
16245 /* Call softirq on interrupt stack. Interrupts are off. */
16246 ENTRY(call_softirq)
16247 @@ -1213,9 +1585,10 @@ ENTRY(call_softirq)
16248 CFI_DEF_CFA_REGISTER rsp
16249 CFI_ADJUST_CFA_OFFSET -8
16250 decl PER_CPU_VAR(irq_count)
16251 + pax_force_retaddr
16252 ret
16253 CFI_ENDPROC
16254 -END(call_softirq)
16255 +ENDPROC(call_softirq)
16256
16257 #ifdef CONFIG_XEN
16258 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16259 @@ -1253,7 +1626,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16260 decl PER_CPU_VAR(irq_count)
16261 jmp error_exit
16262 CFI_ENDPROC
16263 -END(xen_do_hypervisor_callback)
16264 +ENDPROC(xen_do_hypervisor_callback)
16265
16266 /*
16267 * Hypervisor uses this for application faults while it executes.
16268 @@ -1312,7 +1685,7 @@ ENTRY(xen_failsafe_callback)
16269 SAVE_ALL
16270 jmp error_exit
16271 CFI_ENDPROC
16272 -END(xen_failsafe_callback)
16273 +ENDPROC(xen_failsafe_callback)
16274
16275 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16276 xen_hvm_callback_vector xen_evtchn_do_upcall
16277 @@ -1361,16 +1734,31 @@ ENTRY(paranoid_exit)
16278 TRACE_IRQS_OFF
16279 testl %ebx,%ebx /* swapgs needed? */
16280 jnz paranoid_restore
16281 - testl $3,CS(%rsp)
16282 + testb $3,CS(%rsp)
16283 jnz paranoid_userspace
16284 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16285 + pax_exit_kernel
16286 + TRACE_IRQS_IRETQ 0
16287 + SWAPGS_UNSAFE_STACK
16288 + RESTORE_ALL 8
16289 + pax_force_retaddr_bts
16290 + jmp irq_return
16291 +#endif
16292 paranoid_swapgs:
16293 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16294 + pax_exit_kernel_user
16295 +#else
16296 + pax_exit_kernel
16297 +#endif
16298 TRACE_IRQS_IRETQ 0
16299 SWAPGS_UNSAFE_STACK
16300 RESTORE_ALL 8
16301 jmp irq_return
16302 paranoid_restore:
16303 + pax_exit_kernel
16304 TRACE_IRQS_IRETQ 0
16305 RESTORE_ALL 8
16306 + pax_force_retaddr_bts
16307 jmp irq_return
16308 paranoid_userspace:
16309 GET_THREAD_INFO(%rcx)
16310 @@ -1399,7 +1787,7 @@ paranoid_schedule:
16311 TRACE_IRQS_OFF
16312 jmp paranoid_userspace
16313 CFI_ENDPROC
16314 -END(paranoid_exit)
16315 +ENDPROC(paranoid_exit)
16316
16317 /*
16318 * Exception entry point. This expects an error code/orig_rax on the stack.
16319 @@ -1426,12 +1814,13 @@ ENTRY(error_entry)
16320 movq_cfi r14, R14+8
16321 movq_cfi r15, R15+8
16322 xorl %ebx,%ebx
16323 - testl $3,CS+8(%rsp)
16324 + testb $3,CS+8(%rsp)
16325 je error_kernelspace
16326 error_swapgs:
16327 SWAPGS
16328 error_sti:
16329 TRACE_IRQS_OFF
16330 + pax_force_retaddr_bts
16331 ret
16332
16333 /*
16334 @@ -1458,7 +1847,7 @@ bstep_iret:
16335 movq %rcx,RIP+8(%rsp)
16336 jmp error_swapgs
16337 CFI_ENDPROC
16338 -END(error_entry)
16339 +ENDPROC(error_entry)
16340
16341
16342 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16343 @@ -1478,7 +1867,7 @@ ENTRY(error_exit)
16344 jnz retint_careful
16345 jmp retint_swapgs
16346 CFI_ENDPROC
16347 -END(error_exit)
16348 +ENDPROC(error_exit)
16349
16350 /*
16351 * Test if a given stack is an NMI stack or not.
16352 @@ -1535,9 +1924,11 @@ ENTRY(nmi)
16353 * If %cs was not the kernel segment, then the NMI triggered in user
16354 * space, which means it is definitely not nested.
16355 */
16356 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16357 + je 1f
16358 cmpl $__KERNEL_CS, 16(%rsp)
16359 jne first_nmi
16360 -
16361 +1:
16362 /*
16363 * Check the special variable on the stack to see if NMIs are
16364 * executing.
16365 @@ -1659,6 +2050,16 @@ restart_nmi:
16366 */
16367 call save_paranoid
16368 DEFAULT_FRAME 0
16369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16370 + testb $3, CS(%rsp)
16371 + jnz 1f
16372 + pax_enter_kernel
16373 + jmp 2f
16374 +1: pax_enter_kernel_user
16375 +2:
16376 +#else
16377 + pax_enter_kernel
16378 +#endif
16379 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16380 movq %rsp,%rdi
16381 movq $-1,%rsi
16382 @@ -1666,14 +2067,25 @@ restart_nmi:
16383 testl %ebx,%ebx /* swapgs needed? */
16384 jnz nmi_restore
16385 nmi_swapgs:
16386 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16387 + pax_exit_kernel_user
16388 +#else
16389 + pax_exit_kernel
16390 +#endif
16391 SWAPGS_UNSAFE_STACK
16392 + RESTORE_ALL 8
16393 + /* Clear the NMI executing stack variable */
16394 + movq $0, 10*8(%rsp)
16395 + jmp irq_return
16396 nmi_restore:
16397 + pax_exit_kernel
16398 RESTORE_ALL 8
16399 + pax_force_retaddr_bts
16400 /* Clear the NMI executing stack variable */
16401 movq $0, 10*8(%rsp)
16402 jmp irq_return
16403 CFI_ENDPROC
16404 -END(nmi)
16405 +ENDPROC(nmi)
16406
16407 /*
16408 * If an NMI hit an iret because of an exception or breakpoint,
16409 @@ -1700,7 +2112,7 @@ ENTRY(ignore_sysret)
16410 mov $-ENOSYS,%eax
16411 sysret
16412 CFI_ENDPROC
16413 -END(ignore_sysret)
16414 +ENDPROC(ignore_sysret)
16415
16416 /*
16417 * End of kprobes section
16418 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16419 index c9a281f..ce2f317 100644
16420 --- a/arch/x86/kernel/ftrace.c
16421 +++ b/arch/x86/kernel/ftrace.c
16422 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16423 static const void *mod_code_newcode; /* holds the text to write to the IP */
16424
16425 static unsigned nmi_wait_count;
16426 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16427 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16428
16429 int ftrace_arch_read_dyn_info(char *buf, int size)
16430 {
16431 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16432
16433 r = snprintf(buf, size, "%u %u",
16434 nmi_wait_count,
16435 - atomic_read(&nmi_update_count));
16436 + atomic_read_unchecked(&nmi_update_count));
16437 return r;
16438 }
16439
16440 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16441
16442 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16443 smp_rmb();
16444 + pax_open_kernel();
16445 ftrace_mod_code();
16446 - atomic_inc(&nmi_update_count);
16447 + pax_close_kernel();
16448 + atomic_inc_unchecked(&nmi_update_count);
16449 }
16450 /* Must have previous changes seen before executions */
16451 smp_mb();
16452 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16453 {
16454 unsigned char replaced[MCOUNT_INSN_SIZE];
16455
16456 + ip = ktla_ktva(ip);
16457 +
16458 /*
16459 * Note: Due to modules and __init, code can
16460 * disappear and change, we need to protect against faulting
16461 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16462 unsigned char old[MCOUNT_INSN_SIZE], *new;
16463 int ret;
16464
16465 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16466 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16467 new = ftrace_call_replace(ip, (unsigned long)func);
16468 ret = ftrace_modify_code(ip, old, new);
16469
16470 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16471 {
16472 unsigned char code[MCOUNT_INSN_SIZE];
16473
16474 + ip = ktla_ktva(ip);
16475 +
16476 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16477 return -EFAULT;
16478
16479 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16480 index 51ff186..9e77418 100644
16481 --- a/arch/x86/kernel/head32.c
16482 +++ b/arch/x86/kernel/head32.c
16483 @@ -19,6 +19,7 @@
16484 #include <asm/io_apic.h>
16485 #include <asm/bios_ebda.h>
16486 #include <asm/tlbflush.h>
16487 +#include <asm/boot.h>
16488
16489 static void __init i386_default_early_setup(void)
16490 {
16491 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16492
16493 void __init i386_start_kernel(void)
16494 {
16495 - memblock_reserve(__pa_symbol(&_text),
16496 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16497 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16498
16499 #ifdef CONFIG_BLK_DEV_INITRD
16500 /* Reserve INITRD */
16501 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16502 index ce0be7c..c41476e 100644
16503 --- a/arch/x86/kernel/head_32.S
16504 +++ b/arch/x86/kernel/head_32.S
16505 @@ -25,6 +25,12 @@
16506 /* Physical address */
16507 #define pa(X) ((X) - __PAGE_OFFSET)
16508
16509 +#ifdef CONFIG_PAX_KERNEXEC
16510 +#define ta(X) (X)
16511 +#else
16512 +#define ta(X) ((X) - __PAGE_OFFSET)
16513 +#endif
16514 +
16515 /*
16516 * References to members of the new_cpu_data structure.
16517 */
16518 @@ -54,11 +60,7 @@
16519 * and small than max_low_pfn, otherwise will waste some page table entries
16520 */
16521
16522 -#if PTRS_PER_PMD > 1
16523 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16524 -#else
16525 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16526 -#endif
16527 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16528
16529 /* Number of possible pages in the lowmem region */
16530 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16531 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16532 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16533
16534 /*
16535 + * Real beginning of normal "text" segment
16536 + */
16537 +ENTRY(stext)
16538 +ENTRY(_stext)
16539 +
16540 +/*
16541 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16542 * %esi points to the real-mode code as a 32-bit pointer.
16543 * CS and DS must be 4 GB flat segments, but we don't depend on
16544 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16545 * can.
16546 */
16547 __HEAD
16548 +
16549 +#ifdef CONFIG_PAX_KERNEXEC
16550 + jmp startup_32
16551 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16552 +.fill PAGE_SIZE-5,1,0xcc
16553 +#endif
16554 +
16555 ENTRY(startup_32)
16556 movl pa(stack_start),%ecx
16557
16558 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16559 2:
16560 leal -__PAGE_OFFSET(%ecx),%esp
16561
16562 +#ifdef CONFIG_SMP
16563 + movl $pa(cpu_gdt_table),%edi
16564 + movl $__per_cpu_load,%eax
16565 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16566 + rorl $16,%eax
16567 + movb %al,__KERNEL_PERCPU + 4(%edi)
16568 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16569 + movl $__per_cpu_end - 1,%eax
16570 + subl $__per_cpu_start,%eax
16571 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16572 +#endif
16573 +
16574 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16575 + movl $NR_CPUS,%ecx
16576 + movl $pa(cpu_gdt_table),%edi
16577 +1:
16578 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16579 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16580 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16581 + addl $PAGE_SIZE_asm,%edi
16582 + loop 1b
16583 +#endif
16584 +
16585 +#ifdef CONFIG_PAX_KERNEXEC
16586 + movl $pa(boot_gdt),%edi
16587 + movl $__LOAD_PHYSICAL_ADDR,%eax
16588 + movw %ax,__BOOT_CS + 2(%edi)
16589 + rorl $16,%eax
16590 + movb %al,__BOOT_CS + 4(%edi)
16591 + movb %ah,__BOOT_CS + 7(%edi)
16592 + rorl $16,%eax
16593 +
16594 + ljmp $(__BOOT_CS),$1f
16595 +1:
16596 +
16597 + movl $NR_CPUS,%ecx
16598 + movl $pa(cpu_gdt_table),%edi
16599 + addl $__PAGE_OFFSET,%eax
16600 +1:
16601 + movw %ax,__KERNEL_CS + 2(%edi)
16602 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16603 + rorl $16,%eax
16604 + movb %al,__KERNEL_CS + 4(%edi)
16605 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16606 + movb %ah,__KERNEL_CS + 7(%edi)
16607 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16608 + rorl $16,%eax
16609 + addl $PAGE_SIZE_asm,%edi
16610 + loop 1b
16611 +#endif
16612 +
16613 /*
16614 * Clear BSS first so that there are no surprises...
16615 */
16616 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16617 movl %eax, pa(max_pfn_mapped)
16618
16619 /* Do early initialization of the fixmap area */
16620 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16621 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16622 +#ifdef CONFIG_COMPAT_VDSO
16623 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16624 +#else
16625 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16626 +#endif
16627 #else /* Not PAE */
16628
16629 page_pde_offset = (__PAGE_OFFSET >> 20);
16630 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16631 movl %eax, pa(max_pfn_mapped)
16632
16633 /* Do early initialization of the fixmap area */
16634 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16635 - movl %eax,pa(initial_page_table+0xffc)
16636 +#ifdef CONFIG_COMPAT_VDSO
16637 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16638 +#else
16639 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16640 +#endif
16641 #endif
16642
16643 #ifdef CONFIG_PARAVIRT
16644 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16645 cmpl $num_subarch_entries, %eax
16646 jae bad_subarch
16647
16648 - movl pa(subarch_entries)(,%eax,4), %eax
16649 - subl $__PAGE_OFFSET, %eax
16650 - jmp *%eax
16651 + jmp *pa(subarch_entries)(,%eax,4)
16652
16653 bad_subarch:
16654 WEAK(lguest_entry)
16655 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16656 __INITDATA
16657
16658 subarch_entries:
16659 - .long default_entry /* normal x86/PC */
16660 - .long lguest_entry /* lguest hypervisor */
16661 - .long xen_entry /* Xen hypervisor */
16662 - .long default_entry /* Moorestown MID */
16663 + .long ta(default_entry) /* normal x86/PC */
16664 + .long ta(lguest_entry) /* lguest hypervisor */
16665 + .long ta(xen_entry) /* Xen hypervisor */
16666 + .long ta(default_entry) /* Moorestown MID */
16667 num_subarch_entries = (. - subarch_entries) / 4
16668 .previous
16669 #else
16670 @@ -312,6 +382,7 @@ default_entry:
16671 orl %edx,%eax
16672 movl %eax,%cr4
16673
16674 +#ifdef CONFIG_X86_PAE
16675 testb $X86_CR4_PAE, %al # check if PAE is enabled
16676 jz 6f
16677
16678 @@ -340,6 +411,9 @@ default_entry:
16679 /* Make changes effective */
16680 wrmsr
16681
16682 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16683 +#endif
16684 +
16685 6:
16686
16687 /*
16688 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16689 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16690 movl %eax,%ss # after changing gdt.
16691
16692 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16693 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16694 movl %eax,%ds
16695 movl %eax,%es
16696
16697 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16698 */
16699 cmpb $0,ready
16700 jne 1f
16701 - movl $gdt_page,%eax
16702 + movl $cpu_gdt_table,%eax
16703 movl $stack_canary,%ecx
16704 +#ifdef CONFIG_SMP
16705 + addl $__per_cpu_load,%ecx
16706 +#endif
16707 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
16708 shrl $16, %ecx
16709 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
16710 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
16711 1:
16712 -#endif
16713 movl $(__KERNEL_STACK_CANARY),%eax
16714 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
16715 + movl $(__USER_DS),%eax
16716 +#else
16717 + xorl %eax,%eax
16718 +#endif
16719 movl %eax,%gs
16720
16721 xorl %eax,%eax # Clear LDT
16722 @@ -558,22 +639,22 @@ early_page_fault:
16723 jmp early_fault
16724
16725 early_fault:
16726 - cld
16727 #ifdef CONFIG_PRINTK
16728 + cmpl $1,%ss:early_recursion_flag
16729 + je hlt_loop
16730 + incl %ss:early_recursion_flag
16731 + cld
16732 pusha
16733 movl $(__KERNEL_DS),%eax
16734 movl %eax,%ds
16735 movl %eax,%es
16736 - cmpl $2,early_recursion_flag
16737 - je hlt_loop
16738 - incl early_recursion_flag
16739 movl %cr2,%eax
16740 pushl %eax
16741 pushl %edx /* trapno */
16742 pushl $fault_msg
16743 call printk
16744 +; call dump_stack
16745 #endif
16746 - call dump_stack
16747 hlt_loop:
16748 hlt
16749 jmp hlt_loop
16750 @@ -581,8 +662,11 @@ hlt_loop:
16751 /* This is the default interrupt "handler" :-) */
16752 ALIGN
16753 ignore_int:
16754 - cld
16755 #ifdef CONFIG_PRINTK
16756 + cmpl $2,%ss:early_recursion_flag
16757 + je hlt_loop
16758 + incl %ss:early_recursion_flag
16759 + cld
16760 pushl %eax
16761 pushl %ecx
16762 pushl %edx
16763 @@ -591,9 +675,6 @@ ignore_int:
16764 movl $(__KERNEL_DS),%eax
16765 movl %eax,%ds
16766 movl %eax,%es
16767 - cmpl $2,early_recursion_flag
16768 - je hlt_loop
16769 - incl early_recursion_flag
16770 pushl 16(%esp)
16771 pushl 24(%esp)
16772 pushl 32(%esp)
16773 @@ -622,29 +703,43 @@ ENTRY(initial_code)
16774 /*
16775 * BSS section
16776 */
16777 -__PAGE_ALIGNED_BSS
16778 - .align PAGE_SIZE
16779 #ifdef CONFIG_X86_PAE
16780 +.section .initial_pg_pmd,"a",@progbits
16781 initial_pg_pmd:
16782 .fill 1024*KPMDS,4,0
16783 #else
16784 +.section .initial_page_table,"a",@progbits
16785 ENTRY(initial_page_table)
16786 .fill 1024,4,0
16787 #endif
16788 +.section .initial_pg_fixmap,"a",@progbits
16789 initial_pg_fixmap:
16790 .fill 1024,4,0
16791 +.section .empty_zero_page,"a",@progbits
16792 ENTRY(empty_zero_page)
16793 .fill 4096,1,0
16794 +.section .swapper_pg_dir,"a",@progbits
16795 ENTRY(swapper_pg_dir)
16796 +#ifdef CONFIG_X86_PAE
16797 + .fill 4,8,0
16798 +#else
16799 .fill 1024,4,0
16800 +#endif
16801 +
16802 +/*
16803 + * The IDT has to be page-aligned to simplify the Pentium
16804 + * F0 0F bug workaround.. We have a special link segment
16805 + * for this.
16806 + */
16807 +.section .idt,"a",@progbits
16808 +ENTRY(idt_table)
16809 + .fill 256,8,0
16810
16811 /*
16812 * This starts the data section.
16813 */
16814 #ifdef CONFIG_X86_PAE
16815 -__PAGE_ALIGNED_DATA
16816 - /* Page-aligned for the benefit of paravirt? */
16817 - .align PAGE_SIZE
16818 +.section .initial_page_table,"a",@progbits
16819 ENTRY(initial_page_table)
16820 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
16821 # if KPMDS == 3
16822 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
16823 # error "Kernel PMDs should be 1, 2 or 3"
16824 # endif
16825 .align PAGE_SIZE /* needs to be page-sized too */
16826 +
16827 +#ifdef CONFIG_PAX_PER_CPU_PGD
16828 +ENTRY(cpu_pgd)
16829 + .rept NR_CPUS
16830 + .fill 4,8,0
16831 + .endr
16832 +#endif
16833 +
16834 #endif
16835
16836 .data
16837 .balign 4
16838 ENTRY(stack_start)
16839 - .long init_thread_union+THREAD_SIZE
16840 + .long init_thread_union+THREAD_SIZE-8
16841
16842 +ready: .byte 0
16843 +
16844 +.section .rodata,"a",@progbits
16845 early_recursion_flag:
16846 .long 0
16847
16848 -ready: .byte 0
16849 -
16850 int_msg:
16851 .asciz "Unknown interrupt or fault at: %p %p %p\n"
16852
16853 @@ -707,7 +811,7 @@ fault_msg:
16854 .word 0 # 32 bit align gdt_desc.address
16855 boot_gdt_descr:
16856 .word __BOOT_DS+7
16857 - .long boot_gdt - __PAGE_OFFSET
16858 + .long pa(boot_gdt)
16859
16860 .word 0 # 32-bit align idt_desc.address
16861 idt_descr:
16862 @@ -718,7 +822,7 @@ idt_descr:
16863 .word 0 # 32 bit align gdt_desc.address
16864 ENTRY(early_gdt_descr)
16865 .word GDT_ENTRIES*8-1
16866 - .long gdt_page /* Overwritten for secondary CPUs */
16867 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
16868
16869 /*
16870 * The boot_gdt must mirror the equivalent in setup.S and is
16871 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
16872 .align L1_CACHE_BYTES
16873 ENTRY(boot_gdt)
16874 .fill GDT_ENTRY_BOOT_CS,8,0
16875 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
16876 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
16877 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
16878 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
16879 +
16880 + .align PAGE_SIZE_asm
16881 +ENTRY(cpu_gdt_table)
16882 + .rept NR_CPUS
16883 + .quad 0x0000000000000000 /* NULL descriptor */
16884 + .quad 0x0000000000000000 /* 0x0b reserved */
16885 + .quad 0x0000000000000000 /* 0x13 reserved */
16886 + .quad 0x0000000000000000 /* 0x1b reserved */
16887 +
16888 +#ifdef CONFIG_PAX_KERNEXEC
16889 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
16890 +#else
16891 + .quad 0x0000000000000000 /* 0x20 unused */
16892 +#endif
16893 +
16894 + .quad 0x0000000000000000 /* 0x28 unused */
16895 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
16896 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
16897 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
16898 + .quad 0x0000000000000000 /* 0x4b reserved */
16899 + .quad 0x0000000000000000 /* 0x53 reserved */
16900 + .quad 0x0000000000000000 /* 0x5b reserved */
16901 +
16902 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
16903 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
16904 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
16905 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
16906 +
16907 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
16908 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
16909 +
16910 + /*
16911 + * Segments used for calling PnP BIOS have byte granularity.
16912 + * The code segments and data segments have fixed 64k limits,
16913 + * the transfer segment sizes are set at run time.
16914 + */
16915 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
16916 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
16917 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
16918 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
16919 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
16920 +
16921 + /*
16922 + * The APM segments have byte granularity and their bases
16923 + * are set at run time. All have 64k limits.
16924 + */
16925 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
16926 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
16927 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
16928 +
16929 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
16930 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
16931 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
16932 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
16933 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
16934 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
16935 +
16936 + /* Be sure this is zeroed to avoid false validations in Xen */
16937 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
16938 + .endr
16939 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
16940 index 40f4eb3..6d24d9d 100644
16941 --- a/arch/x86/kernel/head_64.S
16942 +++ b/arch/x86/kernel/head_64.S
16943 @@ -19,6 +19,8 @@
16944 #include <asm/cache.h>
16945 #include <asm/processor-flags.h>
16946 #include <asm/percpu.h>
16947 +#include <asm/cpufeature.h>
16948 +#include <asm/alternative-asm.h>
16949
16950 #ifdef CONFIG_PARAVIRT
16951 #include <asm/asm-offsets.h>
16952 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
16953 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
16954 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
16955 L3_START_KERNEL = pud_index(__START_KERNEL_map)
16956 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
16957 +L3_VMALLOC_START = pud_index(VMALLOC_START)
16958 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
16959 +L3_VMALLOC_END = pud_index(VMALLOC_END)
16960 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
16961 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
16962
16963 .text
16964 __HEAD
16965 @@ -85,35 +93,23 @@ startup_64:
16966 */
16967 addq %rbp, init_level4_pgt + 0(%rip)
16968 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
16969 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
16970 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
16971 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
16972 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
16973
16974 addq %rbp, level3_ident_pgt + 0(%rip)
16975 +#ifndef CONFIG_XEN
16976 + addq %rbp, level3_ident_pgt + 8(%rip)
16977 +#endif
16978
16979 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
16980 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
16981 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
16982 +
16983 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
16984 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
16985
16986 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
16987 -
16988 - /* Add an Identity mapping if I am above 1G */
16989 - leaq _text(%rip), %rdi
16990 - andq $PMD_PAGE_MASK, %rdi
16991 -
16992 - movq %rdi, %rax
16993 - shrq $PUD_SHIFT, %rax
16994 - andq $(PTRS_PER_PUD - 1), %rax
16995 - jz ident_complete
16996 -
16997 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
16998 - leaq level3_ident_pgt(%rip), %rbx
16999 - movq %rdx, 0(%rbx, %rax, 8)
17000 -
17001 - movq %rdi, %rax
17002 - shrq $PMD_SHIFT, %rax
17003 - andq $(PTRS_PER_PMD - 1), %rax
17004 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17005 - leaq level2_spare_pgt(%rip), %rbx
17006 - movq %rdx, 0(%rbx, %rax, 8)
17007 -ident_complete:
17008 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17009
17010 /*
17011 * Fixup the kernel text+data virtual addresses. Note that
17012 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17013 * after the boot processor executes this code.
17014 */
17015
17016 - /* Enable PAE mode and PGE */
17017 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17018 + /* Enable PAE mode and PSE/PGE */
17019 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17020 movq %rax, %cr4
17021
17022 /* Setup early boot stage 4 level pagetables. */
17023 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17024 movl $MSR_EFER, %ecx
17025 rdmsr
17026 btsl $_EFER_SCE, %eax /* Enable System Call */
17027 - btl $20,%edi /* No Execute supported? */
17028 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17029 jnc 1f
17030 btsl $_EFER_NX, %eax
17031 + leaq init_level4_pgt(%rip), %rdi
17032 +#ifndef CONFIG_EFI
17033 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17034 +#endif
17035 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17036 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17037 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17038 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17039 1: wrmsr /* Make changes effective */
17040
17041 /* Setup cr0 */
17042 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17043 * jump. In addition we need to ensure %cs is set so we make this
17044 * a far return.
17045 */
17046 + pax_set_fptr_mask
17047 movq initial_code(%rip),%rax
17048 pushq $0 # fake return address to stop unwinder
17049 pushq $__KERNEL_CS # set correct cs
17050 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17051 bad_address:
17052 jmp bad_address
17053
17054 - .section ".init.text","ax"
17055 + __INIT
17056 #ifdef CONFIG_EARLY_PRINTK
17057 .globl early_idt_handlers
17058 early_idt_handlers:
17059 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17060 #endif /* EARLY_PRINTK */
17061 1: hlt
17062 jmp 1b
17063 + .previous
17064
17065 #ifdef CONFIG_EARLY_PRINTK
17066 + __INITDATA
17067 early_recursion_flag:
17068 .long 0
17069 + .previous
17070
17071 + .section .rodata,"a",@progbits
17072 early_idt_msg:
17073 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17074 early_idt_ripmsg:
17075 .asciz "RIP %s\n"
17076 + .previous
17077 #endif /* CONFIG_EARLY_PRINTK */
17078 - .previous
17079
17080 + .section .rodata,"a",@progbits
17081 #define NEXT_PAGE(name) \
17082 .balign PAGE_SIZE; \
17083 ENTRY(name)
17084 @@ -338,7 +348,6 @@ ENTRY(name)
17085 i = i + 1 ; \
17086 .endr
17087
17088 - .data
17089 /*
17090 * This default setting generates an ident mapping at address 0x100000
17091 * and a mapping for the kernel that precisely maps virtual address
17092 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17093 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17094 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17095 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17096 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17097 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17098 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17099 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17100 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17101 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17102 .org init_level4_pgt + L4_START_KERNEL*8, 0
17103 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17104 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17105
17106 +#ifdef CONFIG_PAX_PER_CPU_PGD
17107 +NEXT_PAGE(cpu_pgd)
17108 + .rept NR_CPUS
17109 + .fill 512,8,0
17110 + .endr
17111 +#endif
17112 +
17113 NEXT_PAGE(level3_ident_pgt)
17114 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17115 +#ifdef CONFIG_XEN
17116 .fill 511,8,0
17117 +#else
17118 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17119 + .fill 510,8,0
17120 +#endif
17121 +
17122 +NEXT_PAGE(level3_vmalloc_start_pgt)
17123 + .fill 512,8,0
17124 +
17125 +NEXT_PAGE(level3_vmalloc_end_pgt)
17126 + .fill 512,8,0
17127 +
17128 +NEXT_PAGE(level3_vmemmap_pgt)
17129 + .fill L3_VMEMMAP_START,8,0
17130 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17131
17132 NEXT_PAGE(level3_kernel_pgt)
17133 .fill L3_START_KERNEL,8,0
17134 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17135 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17136 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17137
17138 +NEXT_PAGE(level2_vmemmap_pgt)
17139 + .fill 512,8,0
17140 +
17141 NEXT_PAGE(level2_fixmap_pgt)
17142 - .fill 506,8,0
17143 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17144 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17145 - .fill 5,8,0
17146 + .fill 507,8,0
17147 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17148 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17149 + .fill 4,8,0
17150
17151 -NEXT_PAGE(level1_fixmap_pgt)
17152 +NEXT_PAGE(level1_vsyscall_pgt)
17153 .fill 512,8,0
17154
17155 -NEXT_PAGE(level2_ident_pgt)
17156 - /* Since I easily can, map the first 1G.
17157 + /* Since I easily can, map the first 2G.
17158 * Don't set NX because code runs from these pages.
17159 */
17160 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17161 +NEXT_PAGE(level2_ident_pgt)
17162 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17163
17164 NEXT_PAGE(level2_kernel_pgt)
17165 /*
17166 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17167 * If you want to increase this then increase MODULES_VADDR
17168 * too.)
17169 */
17170 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17171 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17172 -
17173 -NEXT_PAGE(level2_spare_pgt)
17174 - .fill 512, 8, 0
17175 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17176
17177 #undef PMDS
17178 #undef NEXT_PAGE
17179
17180 - .data
17181 + .align PAGE_SIZE
17182 +ENTRY(cpu_gdt_table)
17183 + .rept NR_CPUS
17184 + .quad 0x0000000000000000 /* NULL descriptor */
17185 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17186 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17187 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17188 + .quad 0x00cffb000000ffff /* __USER32_CS */
17189 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17190 + .quad 0x00affb000000ffff /* __USER_CS */
17191 +
17192 +#ifdef CONFIG_PAX_KERNEXEC
17193 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17194 +#else
17195 + .quad 0x0 /* unused */
17196 +#endif
17197 +
17198 + .quad 0,0 /* TSS */
17199 + .quad 0,0 /* LDT */
17200 + .quad 0,0,0 /* three TLS descriptors */
17201 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17202 + /* asm/segment.h:GDT_ENTRIES must match this */
17203 +
17204 + /* zero the remaining page */
17205 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17206 + .endr
17207 +
17208 .align 16
17209 .globl early_gdt_descr
17210 early_gdt_descr:
17211 .word GDT_ENTRIES*8-1
17212 early_gdt_descr_base:
17213 - .quad INIT_PER_CPU_VAR(gdt_page)
17214 + .quad cpu_gdt_table
17215
17216 ENTRY(phys_base)
17217 /* This must match the first entry in level2_kernel_pgt */
17218 .quad 0x0000000000000000
17219
17220 #include "../../x86/xen/xen-head.S"
17221 -
17222 - .section .bss, "aw", @nobits
17223 +
17224 + .section .rodata,"a",@progbits
17225 .align L1_CACHE_BYTES
17226 ENTRY(idt_table)
17227 - .skip IDT_ENTRIES * 16
17228 + .fill 512,8,0
17229
17230 .align L1_CACHE_BYTES
17231 ENTRY(nmi_idt_table)
17232 - .skip IDT_ENTRIES * 16
17233 + .fill 512,8,0
17234
17235 __PAGE_ALIGNED_BSS
17236 .align PAGE_SIZE
17237 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17238 index 9c3bd4a..e1d9b35 100644
17239 --- a/arch/x86/kernel/i386_ksyms_32.c
17240 +++ b/arch/x86/kernel/i386_ksyms_32.c
17241 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17242 EXPORT_SYMBOL(cmpxchg8b_emu);
17243 #endif
17244
17245 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17246 +
17247 /* Networking helper routines. */
17248 EXPORT_SYMBOL(csum_partial_copy_generic);
17249 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17250 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17251
17252 EXPORT_SYMBOL(__get_user_1);
17253 EXPORT_SYMBOL(__get_user_2);
17254 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17255
17256 EXPORT_SYMBOL(csum_partial);
17257 EXPORT_SYMBOL(empty_zero_page);
17258 +
17259 +#ifdef CONFIG_PAX_KERNEXEC
17260 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17261 +#endif
17262 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17263 index f239f30..aab2a58 100644
17264 --- a/arch/x86/kernel/i387.c
17265 +++ b/arch/x86/kernel/i387.c
17266 @@ -189,6 +189,9 @@ int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
17267
17268 int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17269 unsigned int pos, unsigned int count,
17270 + void *kbuf, void __user *ubuf) __size_overflow(4);
17271 +int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17272 + unsigned int pos, unsigned int count,
17273 void *kbuf, void __user *ubuf)
17274 {
17275 int ret;
17276 @@ -208,6 +211,9 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
17277
17278 int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17279 unsigned int pos, unsigned int count,
17280 + const void *kbuf, const void __user *ubuf) __size_overflow(4);
17281 +int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17282 + unsigned int pos, unsigned int count,
17283 const void *kbuf, const void __user *ubuf)
17284 {
17285 int ret;
17286 @@ -241,6 +247,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
17287
17288 int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17289 unsigned int pos, unsigned int count,
17290 + void *kbuf, void __user *ubuf) __size_overflow(4);
17291 +int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17292 + unsigned int pos, unsigned int count,
17293 void *kbuf, void __user *ubuf)
17294 {
17295 int ret;
17296 @@ -270,6 +279,9 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
17297
17298 int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17299 unsigned int pos, unsigned int count,
17300 + const void *kbuf, const void __user *ubuf) __size_overflow(4);
17301 +int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
17302 + unsigned int pos, unsigned int count,
17303 const void *kbuf, const void __user *ubuf)
17304 {
17305 int ret;
17306 @@ -440,6 +452,9 @@ static void convert_to_fxsr(struct task_struct *tsk,
17307
17308 int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17309 unsigned int pos, unsigned int count,
17310 + void *kbuf, void __user *ubuf) __size_overflow(3,4);
17311 +int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17312 + unsigned int pos, unsigned int count,
17313 void *kbuf, void __user *ubuf)
17314 {
17315 struct user_i387_ia32_struct env;
17316 @@ -472,6 +487,9 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
17317
17318 int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17319 unsigned int pos, unsigned int count,
17320 + const void *kbuf, const void __user *ubuf) __size_overflow(3,4);
17321 +int fpregs_set(struct task_struct *target, const struct user_regset *regset,
17322 + unsigned int pos, unsigned int count,
17323 const void *kbuf, const void __user *ubuf)
17324 {
17325 struct user_i387_ia32_struct env;
17326 @@ -620,6 +638,8 @@ static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf)
17327 }
17328
17329 static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17330 + unsigned int size) __size_overflow(2);
17331 +static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf,
17332 unsigned int size)
17333 {
17334 struct task_struct *tsk = current;
17335 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17336 index 6104852..6114160 100644
17337 --- a/arch/x86/kernel/i8259.c
17338 +++ b/arch/x86/kernel/i8259.c
17339 @@ -210,7 +210,7 @@ spurious_8259A_irq:
17340 "spurious 8259A interrupt: IRQ%d.\n", irq);
17341 spurious_irq_mask |= irqmask;
17342 }
17343 - atomic_inc(&irq_err_count);
17344 + atomic_inc_unchecked(&irq_err_count);
17345 /*
17346 * Theoretically we do not have to handle this IRQ,
17347 * but in Linux this does not cause problems and is
17348 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17349 index 43e9ccf..44ccf6f 100644
17350 --- a/arch/x86/kernel/init_task.c
17351 +++ b/arch/x86/kernel/init_task.c
17352 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17353 * way process stacks are handled. This is done by having a special
17354 * "init_task" linker map entry..
17355 */
17356 -union thread_union init_thread_union __init_task_data =
17357 - { INIT_THREAD_INFO(init_task) };
17358 +union thread_union init_thread_union __init_task_data;
17359
17360 /*
17361 * Initial task structure.
17362 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17363 * section. Since TSS's are completely CPU-local, we want them
17364 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17365 */
17366 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17367 -
17368 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17369 +EXPORT_SYMBOL(init_tss);
17370 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17371 index 8c96897..be66bfa 100644
17372 --- a/arch/x86/kernel/ioport.c
17373 +++ b/arch/x86/kernel/ioport.c
17374 @@ -6,6 +6,7 @@
17375 #include <linux/sched.h>
17376 #include <linux/kernel.h>
17377 #include <linux/capability.h>
17378 +#include <linux/security.h>
17379 #include <linux/errno.h>
17380 #include <linux/types.h>
17381 #include <linux/ioport.h>
17382 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17383
17384 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17385 return -EINVAL;
17386 +#ifdef CONFIG_GRKERNSEC_IO
17387 + if (turn_on && grsec_disable_privio) {
17388 + gr_handle_ioperm();
17389 + return -EPERM;
17390 + }
17391 +#endif
17392 if (turn_on && !capable(CAP_SYS_RAWIO))
17393 return -EPERM;
17394
17395 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17396 * because the ->io_bitmap_max value must match the bitmap
17397 * contents:
17398 */
17399 - tss = &per_cpu(init_tss, get_cpu());
17400 + tss = init_tss + get_cpu();
17401
17402 if (turn_on)
17403 bitmap_clear(t->io_bitmap_ptr, from, num);
17404 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17405 return -EINVAL;
17406 /* Trying to gain more privileges? */
17407 if (level > old) {
17408 +#ifdef CONFIG_GRKERNSEC_IO
17409 + if (grsec_disable_privio) {
17410 + gr_handle_iopl();
17411 + return -EPERM;
17412 + }
17413 +#endif
17414 if (!capable(CAP_SYS_RAWIO))
17415 return -EPERM;
17416 }
17417 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17418 index 7943e0c..dd32c5c 100644
17419 --- a/arch/x86/kernel/irq.c
17420 +++ b/arch/x86/kernel/irq.c
17421 @@ -18,7 +18,7 @@
17422 #include <asm/mce.h>
17423 #include <asm/hw_irq.h>
17424
17425 -atomic_t irq_err_count;
17426 +atomic_unchecked_t irq_err_count;
17427
17428 /* Function pointer for generic interrupt vector handling */
17429 void (*x86_platform_ipi_callback)(void) = NULL;
17430 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17431 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17432 seq_printf(p, " Machine check polls\n");
17433 #endif
17434 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17435 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17436 #if defined(CONFIG_X86_IO_APIC)
17437 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17438 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17439 #endif
17440 return 0;
17441 }
17442 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17443
17444 u64 arch_irq_stat(void)
17445 {
17446 - u64 sum = atomic_read(&irq_err_count);
17447 + u64 sum = atomic_read_unchecked(&irq_err_count);
17448
17449 #ifdef CONFIG_X86_IO_APIC
17450 - sum += atomic_read(&irq_mis_count);
17451 + sum += atomic_read_unchecked(&irq_mis_count);
17452 #endif
17453 return sum;
17454 }
17455 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17456 index 40fc861..9b8739b 100644
17457 --- a/arch/x86/kernel/irq_32.c
17458 +++ b/arch/x86/kernel/irq_32.c
17459 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17460 __asm__ __volatile__("andl %%esp,%0" :
17461 "=r" (sp) : "0" (THREAD_SIZE - 1));
17462
17463 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17464 + return sp < STACK_WARN;
17465 }
17466
17467 static void print_stack_overflow(void)
17468 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17469 * per-CPU IRQ handling contexts (thread information and stack)
17470 */
17471 union irq_ctx {
17472 - struct thread_info tinfo;
17473 - u32 stack[THREAD_SIZE/sizeof(u32)];
17474 + unsigned long previous_esp;
17475 + u32 stack[THREAD_SIZE/sizeof(u32)];
17476 } __attribute__((aligned(THREAD_SIZE)));
17477
17478 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17479 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17480 static inline int
17481 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17482 {
17483 - union irq_ctx *curctx, *irqctx;
17484 + union irq_ctx *irqctx;
17485 u32 *isp, arg1, arg2;
17486
17487 - curctx = (union irq_ctx *) current_thread_info();
17488 irqctx = __this_cpu_read(hardirq_ctx);
17489
17490 /*
17491 @@ -92,21 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17492 * handler) we can't do that and just have to keep using the
17493 * current stack (which is the irq stack already after all)
17494 */
17495 - if (unlikely(curctx == irqctx))
17496 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17497 return 0;
17498
17499 /* build the stack frame on the IRQ stack */
17500 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17501 - irqctx->tinfo.task = curctx->tinfo.task;
17502 - irqctx->tinfo.previous_esp = current_stack_pointer;
17503 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17504 + irqctx->previous_esp = current_stack_pointer;
17505
17506 - /*
17507 - * Copy the softirq bits in preempt_count so that the
17508 - * softirq checks work in the hardirq context.
17509 - */
17510 - irqctx->tinfo.preempt_count =
17511 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17512 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17513 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17514 + __set_fs(MAKE_MM_SEG(0));
17515 +#endif
17516
17517 if (unlikely(overflow))
17518 call_on_stack(print_stack_overflow, isp);
17519 @@ -118,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17520 : "0" (irq), "1" (desc), "2" (isp),
17521 "D" (desc->handle_irq)
17522 : "memory", "cc", "ecx");
17523 +
17524 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17525 + __set_fs(current_thread_info()->addr_limit);
17526 +#endif
17527 +
17528 return 1;
17529 }
17530
17531 @@ -126,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17532 */
17533 void __cpuinit irq_ctx_init(int cpu)
17534 {
17535 - union irq_ctx *irqctx;
17536 -
17537 if (per_cpu(hardirq_ctx, cpu))
17538 return;
17539
17540 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17541 - THREAD_FLAGS,
17542 - THREAD_ORDER));
17543 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17544 - irqctx->tinfo.cpu = cpu;
17545 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17546 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17547 -
17548 - per_cpu(hardirq_ctx, cpu) = irqctx;
17549 -
17550 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17551 - THREAD_FLAGS,
17552 - THREAD_ORDER));
17553 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17554 - irqctx->tinfo.cpu = cpu;
17555 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17556 -
17557 - per_cpu(softirq_ctx, cpu) = irqctx;
17558 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17559 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17560
17561 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17562 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17563 @@ -157,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17564 asmlinkage void do_softirq(void)
17565 {
17566 unsigned long flags;
17567 - struct thread_info *curctx;
17568 union irq_ctx *irqctx;
17569 u32 *isp;
17570
17571 @@ -167,15 +147,22 @@ asmlinkage void do_softirq(void)
17572 local_irq_save(flags);
17573
17574 if (local_softirq_pending()) {
17575 - curctx = current_thread_info();
17576 irqctx = __this_cpu_read(softirq_ctx);
17577 - irqctx->tinfo.task = curctx->task;
17578 - irqctx->tinfo.previous_esp = current_stack_pointer;
17579 + irqctx->previous_esp = current_stack_pointer;
17580
17581 /* build the stack frame on the softirq stack */
17582 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17583 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17584 +
17585 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17586 + __set_fs(MAKE_MM_SEG(0));
17587 +#endif
17588
17589 call_on_stack(__do_softirq, isp);
17590 +
17591 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17592 + __set_fs(current_thread_info()->addr_limit);
17593 +#endif
17594 +
17595 /*
17596 * Shouldn't happen, we returned above if in_interrupt():
17597 */
17598 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17599 index d04d3ec..ea4b374 100644
17600 --- a/arch/x86/kernel/irq_64.c
17601 +++ b/arch/x86/kernel/irq_64.c
17602 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17603 u64 estack_top, estack_bottom;
17604 u64 curbase = (u64)task_stack_page(current);
17605
17606 - if (user_mode_vm(regs))
17607 + if (user_mode(regs))
17608 return;
17609
17610 if (regs->sp >= curbase + sizeof(struct thread_info) +
17611 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17612 index 90fcf62..e682cdd 100644
17613 --- a/arch/x86/kernel/kdebugfs.c
17614 +++ b/arch/x86/kernel/kdebugfs.c
17615 @@ -28,6 +28,8 @@ struct setup_data_node {
17616 };
17617
17618 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17619 + size_t count, loff_t *ppos) __size_overflow(3);
17620 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17621 size_t count, loff_t *ppos)
17622 {
17623 struct setup_data_node *node = file->private_data;
17624 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17625 index 2f45c4c..d95504f 100644
17626 --- a/arch/x86/kernel/kgdb.c
17627 +++ b/arch/x86/kernel/kgdb.c
17628 @@ -126,11 +126,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17629 #ifdef CONFIG_X86_32
17630 switch (regno) {
17631 case GDB_SS:
17632 - if (!user_mode_vm(regs))
17633 + if (!user_mode(regs))
17634 *(unsigned long *)mem = __KERNEL_DS;
17635 break;
17636 case GDB_SP:
17637 - if (!user_mode_vm(regs))
17638 + if (!user_mode(regs))
17639 *(unsigned long *)mem = kernel_stack_pointer(regs);
17640 break;
17641 case GDB_GS:
17642 @@ -475,12 +475,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17643 case 'k':
17644 /* clear the trace bit */
17645 linux_regs->flags &= ~X86_EFLAGS_TF;
17646 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17647 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17648
17649 /* set the trace bit if we're stepping */
17650 if (remcomInBuffer[0] == 's') {
17651 linux_regs->flags |= X86_EFLAGS_TF;
17652 - atomic_set(&kgdb_cpu_doing_single_step,
17653 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17654 raw_smp_processor_id());
17655 }
17656
17657 @@ -545,7 +545,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17658
17659 switch (cmd) {
17660 case DIE_DEBUG:
17661 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17662 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17663 if (user_mode(regs))
17664 return single_step_cont(regs, args);
17665 break;
17666 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17667 index 7da647d..56fe348 100644
17668 --- a/arch/x86/kernel/kprobes.c
17669 +++ b/arch/x86/kernel/kprobes.c
17670 @@ -118,8 +118,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17671 } __attribute__((packed)) *insn;
17672
17673 insn = (struct __arch_relative_insn *)from;
17674 +
17675 + pax_open_kernel();
17676 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17677 insn->op = op;
17678 + pax_close_kernel();
17679 }
17680
17681 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17682 @@ -156,7 +159,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17683 kprobe_opcode_t opcode;
17684 kprobe_opcode_t *orig_opcodes = opcodes;
17685
17686 - if (search_exception_tables((unsigned long)opcodes))
17687 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17688 return 0; /* Page fault may occur on this address. */
17689
17690 retry:
17691 @@ -317,7 +320,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17692 }
17693 }
17694 insn_get_length(&insn);
17695 + pax_open_kernel();
17696 memcpy(dest, insn.kaddr, insn.length);
17697 + pax_close_kernel();
17698
17699 #ifdef CONFIG_X86_64
17700 if (insn_rip_relative(&insn)) {
17701 @@ -341,7 +346,9 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
17702 (u8 *) dest;
17703 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17704 disp = (u8 *) dest + insn_offset_displacement(&insn);
17705 + pax_open_kernel();
17706 *(s32 *) disp = (s32) newdisp;
17707 + pax_close_kernel();
17708 }
17709 #endif
17710 return insn.length;
17711 @@ -355,12 +362,12 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
17712 */
17713 __copy_instruction(p->ainsn.insn, p->addr, 0);
17714
17715 - if (can_boost(p->addr))
17716 + if (can_boost(ktla_ktva(p->addr)))
17717 p->ainsn.boostable = 0;
17718 else
17719 p->ainsn.boostable = -1;
17720
17721 - p->opcode = *p->addr;
17722 + p->opcode = *(ktla_ktva(p->addr));
17723 }
17724
17725 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17726 @@ -477,7 +484,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17727 * nor set current_kprobe, because it doesn't use single
17728 * stepping.
17729 */
17730 - regs->ip = (unsigned long)p->ainsn.insn;
17731 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17732 preempt_enable_no_resched();
17733 return;
17734 }
17735 @@ -496,7 +503,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17736 if (p->opcode == BREAKPOINT_INSTRUCTION)
17737 regs->ip = (unsigned long)p->addr;
17738 else
17739 - regs->ip = (unsigned long)p->ainsn.insn;
17740 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17741 }
17742
17743 /*
17744 @@ -575,7 +582,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17745 setup_singlestep(p, regs, kcb, 0);
17746 return 1;
17747 }
17748 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
17749 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17750 /*
17751 * The breakpoint instruction was removed right
17752 * after we hit it. Another cpu has removed
17753 @@ -683,6 +690,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17754 " movq %rax, 152(%rsp)\n"
17755 RESTORE_REGS_STRING
17756 " popfq\n"
17757 +#ifdef KERNEXEC_PLUGIN
17758 + " btsq $63,(%rsp)\n"
17759 +#endif
17760 #else
17761 " pushf\n"
17762 SAVE_REGS_STRING
17763 @@ -820,7 +830,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17764 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17765 {
17766 unsigned long *tos = stack_addr(regs);
17767 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17768 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17769 unsigned long orig_ip = (unsigned long)p->addr;
17770 kprobe_opcode_t *insn = p->ainsn.insn;
17771
17772 @@ -1002,7 +1012,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
17773 struct die_args *args = data;
17774 int ret = NOTIFY_DONE;
17775
17776 - if (args->regs && user_mode_vm(args->regs))
17777 + if (args->regs && user_mode(args->regs))
17778 return ret;
17779
17780 switch (val) {
17781 @@ -1384,7 +1394,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17782 * Verify if the address gap is in 2GB range, because this uses
17783 * a relative jump.
17784 */
17785 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17786 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17787 if (abs(rel) > 0x7fffffff)
17788 return -ERANGE;
17789
17790 @@ -1405,11 +1415,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17791 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17792
17793 /* Set probe function call */
17794 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17795 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17796
17797 /* Set returning jmp instruction at the tail of out-of-line buffer */
17798 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17799 - (u8 *)op->kp.addr + op->optinsn.size);
17800 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17801
17802 flush_icache_range((unsigned long) buf,
17803 (unsigned long) buf + TMPL_END_IDX +
17804 @@ -1431,7 +1441,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17805 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17806
17807 /* Backup instructions which will be replaced by jump address */
17808 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17809 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17810 RELATIVE_ADDR_SIZE);
17811
17812 insn_buf[0] = RELATIVEJUMP_OPCODE;
17813 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
17814 index ea69726..a305f16 100644
17815 --- a/arch/x86/kernel/ldt.c
17816 +++ b/arch/x86/kernel/ldt.c
17817 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
17818 if (reload) {
17819 #ifdef CONFIG_SMP
17820 preempt_disable();
17821 - load_LDT(pc);
17822 + load_LDT_nolock(pc);
17823 if (!cpumask_equal(mm_cpumask(current->mm),
17824 cpumask_of(smp_processor_id())))
17825 smp_call_function(flush_ldt, current->mm, 1);
17826 preempt_enable();
17827 #else
17828 - load_LDT(pc);
17829 + load_LDT_nolock(pc);
17830 #endif
17831 }
17832 if (oldsize) {
17833 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
17834 return err;
17835
17836 for (i = 0; i < old->size; i++)
17837 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
17838 + write_ldt_entry(new->ldt, i, old->ldt + i);
17839 return 0;
17840 }
17841
17842 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
17843 retval = copy_ldt(&mm->context, &old_mm->context);
17844 mutex_unlock(&old_mm->context.lock);
17845 }
17846 +
17847 + if (tsk == current) {
17848 + mm->context.vdso = 0;
17849 +
17850 +#ifdef CONFIG_X86_32
17851 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17852 + mm->context.user_cs_base = 0UL;
17853 + mm->context.user_cs_limit = ~0UL;
17854 +
17855 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
17856 + cpus_clear(mm->context.cpu_user_cs_mask);
17857 +#endif
17858 +
17859 +#endif
17860 +#endif
17861 +
17862 + }
17863 +
17864 return retval;
17865 }
17866
17867 @@ -141,6 +159,7 @@ void destroy_context(struct mm_struct *mm)
17868 }
17869 }
17870
17871 +static int read_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
17872 static int read_ldt(void __user *ptr, unsigned long bytecount)
17873 {
17874 int err;
17875 @@ -175,6 +194,7 @@ error_return:
17876 return err;
17877 }
17878
17879 +static int read_default_ldt(void __user *ptr, unsigned long bytecount) __size_overflow(2);
17880 static int read_default_ldt(void __user *ptr, unsigned long bytecount)
17881 {
17882 /* CHECKME: Can we use _one_ random number ? */
17883 @@ -230,6 +250,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
17884 }
17885 }
17886
17887 +#ifdef CONFIG_PAX_SEGMEXEC
17888 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
17889 + error = -EINVAL;
17890 + goto out_unlock;
17891 + }
17892 +#endif
17893 +
17894 fill_ldt(&ldt, &ldt_info);
17895 if (oldmode)
17896 ldt.avl = 0;
17897 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
17898 index a3fa43b..8966f4c 100644
17899 --- a/arch/x86/kernel/machine_kexec_32.c
17900 +++ b/arch/x86/kernel/machine_kexec_32.c
17901 @@ -27,7 +27,7 @@
17902 #include <asm/cacheflush.h>
17903 #include <asm/debugreg.h>
17904
17905 -static void set_idt(void *newidt, __u16 limit)
17906 +static void set_idt(struct desc_struct *newidt, __u16 limit)
17907 {
17908 struct desc_ptr curidt;
17909
17910 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 limit)
17911 }
17912
17913
17914 -static void set_gdt(void *newgdt, __u16 limit)
17915 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
17916 {
17917 struct desc_ptr curgdt;
17918
17919 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
17920 }
17921
17922 control_page = page_address(image->control_code_page);
17923 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
17924 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
17925
17926 relocate_kernel_ptr = control_page;
17927 page_list[PA_CONTROL_PAGE] = __pa(control_page);
17928 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
17929 index 3ca42d0..79d24cd 100644
17930 --- a/arch/x86/kernel/microcode_intel.c
17931 +++ b/arch/x86/kernel/microcode_intel.c
17932 @@ -434,15 +434,16 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
17933 return ret;
17934 }
17935
17936 +static int get_ucode_user(void *to, const void *from, size_t n) __size_overflow(3);
17937 static int get_ucode_user(void *to, const void *from, size_t n)
17938 {
17939 - return copy_from_user(to, from, n);
17940 + return copy_from_user(to, (const void __force_user *)from, n);
17941 }
17942
17943 static enum ucode_state
17944 request_microcode_user(int cpu, const void __user *buf, size_t size)
17945 {
17946 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
17947 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
17948 }
17949
17950 static void microcode_fini_cpu(int cpu)
17951 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
17952 index 925179f..1f0d561 100644
17953 --- a/arch/x86/kernel/module.c
17954 +++ b/arch/x86/kernel/module.c
17955 @@ -36,15 +36,61 @@
17956 #define DEBUGP(fmt...)
17957 #endif
17958
17959 -void *module_alloc(unsigned long size)
17960 +static inline void *__module_alloc(unsigned long size, pgprot_t prot) __size_overflow(1);
17961 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
17962 {
17963 - if (PAGE_ALIGN(size) > MODULES_LEN)
17964 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
17965 return NULL;
17966 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
17967 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
17968 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
17969 -1, __builtin_return_address(0));
17970 }
17971
17972 +void *module_alloc(unsigned long size)
17973 +{
17974 +
17975 +#ifdef CONFIG_PAX_KERNEXEC
17976 + return __module_alloc(size, PAGE_KERNEL);
17977 +#else
17978 + return __module_alloc(size, PAGE_KERNEL_EXEC);
17979 +#endif
17980 +
17981 +}
17982 +
17983 +#ifdef CONFIG_PAX_KERNEXEC
17984 +#ifdef CONFIG_X86_32
17985 +void *module_alloc_exec(unsigned long size)
17986 +{
17987 + struct vm_struct *area;
17988 +
17989 + if (size == 0)
17990 + return NULL;
17991 +
17992 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
17993 + return area ? area->addr : NULL;
17994 +}
17995 +EXPORT_SYMBOL(module_alloc_exec);
17996 +
17997 +void module_free_exec(struct module *mod, void *module_region)
17998 +{
17999 + vunmap(module_region);
18000 +}
18001 +EXPORT_SYMBOL(module_free_exec);
18002 +#else
18003 +void module_free_exec(struct module *mod, void *module_region)
18004 +{
18005 + module_free(mod, module_region);
18006 +}
18007 +EXPORT_SYMBOL(module_free_exec);
18008 +
18009 +void *module_alloc_exec(unsigned long size)
18010 +{
18011 + return __module_alloc(size, PAGE_KERNEL_RX);
18012 +}
18013 +EXPORT_SYMBOL(module_alloc_exec);
18014 +#endif
18015 +#endif
18016 +
18017 #ifdef CONFIG_X86_32
18018 int apply_relocate(Elf32_Shdr *sechdrs,
18019 const char *strtab,
18020 @@ -55,14 +101,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18021 unsigned int i;
18022 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18023 Elf32_Sym *sym;
18024 - uint32_t *location;
18025 + uint32_t *plocation, location;
18026
18027 DEBUGP("Applying relocate section %u to %u\n", relsec,
18028 sechdrs[relsec].sh_info);
18029 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18030 /* This is where to make the change */
18031 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18032 - + rel[i].r_offset;
18033 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18034 + location = (uint32_t)plocation;
18035 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18036 + plocation = ktla_ktva((void *)plocation);
18037 /* This is the symbol it is referring to. Note that all
18038 undefined symbols have been resolved. */
18039 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18040 @@ -71,11 +119,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18041 switch (ELF32_R_TYPE(rel[i].r_info)) {
18042 case R_386_32:
18043 /* We add the value into the location given */
18044 - *location += sym->st_value;
18045 + pax_open_kernel();
18046 + *plocation += sym->st_value;
18047 + pax_close_kernel();
18048 break;
18049 case R_386_PC32:
18050 /* Add the value, subtract its postition */
18051 - *location += sym->st_value - (uint32_t)location;
18052 + pax_open_kernel();
18053 + *plocation += sym->st_value - location;
18054 + pax_close_kernel();
18055 break;
18056 default:
18057 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18058 @@ -120,21 +172,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18059 case R_X86_64_NONE:
18060 break;
18061 case R_X86_64_64:
18062 + pax_open_kernel();
18063 *(u64 *)loc = val;
18064 + pax_close_kernel();
18065 break;
18066 case R_X86_64_32:
18067 + pax_open_kernel();
18068 *(u32 *)loc = val;
18069 + pax_close_kernel();
18070 if (val != *(u32 *)loc)
18071 goto overflow;
18072 break;
18073 case R_X86_64_32S:
18074 + pax_open_kernel();
18075 *(s32 *)loc = val;
18076 + pax_close_kernel();
18077 if ((s64)val != *(s32 *)loc)
18078 goto overflow;
18079 break;
18080 case R_X86_64_PC32:
18081 val -= (u64)loc;
18082 + pax_open_kernel();
18083 *(u32 *)loc = val;
18084 + pax_close_kernel();
18085 +
18086 #if 0
18087 if ((s64)val != *(s32 *)loc)
18088 goto overflow;
18089 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18090 index 47acaf3..ec48ab6 100644
18091 --- a/arch/x86/kernel/nmi.c
18092 +++ b/arch/x86/kernel/nmi.c
18093 @@ -505,6 +505,17 @@ static inline void nmi_nesting_postprocess(void)
18094 dotraplinkage notrace __kprobes void
18095 do_nmi(struct pt_regs *regs, long error_code)
18096 {
18097 +
18098 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18099 + if (!user_mode(regs)) {
18100 + unsigned long cs = regs->cs & 0xFFFF;
18101 + unsigned long ip = ktva_ktla(regs->ip);
18102 +
18103 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18104 + regs->ip = ip;
18105 + }
18106 +#endif
18107 +
18108 nmi_nesting_preprocess(regs);
18109
18110 nmi_enter();
18111 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18112 index 676b8c7..870ba04 100644
18113 --- a/arch/x86/kernel/paravirt-spinlocks.c
18114 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18115 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18116 arch_spin_lock(lock);
18117 }
18118
18119 -struct pv_lock_ops pv_lock_ops = {
18120 +struct pv_lock_ops pv_lock_ops __read_only = {
18121 #ifdef CONFIG_SMP
18122 .spin_is_locked = __ticket_spin_is_locked,
18123 .spin_is_contended = __ticket_spin_is_contended,
18124 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18125 index d90272e..6bb013b 100644
18126 --- a/arch/x86/kernel/paravirt.c
18127 +++ b/arch/x86/kernel/paravirt.c
18128 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18129 {
18130 return x;
18131 }
18132 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18133 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18134 +#endif
18135
18136 void __init default_banner(void)
18137 {
18138 @@ -145,15 +148,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18139 if (opfunc == NULL)
18140 /* If there's no function, patch it with a ud2a (BUG) */
18141 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18142 - else if (opfunc == _paravirt_nop)
18143 + else if (opfunc == (void *)_paravirt_nop)
18144 /* If the operation is a nop, then nop the callsite */
18145 ret = paravirt_patch_nop();
18146
18147 /* identity functions just return their single argument */
18148 - else if (opfunc == _paravirt_ident_32)
18149 + else if (opfunc == (void *)_paravirt_ident_32)
18150 ret = paravirt_patch_ident_32(insnbuf, len);
18151 - else if (opfunc == _paravirt_ident_64)
18152 + else if (opfunc == (void *)_paravirt_ident_64)
18153 ret = paravirt_patch_ident_64(insnbuf, len);
18154 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18155 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18156 + ret = paravirt_patch_ident_64(insnbuf, len);
18157 +#endif
18158
18159 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18160 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18161 @@ -178,7 +185,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18162 if (insn_len > len || start == NULL)
18163 insn_len = len;
18164 else
18165 - memcpy(insnbuf, start, insn_len);
18166 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18167
18168 return insn_len;
18169 }
18170 @@ -302,7 +309,7 @@ void arch_flush_lazy_mmu_mode(void)
18171 preempt_enable();
18172 }
18173
18174 -struct pv_info pv_info = {
18175 +struct pv_info pv_info __read_only = {
18176 .name = "bare hardware",
18177 .paravirt_enabled = 0,
18178 .kernel_rpl = 0,
18179 @@ -313,16 +320,16 @@ struct pv_info pv_info = {
18180 #endif
18181 };
18182
18183 -struct pv_init_ops pv_init_ops = {
18184 +struct pv_init_ops pv_init_ops __read_only = {
18185 .patch = native_patch,
18186 };
18187
18188 -struct pv_time_ops pv_time_ops = {
18189 +struct pv_time_ops pv_time_ops __read_only = {
18190 .sched_clock = native_sched_clock,
18191 .steal_clock = native_steal_clock,
18192 };
18193
18194 -struct pv_irq_ops pv_irq_ops = {
18195 +struct pv_irq_ops pv_irq_ops __read_only = {
18196 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18197 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18198 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18199 @@ -334,7 +341,7 @@ struct pv_irq_ops pv_irq_ops = {
18200 #endif
18201 };
18202
18203 -struct pv_cpu_ops pv_cpu_ops = {
18204 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18205 .cpuid = native_cpuid,
18206 .get_debugreg = native_get_debugreg,
18207 .set_debugreg = native_set_debugreg,
18208 @@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18209 .end_context_switch = paravirt_nop,
18210 };
18211
18212 -struct pv_apic_ops pv_apic_ops = {
18213 +struct pv_apic_ops pv_apic_ops __read_only = {
18214 #ifdef CONFIG_X86_LOCAL_APIC
18215 .startup_ipi_hook = paravirt_nop,
18216 #endif
18217 };
18218
18219 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18220 +#ifdef CONFIG_X86_32
18221 +#ifdef CONFIG_X86_PAE
18222 +/* 64-bit pagetable entries */
18223 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18224 +#else
18225 /* 32-bit pagetable entries */
18226 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18227 +#endif
18228 #else
18229 /* 64-bit pagetable entries */
18230 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18231 #endif
18232
18233 -struct pv_mmu_ops pv_mmu_ops = {
18234 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18235
18236 .read_cr2 = native_read_cr2,
18237 .write_cr2 = native_write_cr2,
18238 @@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18239 .make_pud = PTE_IDENT,
18240
18241 .set_pgd = native_set_pgd,
18242 + .set_pgd_batched = native_set_pgd_batched,
18243 #endif
18244 #endif /* PAGETABLE_LEVELS >= 3 */
18245
18246 @@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18247 },
18248
18249 .set_fixmap = native_set_fixmap,
18250 +
18251 +#ifdef CONFIG_PAX_KERNEXEC
18252 + .pax_open_kernel = native_pax_open_kernel,
18253 + .pax_close_kernel = native_pax_close_kernel,
18254 +#endif
18255 +
18256 };
18257
18258 EXPORT_SYMBOL_GPL(pv_time_ops);
18259 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18260 index 35ccf75..7a15747 100644
18261 --- a/arch/x86/kernel/pci-iommu_table.c
18262 +++ b/arch/x86/kernel/pci-iommu_table.c
18263 @@ -2,7 +2,7 @@
18264 #include <asm/iommu_table.h>
18265 #include <linux/string.h>
18266 #include <linux/kallsyms.h>
18267 -
18268 +#include <linux/sched.h>
18269
18270 #define DEBUG 1
18271
18272 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18273 index 15763af..da59ada 100644
18274 --- a/arch/x86/kernel/process.c
18275 +++ b/arch/x86/kernel/process.c
18276 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_struct *tsk)
18277
18278 void free_thread_info(struct thread_info *ti)
18279 {
18280 - free_thread_xstate(ti->task);
18281 free_pages((unsigned long)ti, THREAD_ORDER);
18282 }
18283
18284 +static struct kmem_cache *task_struct_cachep;
18285 +
18286 void arch_task_cache_init(void)
18287 {
18288 - task_xstate_cachep =
18289 - kmem_cache_create("task_xstate", xstate_size,
18290 + /* create a slab on which task_structs can be allocated */
18291 + task_struct_cachep =
18292 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18293 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18294 +
18295 + task_xstate_cachep =
18296 + kmem_cache_create("task_xstate", xstate_size,
18297 __alignof__(union thread_xstate),
18298 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18299 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18300 +}
18301 +
18302 +struct task_struct *alloc_task_struct_node(int node)
18303 +{
18304 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18305 +}
18306 +
18307 +void free_task_struct(struct task_struct *task)
18308 +{
18309 + free_thread_xstate(task);
18310 + kmem_cache_free(task_struct_cachep, task);
18311 }
18312
18313 /*
18314 @@ -70,7 +87,7 @@ void exit_thread(void)
18315 unsigned long *bp = t->io_bitmap_ptr;
18316
18317 if (bp) {
18318 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18319 + struct tss_struct *tss = init_tss + get_cpu();
18320
18321 t->io_bitmap_ptr = NULL;
18322 clear_thread_flag(TIF_IO_BITMAP);
18323 @@ -106,7 +123,7 @@ void show_regs_common(void)
18324
18325 printk(KERN_CONT "\n");
18326 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18327 - current->pid, current->comm, print_tainted(),
18328 + task_pid_nr(current), current->comm, print_tainted(),
18329 init_utsname()->release,
18330 (int)strcspn(init_utsname()->version, " "),
18331 init_utsname()->version);
18332 @@ -120,6 +137,9 @@ void flush_thread(void)
18333 {
18334 struct task_struct *tsk = current;
18335
18336 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18337 + loadsegment(gs, 0);
18338 +#endif
18339 flush_ptrace_hw_breakpoint(tsk);
18340 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18341 /*
18342 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18343 regs.di = (unsigned long) arg;
18344
18345 #ifdef CONFIG_X86_32
18346 - regs.ds = __USER_DS;
18347 - regs.es = __USER_DS;
18348 + regs.ds = __KERNEL_DS;
18349 + regs.es = __KERNEL_DS;
18350 regs.fs = __KERNEL_PERCPU;
18351 - regs.gs = __KERNEL_STACK_CANARY;
18352 + savesegment(gs, regs.gs);
18353 #else
18354 regs.ss = __KERNEL_DS;
18355 #endif
18356 @@ -411,7 +431,7 @@ bool set_pm_idle_to_default(void)
18357
18358 return ret;
18359 }
18360 -void stop_this_cpu(void *dummy)
18361 +__noreturn void stop_this_cpu(void *dummy)
18362 {
18363 local_irq_disable();
18364 /*
18365 @@ -653,16 +673,37 @@ static int __init idle_setup(char *str)
18366 }
18367 early_param("idle", idle_setup);
18368
18369 -unsigned long arch_align_stack(unsigned long sp)
18370 +#ifdef CONFIG_PAX_RANDKSTACK
18371 +void pax_randomize_kstack(struct pt_regs *regs)
18372 {
18373 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18374 - sp -= get_random_int() % 8192;
18375 - return sp & ~0xf;
18376 -}
18377 + struct thread_struct *thread = &current->thread;
18378 + unsigned long time;
18379
18380 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18381 -{
18382 - unsigned long range_end = mm->brk + 0x02000000;
18383 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18384 -}
18385 + if (!randomize_va_space)
18386 + return;
18387 +
18388 + if (v8086_mode(regs))
18389 + return;
18390
18391 + rdtscl(time);
18392 +
18393 + /* P4 seems to return a 0 LSB, ignore it */
18394 +#ifdef CONFIG_MPENTIUM4
18395 + time &= 0x3EUL;
18396 + time <<= 2;
18397 +#elif defined(CONFIG_X86_64)
18398 + time &= 0xFUL;
18399 + time <<= 4;
18400 +#else
18401 + time &= 0x1FUL;
18402 + time <<= 3;
18403 +#endif
18404 +
18405 + thread->sp0 ^= time;
18406 + load_sp0(init_tss + smp_processor_id(), thread);
18407 +
18408 +#ifdef CONFIG_X86_64
18409 + percpu_write(kernel_stack, thread->sp0);
18410 +#endif
18411 +}
18412 +#endif
18413 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18414 index c08d1ff..6ae1c81 100644
18415 --- a/arch/x86/kernel/process_32.c
18416 +++ b/arch/x86/kernel/process_32.c
18417 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18418 unsigned long thread_saved_pc(struct task_struct *tsk)
18419 {
18420 return ((unsigned long *)tsk->thread.sp)[3];
18421 +//XXX return tsk->thread.eip;
18422 }
18423
18424 #ifndef CONFIG_SMP
18425 @@ -132,15 +133,14 @@ void __show_regs(struct pt_regs *regs, int all)
18426 unsigned long sp;
18427 unsigned short ss, gs;
18428
18429 - if (user_mode_vm(regs)) {
18430 + if (user_mode(regs)) {
18431 sp = regs->sp;
18432 ss = regs->ss & 0xffff;
18433 - gs = get_user_gs(regs);
18434 } else {
18435 sp = kernel_stack_pointer(regs);
18436 savesegment(ss, ss);
18437 - savesegment(gs, gs);
18438 }
18439 + gs = get_user_gs(regs);
18440
18441 show_regs_common();
18442
18443 @@ -202,13 +202,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18444 struct task_struct *tsk;
18445 int err;
18446
18447 - childregs = task_pt_regs(p);
18448 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18449 *childregs = *regs;
18450 childregs->ax = 0;
18451 childregs->sp = sp;
18452
18453 p->thread.sp = (unsigned long) childregs;
18454 p->thread.sp0 = (unsigned long) (childregs+1);
18455 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18456
18457 p->thread.ip = (unsigned long) ret_from_fork;
18458
18459 @@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18460 struct thread_struct *prev = &prev_p->thread,
18461 *next = &next_p->thread;
18462 int cpu = smp_processor_id();
18463 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18464 + struct tss_struct *tss = init_tss + cpu;
18465 fpu_switch_t fpu;
18466
18467 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18468 @@ -323,6 +324,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18469 */
18470 lazy_save_gs(prev->gs);
18471
18472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18473 + __set_fs(task_thread_info(next_p)->addr_limit);
18474 +#endif
18475 +
18476 /*
18477 * Load the per-thread Thread-Local Storage descriptor.
18478 */
18479 @@ -353,6 +358,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18480 */
18481 arch_end_context_switch(next_p);
18482
18483 + percpu_write(current_task, next_p);
18484 + percpu_write(current_tinfo, &next_p->tinfo);
18485 +
18486 /*
18487 * Restore %gs if needed (which is common)
18488 */
18489 @@ -361,8 +369,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18490
18491 switch_fpu_finish(next_p, fpu);
18492
18493 - percpu_write(current_task, next_p);
18494 -
18495 return prev_p;
18496 }
18497
18498 @@ -392,4 +398,3 @@ unsigned long get_wchan(struct task_struct *p)
18499 } while (count++ < 16);
18500 return 0;
18501 }
18502 -
18503 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18504 index cfa5c90..4facd28 100644
18505 --- a/arch/x86/kernel/process_64.c
18506 +++ b/arch/x86/kernel/process_64.c
18507 @@ -89,7 +89,7 @@ static void __exit_idle(void)
18508 void exit_idle(void)
18509 {
18510 /* idle loop has pid 0 */
18511 - if (current->pid)
18512 + if (task_pid_nr(current))
18513 return;
18514 __exit_idle();
18515 }
18516 @@ -270,8 +270,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18517 struct pt_regs *childregs;
18518 struct task_struct *me = current;
18519
18520 - childregs = ((struct pt_regs *)
18521 - (THREAD_SIZE + task_stack_page(p))) - 1;
18522 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18523 *childregs = *regs;
18524
18525 childregs->ax = 0;
18526 @@ -283,6 +282,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18527 p->thread.sp = (unsigned long) childregs;
18528 p->thread.sp0 = (unsigned long) (childregs+1);
18529 p->thread.usersp = me->thread.usersp;
18530 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18531
18532 set_tsk_thread_flag(p, TIF_FORK);
18533
18534 @@ -385,7 +385,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18535 struct thread_struct *prev = &prev_p->thread;
18536 struct thread_struct *next = &next_p->thread;
18537 int cpu = smp_processor_id();
18538 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18539 + struct tss_struct *tss = init_tss + cpu;
18540 unsigned fsindex, gsindex;
18541 fpu_switch_t fpu;
18542
18543 @@ -467,10 +467,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18544 prev->usersp = percpu_read(old_rsp);
18545 percpu_write(old_rsp, next->usersp);
18546 percpu_write(current_task, next_p);
18547 + percpu_write(current_tinfo, &next_p->tinfo);
18548
18549 - percpu_write(kernel_stack,
18550 - (unsigned long)task_stack_page(next_p) +
18551 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18552 + percpu_write(kernel_stack, next->sp0);
18553
18554 /*
18555 * Now maybe reload the debug registers and handle I/O bitmaps
18556 @@ -525,12 +524,11 @@ unsigned long get_wchan(struct task_struct *p)
18557 if (!p || p == current || p->state == TASK_RUNNING)
18558 return 0;
18559 stack = (unsigned long)task_stack_page(p);
18560 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18561 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18562 return 0;
18563 fp = *(u64 *)(p->thread.sp);
18564 do {
18565 - if (fp < (unsigned long)stack ||
18566 - fp >= (unsigned long)stack+THREAD_SIZE)
18567 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18568 return 0;
18569 ip = *(u64 *)(fp+8);
18570 if (!in_sched_functions(ip))
18571 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18572 index 5026738..574f70a 100644
18573 --- a/arch/x86/kernel/ptrace.c
18574 +++ b/arch/x86/kernel/ptrace.c
18575 @@ -792,6 +792,10 @@ static int ioperm_active(struct task_struct *target,
18576 static int ioperm_get(struct task_struct *target,
18577 const struct user_regset *regset,
18578 unsigned int pos, unsigned int count,
18579 + void *kbuf, void __user *ubuf) __size_overflow(3,4);
18580 +static int ioperm_get(struct task_struct *target,
18581 + const struct user_regset *regset,
18582 + unsigned int pos, unsigned int count,
18583 void *kbuf, void __user *ubuf)
18584 {
18585 if (!target->thread.io_bitmap_ptr)
18586 @@ -823,7 +827,7 @@ long arch_ptrace(struct task_struct *child, long request,
18587 unsigned long addr, unsigned long data)
18588 {
18589 int ret;
18590 - unsigned long __user *datap = (unsigned long __user *)data;
18591 + unsigned long __user *datap = (__force unsigned long __user *)data;
18592
18593 switch (request) {
18594 /* read the word at location addr in the USER area. */
18595 @@ -908,14 +912,14 @@ long arch_ptrace(struct task_struct *child, long request,
18596 if ((int) addr < 0)
18597 return -EIO;
18598 ret = do_get_thread_area(child, addr,
18599 - (struct user_desc __user *)data);
18600 + (__force struct user_desc __user *) data);
18601 break;
18602
18603 case PTRACE_SET_THREAD_AREA:
18604 if ((int) addr < 0)
18605 return -EIO;
18606 ret = do_set_thread_area(child, addr,
18607 - (struct user_desc __user *)data, 0);
18608 + (__force struct user_desc __user *) data, 0);
18609 break;
18610 #endif
18611
18612 @@ -1332,7 +1336,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18613 memset(info, 0, sizeof(*info));
18614 info->si_signo = SIGTRAP;
18615 info->si_code = si_code;
18616 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18617 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18618 }
18619
18620 void user_single_step_siginfo(struct task_struct *tsk,
18621 @@ -1361,6 +1365,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18622 # define IS_IA32 0
18623 #endif
18624
18625 +#ifdef CONFIG_GRKERNSEC_SETXID
18626 +extern void gr_delayed_cred_worker(void);
18627 +#endif
18628 +
18629 /*
18630 * We must return the syscall number to actually look up in the table.
18631 * This can be -1L to skip running any syscall at all.
18632 @@ -1369,6 +1377,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18633 {
18634 long ret = 0;
18635
18636 +#ifdef CONFIG_GRKERNSEC_SETXID
18637 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18638 + gr_delayed_cred_worker();
18639 +#endif
18640 +
18641 /*
18642 * If we stepped into a sysenter/syscall insn, it trapped in
18643 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18644 @@ -1412,6 +1425,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18645 {
18646 bool step;
18647
18648 +#ifdef CONFIG_GRKERNSEC_SETXID
18649 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18650 + gr_delayed_cred_worker();
18651 +#endif
18652 +
18653 audit_syscall_exit(regs);
18654
18655 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18656 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18657 index 42eb330..139955c 100644
18658 --- a/arch/x86/kernel/pvclock.c
18659 +++ b/arch/x86/kernel/pvclock.c
18660 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18661 return pv_tsc_khz;
18662 }
18663
18664 -static atomic64_t last_value = ATOMIC64_INIT(0);
18665 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18666
18667 void pvclock_resume(void)
18668 {
18669 - atomic64_set(&last_value, 0);
18670 + atomic64_set_unchecked(&last_value, 0);
18671 }
18672
18673 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18674 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18675 * updating at the same time, and one of them could be slightly behind,
18676 * making the assumption that last_value always go forward fail to hold.
18677 */
18678 - last = atomic64_read(&last_value);
18679 + last = atomic64_read_unchecked(&last_value);
18680 do {
18681 if (ret < last)
18682 return last;
18683 - last = atomic64_cmpxchg(&last_value, last, ret);
18684 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18685 } while (unlikely(last != ret));
18686
18687 return ret;
18688 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18689 index d840e69..98e9581 100644
18690 --- a/arch/x86/kernel/reboot.c
18691 +++ b/arch/x86/kernel/reboot.c
18692 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18693 EXPORT_SYMBOL(pm_power_off);
18694
18695 static const struct desc_ptr no_idt = {};
18696 -static int reboot_mode;
18697 +static unsigned short reboot_mode;
18698 enum reboot_type reboot_type = BOOT_ACPI;
18699 int reboot_force;
18700
18701 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18702 extern const unsigned char machine_real_restart_asm[];
18703 extern const u64 machine_real_restart_gdt[3];
18704
18705 -void machine_real_restart(unsigned int type)
18706 +__noreturn void machine_real_restart(unsigned int type)
18707 {
18708 void *restart_va;
18709 unsigned long restart_pa;
18710 - void (*restart_lowmem)(unsigned int);
18711 + void (* __noreturn restart_lowmem)(unsigned int);
18712 u64 *lowmem_gdt;
18713
18714 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18715 + struct desc_struct *gdt;
18716 +#endif
18717 +
18718 local_irq_disable();
18719
18720 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18721 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18722 boot)". This seems like a fairly standard thing that gets set by
18723 REBOOT.COM programs, and the previous reset routine did this
18724 too. */
18725 - *((unsigned short *)0x472) = reboot_mode;
18726 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18727
18728 /* Patch the GDT in the low memory trampoline */
18729 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18730
18731 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18732 restart_pa = virt_to_phys(restart_va);
18733 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18734 + restart_lowmem = (void *)restart_pa;
18735
18736 /* GDT[0]: GDT self-pointer */
18737 lowmem_gdt[0] =
18738 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18739 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18740
18741 /* Jump to the identity-mapped low memory code */
18742 +
18743 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18744 + gdt = get_cpu_gdt_table(smp_processor_id());
18745 + pax_open_kernel();
18746 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18747 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18748 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18749 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18750 +#endif
18751 +#ifdef CONFIG_PAX_KERNEXEC
18752 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18753 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18754 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18755 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18756 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18757 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18758 +#endif
18759 + pax_close_kernel();
18760 +#endif
18761 +
18762 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18763 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18764 + unreachable();
18765 +#else
18766 restart_lowmem(type);
18767 +#endif
18768 +
18769 }
18770 #ifdef CONFIG_APM_MODULE
18771 EXPORT_SYMBOL(machine_real_restart);
18772 @@ -556,7 +586,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18773 * try to force a triple fault and then cycle between hitting the keyboard
18774 * controller and doing that
18775 */
18776 -static void native_machine_emergency_restart(void)
18777 +__noreturn static void native_machine_emergency_restart(void)
18778 {
18779 int i;
18780 int attempt = 0;
18781 @@ -680,13 +710,13 @@ void native_machine_shutdown(void)
18782 #endif
18783 }
18784
18785 -static void __machine_emergency_restart(int emergency)
18786 +static __noreturn void __machine_emergency_restart(int emergency)
18787 {
18788 reboot_emergency = emergency;
18789 machine_ops.emergency_restart();
18790 }
18791
18792 -static void native_machine_restart(char *__unused)
18793 +static __noreturn void native_machine_restart(char *__unused)
18794 {
18795 printk("machine restart\n");
18796
18797 @@ -695,7 +725,7 @@ static void native_machine_restart(char *__unused)
18798 __machine_emergency_restart(0);
18799 }
18800
18801 -static void native_machine_halt(void)
18802 +static __noreturn void native_machine_halt(void)
18803 {
18804 /* stop other cpus and apics */
18805 machine_shutdown();
18806 @@ -706,7 +736,7 @@ static void native_machine_halt(void)
18807 stop_this_cpu(NULL);
18808 }
18809
18810 -static void native_machine_power_off(void)
18811 +__noreturn static void native_machine_power_off(void)
18812 {
18813 if (pm_power_off) {
18814 if (!reboot_force)
18815 @@ -715,6 +745,7 @@ static void native_machine_power_off(void)
18816 }
18817 /* a fallback in case there is no PM info available */
18818 tboot_shutdown(TB_SHUTDOWN_HALT);
18819 + unreachable();
18820 }
18821
18822 struct machine_ops machine_ops = {
18823 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
18824 index 7a6f3b3..bed145d7 100644
18825 --- a/arch/x86/kernel/relocate_kernel_64.S
18826 +++ b/arch/x86/kernel/relocate_kernel_64.S
18827 @@ -11,6 +11,7 @@
18828 #include <asm/kexec.h>
18829 #include <asm/processor-flags.h>
18830 #include <asm/pgtable_types.h>
18831 +#include <asm/alternative-asm.h>
18832
18833 /*
18834 * Must be relocatable PIC code callable as a C function
18835 @@ -160,13 +161,14 @@ identity_mapped:
18836 xorq %rbp, %rbp
18837 xorq %r8, %r8
18838 xorq %r9, %r9
18839 - xorq %r10, %r9
18840 + xorq %r10, %r10
18841 xorq %r11, %r11
18842 xorq %r12, %r12
18843 xorq %r13, %r13
18844 xorq %r14, %r14
18845 xorq %r15, %r15
18846
18847 + pax_force_retaddr 0, 1
18848 ret
18849
18850 1:
18851 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
18852 index d7d5099..28555d0 100644
18853 --- a/arch/x86/kernel/setup.c
18854 +++ b/arch/x86/kernel/setup.c
18855 @@ -448,7 +448,7 @@ static void __init parse_setup_data(void)
18856
18857 switch (data->type) {
18858 case SETUP_E820_EXT:
18859 - parse_e820_ext(data);
18860 + parse_e820_ext((struct setup_data __force_kernel *)data);
18861 break;
18862 case SETUP_DTB:
18863 add_dtb(pa_data);
18864 @@ -649,7 +649,7 @@ static void __init trim_bios_range(void)
18865 * area (640->1Mb) as ram even though it is not.
18866 * take them out.
18867 */
18868 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
18869 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
18870 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
18871 }
18872
18873 @@ -767,14 +767,14 @@ void __init setup_arch(char **cmdline_p)
18874
18875 if (!boot_params.hdr.root_flags)
18876 root_mountflags &= ~MS_RDONLY;
18877 - init_mm.start_code = (unsigned long) _text;
18878 - init_mm.end_code = (unsigned long) _etext;
18879 + init_mm.start_code = ktla_ktva((unsigned long) _text);
18880 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
18881 init_mm.end_data = (unsigned long) _edata;
18882 init_mm.brk = _brk_end;
18883
18884 - code_resource.start = virt_to_phys(_text);
18885 - code_resource.end = virt_to_phys(_etext)-1;
18886 - data_resource.start = virt_to_phys(_etext);
18887 + code_resource.start = virt_to_phys(ktla_ktva(_text));
18888 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
18889 + data_resource.start = virt_to_phys(_sdata);
18890 data_resource.end = virt_to_phys(_edata)-1;
18891 bss_resource.start = virt_to_phys(&__bss_start);
18892 bss_resource.end = virt_to_phys(&__bss_stop)-1;
18893 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
18894 index 5a98aa2..848d2be 100644
18895 --- a/arch/x86/kernel/setup_percpu.c
18896 +++ b/arch/x86/kernel/setup_percpu.c
18897 @@ -21,19 +21,17 @@
18898 #include <asm/cpu.h>
18899 #include <asm/stackprotector.h>
18900
18901 -DEFINE_PER_CPU(int, cpu_number);
18902 +#ifdef CONFIG_SMP
18903 +DEFINE_PER_CPU(unsigned int, cpu_number);
18904 EXPORT_PER_CPU_SYMBOL(cpu_number);
18905 +#endif
18906
18907 -#ifdef CONFIG_X86_64
18908 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
18909 -#else
18910 -#define BOOT_PERCPU_OFFSET 0
18911 -#endif
18912
18913 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
18914 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
18915
18916 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
18917 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
18918 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
18919 };
18920 EXPORT_SYMBOL(__per_cpu_offset);
18921 @@ -96,6 +94,8 @@ static bool __init pcpu_need_numa(void)
18922 * Pointer to the allocated area on success, NULL on failure.
18923 */
18924 static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18925 + unsigned long align) __size_overflow(2);
18926 +static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18927 unsigned long align)
18928 {
18929 const unsigned long goal = __pa(MAX_DMA_ADDRESS);
18930 @@ -124,6 +124,8 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
18931 /*
18932 * Helpers for first chunk memory allocation
18933 */
18934 +static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) __size_overflow(2);
18935 +
18936 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
18937 {
18938 return pcpu_alloc_bootmem(cpu, size, align);
18939 @@ -155,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
18940 {
18941 #ifdef CONFIG_X86_32
18942 struct desc_struct gdt;
18943 + unsigned long base = per_cpu_offset(cpu);
18944
18945 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
18946 - 0x2 | DESCTYPE_S, 0x8);
18947 - gdt.s = 1;
18948 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
18949 + 0x83 | DESCTYPE_S, 0xC);
18950 write_gdt_entry(get_cpu_gdt_table(cpu),
18951 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
18952 #endif
18953 @@ -219,6 +221,11 @@ void __init setup_per_cpu_areas(void)
18954 /* alrighty, percpu areas up and running */
18955 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
18956 for_each_possible_cpu(cpu) {
18957 +#ifdef CONFIG_CC_STACKPROTECTOR
18958 +#ifdef CONFIG_X86_32
18959 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
18960 +#endif
18961 +#endif
18962 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
18963 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
18964 per_cpu(cpu_number, cpu) = cpu;
18965 @@ -259,6 +266,12 @@ void __init setup_per_cpu_areas(void)
18966 */
18967 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
18968 #endif
18969 +#ifdef CONFIG_CC_STACKPROTECTOR
18970 +#ifdef CONFIG_X86_32
18971 + if (!cpu)
18972 + per_cpu(stack_canary.canary, cpu) = canary;
18973 +#endif
18974 +#endif
18975 /*
18976 * Up to this point, the boot CPU has been using .init.data
18977 * area. Reload any changed state for the boot CPU.
18978 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
18979 index 46a01bd..2e88e6d 100644
18980 --- a/arch/x86/kernel/signal.c
18981 +++ b/arch/x86/kernel/signal.c
18982 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsigned long sp)
18983 * Align the stack pointer according to the i386 ABI,
18984 * i.e. so that on function entry ((sp + 4) & 15) == 0.
18985 */
18986 - sp = ((sp + 4) & -16ul) - 4;
18987 + sp = ((sp - 12) & -16ul) - 4;
18988 #else /* !CONFIG_X86_32 */
18989 sp = round_down(sp, 16) - 8;
18990 #endif
18991 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
18992 * Return an always-bogus address instead so we will die with SIGSEGV.
18993 */
18994 if (onsigstack && !likely(on_sig_stack(sp)))
18995 - return (void __user *)-1L;
18996 + return (__force void __user *)-1L;
18997
18998 /* save i387 state */
18999 if (used_math() && save_i387_xstate(*fpstate) < 0)
19000 - return (void __user *)-1L;
19001 + return (__force void __user *)-1L;
19002
19003 return (void __user *)sp;
19004 }
19005 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19006 }
19007
19008 if (current->mm->context.vdso)
19009 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19010 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19011 else
19012 - restorer = &frame->retcode;
19013 + restorer = (void __user *)&frame->retcode;
19014 if (ka->sa.sa_flags & SA_RESTORER)
19015 restorer = ka->sa.sa_restorer;
19016
19017 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19018 * reasons and because gdb uses it as a signature to notice
19019 * signal handler stack frames.
19020 */
19021 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19022 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19023
19024 if (err)
19025 return -EFAULT;
19026 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19027 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19028
19029 /* Set up to return from userspace. */
19030 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19031 + if (current->mm->context.vdso)
19032 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19033 + else
19034 + restorer = (void __user *)&frame->retcode;
19035 if (ka->sa.sa_flags & SA_RESTORER)
19036 restorer = ka->sa.sa_restorer;
19037 put_user_ex(restorer, &frame->pretcode);
19038 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19039 * reasons and because gdb uses it as a signature to notice
19040 * signal handler stack frames.
19041 */
19042 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19043 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19044 } put_user_catch(err);
19045
19046 if (err)
19047 @@ -765,7 +768,7 @@ static void do_signal(struct pt_regs *regs)
19048 * X86_32: vm86 regs switched out by assembly code before reaching
19049 * here, so testing against kernel CS suffices.
19050 */
19051 - if (!user_mode(regs))
19052 + if (!user_mode_novm(regs))
19053 return;
19054
19055 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19056 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19057 index 66d250c..f1b10bd 100644
19058 --- a/arch/x86/kernel/smpboot.c
19059 +++ b/arch/x86/kernel/smpboot.c
19060 @@ -715,17 +715,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19061 set_idle_for_cpu(cpu, c_idle.idle);
19062 do_rest:
19063 per_cpu(current_task, cpu) = c_idle.idle;
19064 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19065 #ifdef CONFIG_X86_32
19066 /* Stack for startup_32 can be just as for start_secondary onwards */
19067 irq_ctx_init(cpu);
19068 #else
19069 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19070 initial_gs = per_cpu_offset(cpu);
19071 - per_cpu(kernel_stack, cpu) =
19072 - (unsigned long)task_stack_page(c_idle.idle) -
19073 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19074 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19075 #endif
19076 +
19077 + pax_open_kernel();
19078 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19079 + pax_close_kernel();
19080 +
19081 initial_code = (unsigned long)start_secondary;
19082 stack_start = c_idle.idle->thread.sp;
19083
19084 @@ -868,6 +871,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19085
19086 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19087
19088 +#ifdef CONFIG_PAX_PER_CPU_PGD
19089 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19090 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19091 + KERNEL_PGD_PTRS);
19092 +#endif
19093 +
19094 err = do_boot_cpu(apicid, cpu);
19095 if (err) {
19096 pr_debug("do_boot_cpu failed %d\n", err);
19097 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19098 index c346d11..d43b163 100644
19099 --- a/arch/x86/kernel/step.c
19100 +++ b/arch/x86/kernel/step.c
19101 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19102 struct desc_struct *desc;
19103 unsigned long base;
19104
19105 - seg &= ~7UL;
19106 + seg >>= 3;
19107
19108 mutex_lock(&child->mm->context.lock);
19109 - if (unlikely((seg >> 3) >= child->mm->context.size))
19110 + if (unlikely(seg >= child->mm->context.size))
19111 addr = -1L; /* bogus selector, access would fault */
19112 else {
19113 desc = child->mm->context.ldt + seg;
19114 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19115 addr += base;
19116 }
19117 mutex_unlock(&child->mm->context.lock);
19118 - }
19119 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19120 + addr = ktla_ktva(addr);
19121
19122 return addr;
19123 }
19124 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19125 unsigned char opcode[15];
19126 unsigned long addr = convert_ip_to_linear(child, regs);
19127
19128 + if (addr == -EINVAL)
19129 + return 0;
19130 +
19131 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19132 for (i = 0; i < copied; i++) {
19133 switch (opcode[i]) {
19134 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19135 index 0b0cb5f..db6b9ed 100644
19136 --- a/arch/x86/kernel/sys_i386_32.c
19137 +++ b/arch/x86/kernel/sys_i386_32.c
19138 @@ -24,17 +24,224 @@
19139
19140 #include <asm/syscalls.h>
19141
19142 -/*
19143 - * Do a system call from kernel instead of calling sys_execve so we
19144 - * end up with proper pt_regs.
19145 - */
19146 -int kernel_execve(const char *filename,
19147 - const char *const argv[],
19148 - const char *const envp[])
19149 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19150 {
19151 - long __res;
19152 - asm volatile ("int $0x80"
19153 - : "=a" (__res)
19154 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19155 - return __res;
19156 + unsigned long pax_task_size = TASK_SIZE;
19157 +
19158 +#ifdef CONFIG_PAX_SEGMEXEC
19159 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19160 + pax_task_size = SEGMEXEC_TASK_SIZE;
19161 +#endif
19162 +
19163 + if (len > pax_task_size || addr > pax_task_size - len)
19164 + return -EINVAL;
19165 +
19166 + return 0;
19167 +}
19168 +
19169 +unsigned long
19170 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19171 + unsigned long len, unsigned long pgoff, unsigned long flags)
19172 +{
19173 + struct mm_struct *mm = current->mm;
19174 + struct vm_area_struct *vma;
19175 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19176 +
19177 +#ifdef CONFIG_PAX_SEGMEXEC
19178 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19179 + pax_task_size = SEGMEXEC_TASK_SIZE;
19180 +#endif
19181 +
19182 + pax_task_size -= PAGE_SIZE;
19183 +
19184 + if (len > pax_task_size)
19185 + return -ENOMEM;
19186 +
19187 + if (flags & MAP_FIXED)
19188 + return addr;
19189 +
19190 +#ifdef CONFIG_PAX_RANDMMAP
19191 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19192 +#endif
19193 +
19194 + if (addr) {
19195 + addr = PAGE_ALIGN(addr);
19196 + if (pax_task_size - len >= addr) {
19197 + vma = find_vma(mm, addr);
19198 + if (check_heap_stack_gap(vma, addr, len))
19199 + return addr;
19200 + }
19201 + }
19202 + if (len > mm->cached_hole_size) {
19203 + start_addr = addr = mm->free_area_cache;
19204 + } else {
19205 + start_addr = addr = mm->mmap_base;
19206 + mm->cached_hole_size = 0;
19207 + }
19208 +
19209 +#ifdef CONFIG_PAX_PAGEEXEC
19210 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19211 + start_addr = 0x00110000UL;
19212 +
19213 +#ifdef CONFIG_PAX_RANDMMAP
19214 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19215 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19216 +#endif
19217 +
19218 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19219 + start_addr = addr = mm->mmap_base;
19220 + else
19221 + addr = start_addr;
19222 + }
19223 +#endif
19224 +
19225 +full_search:
19226 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19227 + /* At this point: (!vma || addr < vma->vm_end). */
19228 + if (pax_task_size - len < addr) {
19229 + /*
19230 + * Start a new search - just in case we missed
19231 + * some holes.
19232 + */
19233 + if (start_addr != mm->mmap_base) {
19234 + start_addr = addr = mm->mmap_base;
19235 + mm->cached_hole_size = 0;
19236 + goto full_search;
19237 + }
19238 + return -ENOMEM;
19239 + }
19240 + if (check_heap_stack_gap(vma, addr, len))
19241 + break;
19242 + if (addr + mm->cached_hole_size < vma->vm_start)
19243 + mm->cached_hole_size = vma->vm_start - addr;
19244 + addr = vma->vm_end;
19245 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19246 + start_addr = addr = mm->mmap_base;
19247 + mm->cached_hole_size = 0;
19248 + goto full_search;
19249 + }
19250 + }
19251 +
19252 + /*
19253 + * Remember the place where we stopped the search:
19254 + */
19255 + mm->free_area_cache = addr + len;
19256 + return addr;
19257 +}
19258 +
19259 +unsigned long
19260 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19261 + const unsigned long len, const unsigned long pgoff,
19262 + const unsigned long flags)
19263 +{
19264 + struct vm_area_struct *vma;
19265 + struct mm_struct *mm = current->mm;
19266 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19267 +
19268 +#ifdef CONFIG_PAX_SEGMEXEC
19269 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19270 + pax_task_size = SEGMEXEC_TASK_SIZE;
19271 +#endif
19272 +
19273 + pax_task_size -= PAGE_SIZE;
19274 +
19275 + /* requested length too big for entire address space */
19276 + if (len > pax_task_size)
19277 + return -ENOMEM;
19278 +
19279 + if (flags & MAP_FIXED)
19280 + return addr;
19281 +
19282 +#ifdef CONFIG_PAX_PAGEEXEC
19283 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19284 + goto bottomup;
19285 +#endif
19286 +
19287 +#ifdef CONFIG_PAX_RANDMMAP
19288 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19289 +#endif
19290 +
19291 + /* requesting a specific address */
19292 + if (addr) {
19293 + addr = PAGE_ALIGN(addr);
19294 + if (pax_task_size - len >= addr) {
19295 + vma = find_vma(mm, addr);
19296 + if (check_heap_stack_gap(vma, addr, len))
19297 + return addr;
19298 + }
19299 + }
19300 +
19301 + /* check if free_area_cache is useful for us */
19302 + if (len <= mm->cached_hole_size) {
19303 + mm->cached_hole_size = 0;
19304 + mm->free_area_cache = mm->mmap_base;
19305 + }
19306 +
19307 + /* either no address requested or can't fit in requested address hole */
19308 + addr = mm->free_area_cache;
19309 +
19310 + /* make sure it can fit in the remaining address space */
19311 + if (addr > len) {
19312 + vma = find_vma(mm, addr-len);
19313 + if (check_heap_stack_gap(vma, addr - len, len))
19314 + /* remember the address as a hint for next time */
19315 + return (mm->free_area_cache = addr-len);
19316 + }
19317 +
19318 + if (mm->mmap_base < len)
19319 + goto bottomup;
19320 +
19321 + addr = mm->mmap_base-len;
19322 +
19323 + do {
19324 + /*
19325 + * Lookup failure means no vma is above this address,
19326 + * else if new region fits below vma->vm_start,
19327 + * return with success:
19328 + */
19329 + vma = find_vma(mm, addr);
19330 + if (check_heap_stack_gap(vma, addr, len))
19331 + /* remember the address as a hint for next time */
19332 + return (mm->free_area_cache = addr);
19333 +
19334 + /* remember the largest hole we saw so far */
19335 + if (addr + mm->cached_hole_size < vma->vm_start)
19336 + mm->cached_hole_size = vma->vm_start - addr;
19337 +
19338 + /* try just below the current vma->vm_start */
19339 + addr = skip_heap_stack_gap(vma, len);
19340 + } while (!IS_ERR_VALUE(addr));
19341 +
19342 +bottomup:
19343 + /*
19344 + * A failed mmap() very likely causes application failure,
19345 + * so fall back to the bottom-up function here. This scenario
19346 + * can happen with large stack limits and large mmap()
19347 + * allocations.
19348 + */
19349 +
19350 +#ifdef CONFIG_PAX_SEGMEXEC
19351 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19352 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19353 + else
19354 +#endif
19355 +
19356 + mm->mmap_base = TASK_UNMAPPED_BASE;
19357 +
19358 +#ifdef CONFIG_PAX_RANDMMAP
19359 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19360 + mm->mmap_base += mm->delta_mmap;
19361 +#endif
19362 +
19363 + mm->free_area_cache = mm->mmap_base;
19364 + mm->cached_hole_size = ~0UL;
19365 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19366 + /*
19367 + * Restore the topdown base:
19368 + */
19369 + mm->mmap_base = base;
19370 + mm->free_area_cache = base;
19371 + mm->cached_hole_size = ~0UL;
19372 +
19373 + return addr;
19374 }
19375 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19376 index 0514890..3dbebce 100644
19377 --- a/arch/x86/kernel/sys_x86_64.c
19378 +++ b/arch/x86/kernel/sys_x86_64.c
19379 @@ -95,8 +95,8 @@ out:
19380 return error;
19381 }
19382
19383 -static void find_start_end(unsigned long flags, unsigned long *begin,
19384 - unsigned long *end)
19385 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19386 + unsigned long *begin, unsigned long *end)
19387 {
19388 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19389 unsigned long new_begin;
19390 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19391 *begin = new_begin;
19392 }
19393 } else {
19394 - *begin = TASK_UNMAPPED_BASE;
19395 + *begin = mm->mmap_base;
19396 *end = TASK_SIZE;
19397 }
19398 }
19399 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19400 if (flags & MAP_FIXED)
19401 return addr;
19402
19403 - find_start_end(flags, &begin, &end);
19404 + find_start_end(mm, flags, &begin, &end);
19405
19406 if (len > end)
19407 return -ENOMEM;
19408
19409 +#ifdef CONFIG_PAX_RANDMMAP
19410 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19411 +#endif
19412 +
19413 if (addr) {
19414 addr = PAGE_ALIGN(addr);
19415 vma = find_vma(mm, addr);
19416 - if (end - len >= addr &&
19417 - (!vma || addr + len <= vma->vm_start))
19418 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19419 return addr;
19420 }
19421 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19422 @@ -172,7 +175,7 @@ full_search:
19423 }
19424 return -ENOMEM;
19425 }
19426 - if (!vma || addr + len <= vma->vm_start) {
19427 + if (check_heap_stack_gap(vma, addr, len)) {
19428 /*
19429 * Remember the place where we stopped the search:
19430 */
19431 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19432 {
19433 struct vm_area_struct *vma;
19434 struct mm_struct *mm = current->mm;
19435 - unsigned long addr = addr0;
19436 + unsigned long base = mm->mmap_base, addr = addr0;
19437
19438 /* requested length too big for entire address space */
19439 if (len > TASK_SIZE)
19440 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19441 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19442 goto bottomup;
19443
19444 +#ifdef CONFIG_PAX_RANDMMAP
19445 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19446 +#endif
19447 +
19448 /* requesting a specific address */
19449 if (addr) {
19450 addr = PAGE_ALIGN(addr);
19451 - vma = find_vma(mm, addr);
19452 - if (TASK_SIZE - len >= addr &&
19453 - (!vma || addr + len <= vma->vm_start))
19454 - return addr;
19455 + if (TASK_SIZE - len >= addr) {
19456 + vma = find_vma(mm, addr);
19457 + if (check_heap_stack_gap(vma, addr, len))
19458 + return addr;
19459 + }
19460 }
19461
19462 /* check if free_area_cache is useful for us */
19463 @@ -232,7 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19464 ALIGN_TOPDOWN);
19465
19466 vma = find_vma(mm, tmp_addr);
19467 - if (!vma || tmp_addr + len <= vma->vm_start)
19468 + if (check_heap_stack_gap(vma, tmp_addr, len))
19469 /* remember the address as a hint for next time */
19470 return mm->free_area_cache = tmp_addr;
19471 }
19472 @@ -251,7 +259,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19473 * return with success:
19474 */
19475 vma = find_vma(mm, addr);
19476 - if (!vma || addr+len <= vma->vm_start)
19477 + if (check_heap_stack_gap(vma, addr, len))
19478 /* remember the address as a hint for next time */
19479 return mm->free_area_cache = addr;
19480
19481 @@ -260,8 +268,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19482 mm->cached_hole_size = vma->vm_start - addr;
19483
19484 /* try just below the current vma->vm_start */
19485 - addr = vma->vm_start-len;
19486 - } while (len < vma->vm_start);
19487 + addr = skip_heap_stack_gap(vma, len);
19488 + } while (!IS_ERR_VALUE(addr));
19489
19490 bottomup:
19491 /*
19492 @@ -270,13 +278,21 @@ bottomup:
19493 * can happen with large stack limits and large mmap()
19494 * allocations.
19495 */
19496 + mm->mmap_base = TASK_UNMAPPED_BASE;
19497 +
19498 +#ifdef CONFIG_PAX_RANDMMAP
19499 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19500 + mm->mmap_base += mm->delta_mmap;
19501 +#endif
19502 +
19503 + mm->free_area_cache = mm->mmap_base;
19504 mm->cached_hole_size = ~0UL;
19505 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19506 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19507 /*
19508 * Restore the topdown base:
19509 */
19510 - mm->free_area_cache = mm->mmap_base;
19511 + mm->mmap_base = base;
19512 + mm->free_area_cache = base;
19513 mm->cached_hole_size = ~0UL;
19514
19515 return addr;
19516 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19517 index e2410e2..4fe3fbc 100644
19518 --- a/arch/x86/kernel/tboot.c
19519 +++ b/arch/x86/kernel/tboot.c
19520 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19521
19522 void tboot_shutdown(u32 shutdown_type)
19523 {
19524 - void (*shutdown)(void);
19525 + void (* __noreturn shutdown)(void);
19526
19527 if (!tboot_enabled())
19528 return;
19529 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19530
19531 switch_to_tboot_pt();
19532
19533 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19534 + shutdown = (void *)tboot->shutdown_entry;
19535 shutdown();
19536
19537 /* should not reach here */
19538 @@ -298,7 +298,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19539 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19540 }
19541
19542 -static atomic_t ap_wfs_count;
19543 +static atomic_unchecked_t ap_wfs_count;
19544
19545 static int tboot_wait_for_aps(int num_aps)
19546 {
19547 @@ -322,9 +322,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19548 {
19549 switch (action) {
19550 case CPU_DYING:
19551 - atomic_inc(&ap_wfs_count);
19552 + atomic_inc_unchecked(&ap_wfs_count);
19553 if (num_online_cpus() == 1)
19554 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19555 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19556 return NOTIFY_BAD;
19557 break;
19558 }
19559 @@ -343,7 +343,7 @@ static __init int tboot_late_init(void)
19560
19561 tboot_create_trampoline();
19562
19563 - atomic_set(&ap_wfs_count, 0);
19564 + atomic_set_unchecked(&ap_wfs_count, 0);
19565 register_hotcpu_notifier(&tboot_cpu_notifier);
19566 return 0;
19567 }
19568 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19569 index dd5fbf4..b7f2232 100644
19570 --- a/arch/x86/kernel/time.c
19571 +++ b/arch/x86/kernel/time.c
19572 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19573 {
19574 unsigned long pc = instruction_pointer(regs);
19575
19576 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19577 + if (!user_mode(regs) && in_lock_functions(pc)) {
19578 #ifdef CONFIG_FRAME_POINTER
19579 - return *(unsigned long *)(regs->bp + sizeof(long));
19580 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19581 #else
19582 unsigned long *sp =
19583 (unsigned long *)kernel_stack_pointer(regs);
19584 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19585 * or above a saved flags. Eflags has bits 22-31 zero,
19586 * kernel addresses don't.
19587 */
19588 +
19589 +#ifdef CONFIG_PAX_KERNEXEC
19590 + return ktla_ktva(sp[0]);
19591 +#else
19592 if (sp[0] >> 22)
19593 return sp[0];
19594 if (sp[1] >> 22)
19595 return sp[1];
19596 #endif
19597 +
19598 +#endif
19599 }
19600 return pc;
19601 }
19602 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19603 index bcfec2d..8f88b4a 100644
19604 --- a/arch/x86/kernel/tls.c
19605 +++ b/arch/x86/kernel/tls.c
19606 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19607 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19608 return -EINVAL;
19609
19610 +#ifdef CONFIG_PAX_SEGMEXEC
19611 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19612 + return -EINVAL;
19613 +#endif
19614 +
19615 set_tls_desc(p, idx, &info, 1);
19616
19617 return 0;
19618 diff --git a/arch/x86/kernel/tls.h b/arch/x86/kernel/tls.h
19619 index 2f083a2..7d3fecc 100644
19620 --- a/arch/x86/kernel/tls.h
19621 +++ b/arch/x86/kernel/tls.h
19622 @@ -16,6 +16,6 @@
19623
19624 extern user_regset_active_fn regset_tls_active;
19625 extern user_regset_get_fn regset_tls_get;
19626 -extern user_regset_set_fn regset_tls_set;
19627 +extern user_regset_set_fn regset_tls_set __size_overflow(4);
19628
19629 #endif /* _ARCH_X86_KERNEL_TLS_H */
19630 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19631 index 451c0a7..e57f551 100644
19632 --- a/arch/x86/kernel/trampoline_32.S
19633 +++ b/arch/x86/kernel/trampoline_32.S
19634 @@ -32,6 +32,12 @@
19635 #include <asm/segment.h>
19636 #include <asm/page_types.h>
19637
19638 +#ifdef CONFIG_PAX_KERNEXEC
19639 +#define ta(X) (X)
19640 +#else
19641 +#define ta(X) ((X) - __PAGE_OFFSET)
19642 +#endif
19643 +
19644 #ifdef CONFIG_SMP
19645
19646 .section ".x86_trampoline","a"
19647 @@ -62,7 +68,7 @@ r_base = .
19648 inc %ax # protected mode (PE) bit
19649 lmsw %ax # into protected mode
19650 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19651 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19652 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19653
19654 # These need to be in the same 64K segment as the above;
19655 # hence we don't use the boot_gdt_descr defined in head.S
19656 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19657 index 09ff517..df19fbff 100644
19658 --- a/arch/x86/kernel/trampoline_64.S
19659 +++ b/arch/x86/kernel/trampoline_64.S
19660 @@ -90,7 +90,7 @@ startup_32:
19661 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19662 movl %eax, %ds
19663
19664 - movl $X86_CR4_PAE, %eax
19665 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19666 movl %eax, %cr4 # Enable PAE mode
19667
19668 # Setup trampoline 4 level pagetables
19669 @@ -138,7 +138,7 @@ tidt:
19670 # so the kernel can live anywhere
19671 .balign 4
19672 tgdt:
19673 - .short tgdt_end - tgdt # gdt limit
19674 + .short tgdt_end - tgdt - 1 # gdt limit
19675 .long tgdt - r_base
19676 .short 0
19677 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19678 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19679 index 4bbe04d..41d0943 100644
19680 --- a/arch/x86/kernel/traps.c
19681 +++ b/arch/x86/kernel/traps.c
19682 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19683
19684 /* Do we ignore FPU interrupts ? */
19685 char ignore_fpu_irq;
19686 -
19687 -/*
19688 - * The IDT has to be page-aligned to simplify the Pentium
19689 - * F0 0F bug workaround.
19690 - */
19691 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19692 #endif
19693
19694 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19695 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19696 }
19697
19698 static void __kprobes
19699 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19700 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19701 long error_code, siginfo_t *info)
19702 {
19703 struct task_struct *tsk = current;
19704
19705 #ifdef CONFIG_X86_32
19706 - if (regs->flags & X86_VM_MASK) {
19707 + if (v8086_mode(regs)) {
19708 /*
19709 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19710 * On nmi (interrupt 2), do_trap should not be called.
19711 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19712 }
19713 #endif
19714
19715 - if (!user_mode(regs))
19716 + if (!user_mode_novm(regs))
19717 goto kernel_trap;
19718
19719 #ifdef CONFIG_X86_32
19720 @@ -148,7 +142,7 @@ trap_signal:
19721 printk_ratelimit()) {
19722 printk(KERN_INFO
19723 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19724 - tsk->comm, tsk->pid, str,
19725 + tsk->comm, task_pid_nr(tsk), str,
19726 regs->ip, regs->sp, error_code);
19727 print_vma_addr(" in ", regs->ip);
19728 printk("\n");
19729 @@ -165,8 +159,20 @@ kernel_trap:
19730 if (!fixup_exception(regs)) {
19731 tsk->thread.error_code = error_code;
19732 tsk->thread.trap_no = trapnr;
19733 +
19734 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19735 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19736 + str = "PAX: suspicious stack segment fault";
19737 +#endif
19738 +
19739 die(str, regs, error_code);
19740 }
19741 +
19742 +#ifdef CONFIG_PAX_REFCOUNT
19743 + if (trapnr == 4)
19744 + pax_report_refcount_overflow(regs);
19745 +#endif
19746 +
19747 return;
19748
19749 #ifdef CONFIG_X86_32
19750 @@ -255,14 +261,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19751 conditional_sti(regs);
19752
19753 #ifdef CONFIG_X86_32
19754 - if (regs->flags & X86_VM_MASK)
19755 + if (v8086_mode(regs))
19756 goto gp_in_vm86;
19757 #endif
19758
19759 tsk = current;
19760 - if (!user_mode(regs))
19761 + if (!user_mode_novm(regs))
19762 goto gp_in_kernel;
19763
19764 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19765 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19766 + struct mm_struct *mm = tsk->mm;
19767 + unsigned long limit;
19768 +
19769 + down_write(&mm->mmap_sem);
19770 + limit = mm->context.user_cs_limit;
19771 + if (limit < TASK_SIZE) {
19772 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19773 + up_write(&mm->mmap_sem);
19774 + return;
19775 + }
19776 + up_write(&mm->mmap_sem);
19777 + }
19778 +#endif
19779 +
19780 tsk->thread.error_code = error_code;
19781 tsk->thread.trap_no = 13;
19782
19783 @@ -295,6 +317,13 @@ gp_in_kernel:
19784 if (notify_die(DIE_GPF, "general protection fault", regs,
19785 error_code, 13, SIGSEGV) == NOTIFY_STOP)
19786 return;
19787 +
19788 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19789 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19790 + die("PAX: suspicious general protection fault", regs, error_code);
19791 + else
19792 +#endif
19793 +
19794 die("general protection fault", regs, error_code);
19795 }
19796
19797 @@ -421,7 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19798 /* It's safe to allow irq's after DR6 has been saved */
19799 preempt_conditional_sti(regs);
19800
19801 - if (regs->flags & X86_VM_MASK) {
19802 + if (v8086_mode(regs)) {
19803 handle_vm86_trap((struct kernel_vm86_regs *) regs,
19804 error_code, 1);
19805 preempt_conditional_cli(regs);
19806 @@ -436,7 +465,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19807 * We already checked v86 mode above, so we can check for kernel mode
19808 * by just checking the CPL of CS.
19809 */
19810 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19811 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19812 tsk->thread.debugreg6 &= ~DR_STEP;
19813 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19814 regs->flags &= ~X86_EFLAGS_TF;
19815 @@ -466,7 +495,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19816 return;
19817 conditional_sti(regs);
19818
19819 - if (!user_mode_vm(regs))
19820 + if (!user_mode(regs))
19821 {
19822 if (!fixup_exception(regs)) {
19823 task->thread.error_code = error_code;
19824 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19825 index b9242ba..50c5edd 100644
19826 --- a/arch/x86/kernel/verify_cpu.S
19827 +++ b/arch/x86/kernel/verify_cpu.S
19828 @@ -20,6 +20,7 @@
19829 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19830 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19831 * arch/x86/kernel/head_32.S: processor startup
19832 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19833 *
19834 * verify_cpu, returns the status of longmode and SSE in register %eax.
19835 * 0: Success 1: Failure
19836 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19837 index 328cb37..f37fee1 100644
19838 --- a/arch/x86/kernel/vm86_32.c
19839 +++ b/arch/x86/kernel/vm86_32.c
19840 @@ -41,6 +41,7 @@
19841 #include <linux/ptrace.h>
19842 #include <linux/audit.h>
19843 #include <linux/stddef.h>
19844 +#include <linux/grsecurity.h>
19845
19846 #include <asm/uaccess.h>
19847 #include <asm/io.h>
19848 @@ -109,6 +110,9 @@ static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
19849 /* convert vm86_regs to kernel_vm86_regs */
19850 static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
19851 const struct vm86_regs __user *user,
19852 + unsigned extra) __size_overflow(3);
19853 +static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
19854 + const struct vm86_regs __user *user,
19855 unsigned extra)
19856 {
19857 int ret = 0;
19858 @@ -148,7 +152,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
19859 do_exit(SIGSEGV);
19860 }
19861
19862 - tss = &per_cpu(init_tss, get_cpu());
19863 + tss = init_tss + get_cpu();
19864 current->thread.sp0 = current->thread.saved_sp0;
19865 current->thread.sysenter_cs = __KERNEL_CS;
19866 load_sp0(tss, &current->thread);
19867 @@ -210,6 +214,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
19868 struct task_struct *tsk;
19869 int tmp, ret = -EPERM;
19870
19871 +#ifdef CONFIG_GRKERNSEC_VM86
19872 + if (!capable(CAP_SYS_RAWIO)) {
19873 + gr_handle_vm86();
19874 + goto out;
19875 + }
19876 +#endif
19877 +
19878 tsk = current;
19879 if (tsk->thread.saved_sp0)
19880 goto out;
19881 @@ -240,6 +251,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
19882 int tmp, ret;
19883 struct vm86plus_struct __user *v86;
19884
19885 +#ifdef CONFIG_GRKERNSEC_VM86
19886 + if (!capable(CAP_SYS_RAWIO)) {
19887 + gr_handle_vm86();
19888 + ret = -EPERM;
19889 + goto out;
19890 + }
19891 +#endif
19892 +
19893 tsk = current;
19894 switch (cmd) {
19895 case VM86_REQUEST_IRQ:
19896 @@ -326,7 +345,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
19897 tsk->thread.saved_fs = info->regs32->fs;
19898 tsk->thread.saved_gs = get_user_gs(info->regs32);
19899
19900 - tss = &per_cpu(init_tss, get_cpu());
19901 + tss = init_tss + get_cpu();
19902 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
19903 if (cpu_has_sep)
19904 tsk->thread.sysenter_cs = 0;
19905 @@ -533,7 +552,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
19906 goto cannot_handle;
19907 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
19908 goto cannot_handle;
19909 - intr_ptr = (unsigned long __user *) (i << 2);
19910 + intr_ptr = (__force unsigned long __user *) (i << 2);
19911 if (get_user(segoffs, intr_ptr))
19912 goto cannot_handle;
19913 if ((segoffs >> 16) == BIOSSEG)
19914 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
19915 index 0f703f1..9e15f64 100644
19916 --- a/arch/x86/kernel/vmlinux.lds.S
19917 +++ b/arch/x86/kernel/vmlinux.lds.S
19918 @@ -26,6 +26,13 @@
19919 #include <asm/page_types.h>
19920 #include <asm/cache.h>
19921 #include <asm/boot.h>
19922 +#include <asm/segment.h>
19923 +
19924 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19925 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
19926 +#else
19927 +#define __KERNEL_TEXT_OFFSET 0
19928 +#endif
19929
19930 #undef i386 /* in case the preprocessor is a 32bit one */
19931
19932 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
19933
19934 PHDRS {
19935 text PT_LOAD FLAGS(5); /* R_E */
19936 +#ifdef CONFIG_X86_32
19937 + module PT_LOAD FLAGS(5); /* R_E */
19938 +#endif
19939 +#ifdef CONFIG_XEN
19940 + rodata PT_LOAD FLAGS(5); /* R_E */
19941 +#else
19942 + rodata PT_LOAD FLAGS(4); /* R__ */
19943 +#endif
19944 data PT_LOAD FLAGS(6); /* RW_ */
19945 -#ifdef CONFIG_X86_64
19946 + init.begin PT_LOAD FLAGS(6); /* RW_ */
19947 #ifdef CONFIG_SMP
19948 percpu PT_LOAD FLAGS(6); /* RW_ */
19949 #endif
19950 + text.init PT_LOAD FLAGS(5); /* R_E */
19951 + text.exit PT_LOAD FLAGS(5); /* R_E */
19952 init PT_LOAD FLAGS(7); /* RWE */
19953 -#endif
19954 note PT_NOTE FLAGS(0); /* ___ */
19955 }
19956
19957 SECTIONS
19958 {
19959 #ifdef CONFIG_X86_32
19960 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
19961 - phys_startup_32 = startup_32 - LOAD_OFFSET;
19962 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
19963 #else
19964 - . = __START_KERNEL;
19965 - phys_startup_64 = startup_64 - LOAD_OFFSET;
19966 + . = __START_KERNEL;
19967 #endif
19968
19969 /* Text and read-only data */
19970 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
19971 - _text = .;
19972 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
19973 /* bootstrapping code */
19974 +#ifdef CONFIG_X86_32
19975 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19976 +#else
19977 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19978 +#endif
19979 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
19980 + _text = .;
19981 HEAD_TEXT
19982 #ifdef CONFIG_X86_32
19983 . = ALIGN(PAGE_SIZE);
19984 @@ -108,13 +128,47 @@ SECTIONS
19985 IRQENTRY_TEXT
19986 *(.fixup)
19987 *(.gnu.warning)
19988 - /* End of text section */
19989 - _etext = .;
19990 } :text = 0x9090
19991
19992 - NOTES :text :note
19993 + . += __KERNEL_TEXT_OFFSET;
19994
19995 - EXCEPTION_TABLE(16) :text = 0x9090
19996 +#ifdef CONFIG_X86_32
19997 + . = ALIGN(PAGE_SIZE);
19998 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
19999 +
20000 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20001 + MODULES_EXEC_VADDR = .;
20002 + BYTE(0)
20003 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20004 + . = ALIGN(HPAGE_SIZE);
20005 + MODULES_EXEC_END = . - 1;
20006 +#endif
20007 +
20008 + } :module
20009 +#endif
20010 +
20011 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20012 + /* End of text section */
20013 + _etext = . - __KERNEL_TEXT_OFFSET;
20014 + }
20015 +
20016 +#ifdef CONFIG_X86_32
20017 + . = ALIGN(PAGE_SIZE);
20018 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20019 + *(.idt)
20020 + . = ALIGN(PAGE_SIZE);
20021 + *(.empty_zero_page)
20022 + *(.initial_pg_fixmap)
20023 + *(.initial_pg_pmd)
20024 + *(.initial_page_table)
20025 + *(.swapper_pg_dir)
20026 + } :rodata
20027 +#endif
20028 +
20029 + . = ALIGN(PAGE_SIZE);
20030 + NOTES :rodata :note
20031 +
20032 + EXCEPTION_TABLE(16) :rodata
20033
20034 #if defined(CONFIG_DEBUG_RODATA)
20035 /* .text should occupy whole number of pages */
20036 @@ -126,16 +180,20 @@ SECTIONS
20037
20038 /* Data */
20039 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20040 +
20041 +#ifdef CONFIG_PAX_KERNEXEC
20042 + . = ALIGN(HPAGE_SIZE);
20043 +#else
20044 + . = ALIGN(PAGE_SIZE);
20045 +#endif
20046 +
20047 /* Start of data section */
20048 _sdata = .;
20049
20050 /* init_task */
20051 INIT_TASK_DATA(THREAD_SIZE)
20052
20053 -#ifdef CONFIG_X86_32
20054 - /* 32 bit has nosave before _edata */
20055 NOSAVE_DATA
20056 -#endif
20057
20058 PAGE_ALIGNED_DATA(PAGE_SIZE)
20059
20060 @@ -176,12 +234,19 @@ SECTIONS
20061 #endif /* CONFIG_X86_64 */
20062
20063 /* Init code and data - will be freed after init */
20064 - . = ALIGN(PAGE_SIZE);
20065 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20066 + BYTE(0)
20067 +
20068 +#ifdef CONFIG_PAX_KERNEXEC
20069 + . = ALIGN(HPAGE_SIZE);
20070 +#else
20071 + . = ALIGN(PAGE_SIZE);
20072 +#endif
20073 +
20074 __init_begin = .; /* paired with __init_end */
20075 - }
20076 + } :init.begin
20077
20078 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20079 +#ifdef CONFIG_SMP
20080 /*
20081 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20082 * output PHDR, so the next output section - .init.text - should
20083 @@ -190,12 +255,27 @@ SECTIONS
20084 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20085 #endif
20086
20087 - INIT_TEXT_SECTION(PAGE_SIZE)
20088 -#ifdef CONFIG_X86_64
20089 - :init
20090 -#endif
20091 + . = ALIGN(PAGE_SIZE);
20092 + init_begin = .;
20093 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20094 + VMLINUX_SYMBOL(_sinittext) = .;
20095 + INIT_TEXT
20096 + VMLINUX_SYMBOL(_einittext) = .;
20097 + . = ALIGN(PAGE_SIZE);
20098 + } :text.init
20099
20100 - INIT_DATA_SECTION(16)
20101 + /*
20102 + * .exit.text is discard at runtime, not link time, to deal with
20103 + * references from .altinstructions and .eh_frame
20104 + */
20105 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20106 + EXIT_TEXT
20107 + . = ALIGN(16);
20108 + } :text.exit
20109 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20110 +
20111 + . = ALIGN(PAGE_SIZE);
20112 + INIT_DATA_SECTION(16) :init
20113
20114 /*
20115 * Code and data for a variety of lowlevel trampolines, to be
20116 @@ -269,19 +349,12 @@ SECTIONS
20117 }
20118
20119 . = ALIGN(8);
20120 - /*
20121 - * .exit.text is discard at runtime, not link time, to deal with
20122 - * references from .altinstructions and .eh_frame
20123 - */
20124 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20125 - EXIT_TEXT
20126 - }
20127
20128 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20129 EXIT_DATA
20130 }
20131
20132 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20133 +#ifndef CONFIG_SMP
20134 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20135 #endif
20136
20137 @@ -300,16 +373,10 @@ SECTIONS
20138 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20139 __smp_locks = .;
20140 *(.smp_locks)
20141 - . = ALIGN(PAGE_SIZE);
20142 __smp_locks_end = .;
20143 + . = ALIGN(PAGE_SIZE);
20144 }
20145
20146 -#ifdef CONFIG_X86_64
20147 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20148 - NOSAVE_DATA
20149 - }
20150 -#endif
20151 -
20152 /* BSS */
20153 . = ALIGN(PAGE_SIZE);
20154 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20155 @@ -325,6 +392,7 @@ SECTIONS
20156 __brk_base = .;
20157 . += 64 * 1024; /* 64k alignment slop space */
20158 *(.brk_reservation) /* areas brk users have reserved */
20159 + . = ALIGN(HPAGE_SIZE);
20160 __brk_limit = .;
20161 }
20162
20163 @@ -351,13 +419,12 @@ SECTIONS
20164 * for the boot processor.
20165 */
20166 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20167 -INIT_PER_CPU(gdt_page);
20168 INIT_PER_CPU(irq_stack_union);
20169
20170 /*
20171 * Build-time check on the image size:
20172 */
20173 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20174 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20175 "kernel image bigger than KERNEL_IMAGE_SIZE");
20176
20177 #ifdef CONFIG_SMP
20178 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20179 index b07ba93..a212969 100644
20180 --- a/arch/x86/kernel/vsyscall_64.c
20181 +++ b/arch/x86/kernel/vsyscall_64.c
20182 @@ -57,15 +57,13 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
20183 .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
20184 };
20185
20186 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20187 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20188
20189 static int __init vsyscall_setup(char *str)
20190 {
20191 if (str) {
20192 if (!strcmp("emulate", str))
20193 vsyscall_mode = EMULATE;
20194 - else if (!strcmp("native", str))
20195 - vsyscall_mode = NATIVE;
20196 else if (!strcmp("none", str))
20197 vsyscall_mode = NONE;
20198 else
20199 @@ -207,7 +205,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20200
20201 tsk = current;
20202 if (seccomp_mode(&tsk->seccomp))
20203 - do_exit(SIGKILL);
20204 + do_group_exit(SIGKILL);
20205
20206 /*
20207 * With a real vsyscall, page faults cause SIGSEGV. We want to
20208 @@ -279,8 +277,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20209 return true;
20210
20211 sigsegv:
20212 - force_sig(SIGSEGV, current);
20213 - return true;
20214 + do_group_exit(SIGKILL);
20215 }
20216
20217 /*
20218 @@ -333,10 +330,7 @@ void __init map_vsyscall(void)
20219 extern char __vvar_page;
20220 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20221
20222 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20223 - vsyscall_mode == NATIVE
20224 - ? PAGE_KERNEL_VSYSCALL
20225 - : PAGE_KERNEL_VVAR);
20226 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20227 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20228 (unsigned long)VSYSCALL_START);
20229
20230 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20231 index 9796c2f..f686fbf 100644
20232 --- a/arch/x86/kernel/x8664_ksyms_64.c
20233 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20234 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20235 EXPORT_SYMBOL(copy_user_generic_string);
20236 EXPORT_SYMBOL(copy_user_generic_unrolled);
20237 EXPORT_SYMBOL(__copy_user_nocache);
20238 -EXPORT_SYMBOL(_copy_from_user);
20239 -EXPORT_SYMBOL(_copy_to_user);
20240
20241 EXPORT_SYMBOL(copy_page);
20242 EXPORT_SYMBOL(clear_page);
20243 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20244 index 7110911..e8cdee5 100644
20245 --- a/arch/x86/kernel/xsave.c
20246 +++ b/arch/x86/kernel/xsave.c
20247 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20248 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20249 return -EINVAL;
20250
20251 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20252 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20253 fx_sw_user->extended_size -
20254 FP_XSTATE_MAGIC2_SIZE));
20255 if (err)
20256 @@ -266,7 +266,7 @@ fx_only:
20257 * the other extended state.
20258 */
20259 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20260 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20261 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20262 }
20263
20264 /*
20265 @@ -295,7 +295,7 @@ int restore_i387_xstate(void __user *buf)
20266 if (use_xsave())
20267 err = restore_user_xstate(buf);
20268 else
20269 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20270 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20271 buf);
20272 if (unlikely(err)) {
20273 /*
20274 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20275 index 89b02bf..0f6511d 100644
20276 --- a/arch/x86/kvm/cpuid.c
20277 +++ b/arch/x86/kvm/cpuid.c
20278 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20279 struct kvm_cpuid2 *cpuid,
20280 struct kvm_cpuid_entry2 __user *entries)
20281 {
20282 - int r;
20283 + int r, i;
20284
20285 r = -E2BIG;
20286 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20287 goto out;
20288 r = -EFAULT;
20289 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20290 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20291 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20292 goto out;
20293 + for (i = 0; i < cpuid->nent; ++i) {
20294 + struct kvm_cpuid_entry2 cpuid_entry;
20295 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20296 + goto out;
20297 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20298 + }
20299 vcpu->arch.cpuid_nent = cpuid->nent;
20300 kvm_apic_set_version(vcpu);
20301 kvm_x86_ops->cpuid_update(vcpu);
20302 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20303 struct kvm_cpuid2 *cpuid,
20304 struct kvm_cpuid_entry2 __user *entries)
20305 {
20306 - int r;
20307 + int r, i;
20308
20309 r = -E2BIG;
20310 if (cpuid->nent < vcpu->arch.cpuid_nent)
20311 goto out;
20312 r = -EFAULT;
20313 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20314 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20315 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20316 goto out;
20317 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20318 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20319 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20320 + goto out;
20321 + }
20322 return 0;
20323
20324 out:
20325 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20326 index 0982507..7f6d72f 100644
20327 --- a/arch/x86/kvm/emulate.c
20328 +++ b/arch/x86/kvm/emulate.c
20329 @@ -250,6 +250,7 @@ struct gprefix {
20330
20331 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20332 do { \
20333 + unsigned long _tmp; \
20334 __asm__ __volatile__ ( \
20335 _PRE_EFLAGS("0", "4", "2") \
20336 _op _suffix " %"_x"3,%1; " \
20337 @@ -264,8 +265,6 @@ struct gprefix {
20338 /* Raw emulation: instruction has two explicit operands. */
20339 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20340 do { \
20341 - unsigned long _tmp; \
20342 - \
20343 switch ((ctxt)->dst.bytes) { \
20344 case 2: \
20345 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20346 @@ -281,7 +280,6 @@ struct gprefix {
20347
20348 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20349 do { \
20350 - unsigned long _tmp; \
20351 switch ((ctxt)->dst.bytes) { \
20352 case 1: \
20353 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20354 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20355 index cfdc6e0..ab92e84 100644
20356 --- a/arch/x86/kvm/lapic.c
20357 +++ b/arch/x86/kvm/lapic.c
20358 @@ -54,7 +54,7 @@
20359 #define APIC_BUS_CYCLE_NS 1
20360
20361 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20362 -#define apic_debug(fmt, arg...)
20363 +#define apic_debug(fmt, arg...) do {} while (0)
20364
20365 #define APIC_LVT_NUM 6
20366 /* 14 is the version for Xeon and Pentium 8.4.8*/
20367 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20368 index 1561028..0ed7f14 100644
20369 --- a/arch/x86/kvm/paging_tmpl.h
20370 +++ b/arch/x86/kvm/paging_tmpl.h
20371 @@ -197,7 +197,7 @@ retry_walk:
20372 if (unlikely(kvm_is_error_hva(host_addr)))
20373 goto error;
20374
20375 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20376 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20377 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20378 goto error;
20379
20380 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20381 index e385214..f8df033 100644
20382 --- a/arch/x86/kvm/svm.c
20383 +++ b/arch/x86/kvm/svm.c
20384 @@ -3420,7 +3420,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20385 int cpu = raw_smp_processor_id();
20386
20387 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20388 +
20389 + pax_open_kernel();
20390 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20391 + pax_close_kernel();
20392 +
20393 load_TR_desc();
20394 }
20395
20396 @@ -3798,6 +3802,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20397 #endif
20398 #endif
20399
20400 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20401 + __set_fs(current_thread_info()->addr_limit);
20402 +#endif
20403 +
20404 reload_tss(vcpu);
20405
20406 local_irq_disable();
20407 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20408 index a7a6f60..04b745a 100644
20409 --- a/arch/x86/kvm/vmx.c
20410 +++ b/arch/x86/kvm/vmx.c
20411 @@ -1306,7 +1306,11 @@ static void reload_tss(void)
20412 struct desc_struct *descs;
20413
20414 descs = (void *)gdt->address;
20415 +
20416 + pax_open_kernel();
20417 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20418 + pax_close_kernel();
20419 +
20420 load_TR_desc();
20421 }
20422
20423 @@ -2637,8 +2641,11 @@ static __init int hardware_setup(void)
20424 if (!cpu_has_vmx_flexpriority())
20425 flexpriority_enabled = 0;
20426
20427 - if (!cpu_has_vmx_tpr_shadow())
20428 - kvm_x86_ops->update_cr8_intercept = NULL;
20429 + if (!cpu_has_vmx_tpr_shadow()) {
20430 + pax_open_kernel();
20431 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20432 + pax_close_kernel();
20433 + }
20434
20435 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20436 kvm_disable_largepages();
20437 @@ -3654,7 +3661,7 @@ static void vmx_set_constant_host_state(void)
20438 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20439
20440 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20441 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20442 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20443
20444 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20445 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20446 @@ -6192,6 +6199,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20447 "jmp .Lkvm_vmx_return \n\t"
20448 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20449 ".Lkvm_vmx_return: "
20450 +
20451 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20452 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20453 + ".Lkvm_vmx_return2: "
20454 +#endif
20455 +
20456 /* Save guest registers, load host registers, keep flags */
20457 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20458 "pop %0 \n\t"
20459 @@ -6240,6 +6253,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20460 #endif
20461 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20462 [wordsize]"i"(sizeof(ulong))
20463 +
20464 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20465 + ,[cs]"i"(__KERNEL_CS)
20466 +#endif
20467 +
20468 : "cc", "memory"
20469 , R"ax", R"bx", R"di", R"si"
20470 #ifdef CONFIG_X86_64
20471 @@ -6268,7 +6286,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20472 }
20473 }
20474
20475 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20476 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20477 +
20478 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20479 + loadsegment(fs, __KERNEL_PERCPU);
20480 +#endif
20481 +
20482 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20483 + __set_fs(current_thread_info()->addr_limit);
20484 +#endif
20485 +
20486 vmx->loaded_vmcs->launched = 1;
20487
20488 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20489 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20490 index 8d1c6c6..6e6d611 100644
20491 --- a/arch/x86/kvm/x86.c
20492 +++ b/arch/x86/kvm/x86.c
20493 @@ -873,6 +873,7 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
20494 return kvm_set_msr(vcpu, index, *data);
20495 }
20496
20497 +static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) __size_overflow(2);
20498 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
20499 {
20500 int version;
20501 @@ -1307,12 +1308,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
20502 return 0;
20503 }
20504
20505 +static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) __size_overflow(2);
20506 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20507 {
20508 struct kvm *kvm = vcpu->kvm;
20509 int lm = is_long_mode(vcpu);
20510 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20511 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20512 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20513 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20514 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20515 : kvm->arch.xen_hvm_config.blob_size_32;
20516 u32 page_num = data & ~PAGE_MASK;
20517 @@ -2145,6 +2147,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20518 if (n < msr_list.nmsrs)
20519 goto out;
20520 r = -EFAULT;
20521 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20522 + goto out;
20523 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20524 num_msrs_to_save * sizeof(u32)))
20525 goto out;
20526 @@ -2266,7 +2270,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20527 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20528 struct kvm_interrupt *irq)
20529 {
20530 - if (irq->irq < 0 || irq->irq >= 256)
20531 + if (irq->irq >= 256)
20532 return -EINVAL;
20533 if (irqchip_in_kernel(vcpu->kvm))
20534 return -ENXIO;
20535 @@ -3499,6 +3503,9 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
20536
20537 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20538 struct kvm_vcpu *vcpu, u32 access,
20539 + struct x86_exception *exception) __size_overflow(1,3);
20540 +static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
20541 + struct kvm_vcpu *vcpu, u32 access,
20542 struct x86_exception *exception)
20543 {
20544 void *data = val;
20545 @@ -3530,6 +3537,9 @@ out:
20546 /* used for instruction fetching */
20547 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20548 gva_t addr, void *val, unsigned int bytes,
20549 + struct x86_exception *exception) __size_overflow(2,4);
20550 +static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
20551 + gva_t addr, void *val, unsigned int bytes,
20552 struct x86_exception *exception)
20553 {
20554 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20555 @@ -3554,6 +3564,9 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
20556
20557 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20558 gva_t addr, void *val, unsigned int bytes,
20559 + struct x86_exception *exception) __size_overflow(2,4);
20560 +static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20561 + gva_t addr, void *val, unsigned int bytes,
20562 struct x86_exception *exception)
20563 {
20564 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20565 @@ -3667,12 +3680,16 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
20566 }
20567
20568 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20569 + void *val, int bytes) __size_overflow(2);
20570 +static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20571 void *val, int bytes)
20572 {
20573 return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
20574 }
20575
20576 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20577 + void *val, int bytes) __size_overflow(2);
20578 +static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
20579 void *val, int bytes)
20580 {
20581 return emulator_write_phys(vcpu, gpa, val, bytes);
20582 @@ -3823,6 +3840,12 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20583 const void *old,
20584 const void *new,
20585 unsigned int bytes,
20586 + struct x86_exception *exception) __size_overflow(5);
20587 +static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
20588 + unsigned long addr,
20589 + const void *old,
20590 + const void *new,
20591 + unsigned int bytes,
20592 struct x86_exception *exception)
20593 {
20594 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
20595 @@ -4782,7 +4805,7 @@ static void kvm_set_mmio_spte_mask(void)
20596 kvm_mmu_set_mmio_spte_mask(mask);
20597 }
20598
20599 -int kvm_arch_init(void *opaque)
20600 +int kvm_arch_init(const void *opaque)
20601 {
20602 int r;
20603 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20604 diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
20605 index cb80c29..aeee86c 100644
20606 --- a/arch/x86/kvm/x86.h
20607 +++ b/arch/x86/kvm/x86.h
20608 @@ -116,11 +116,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
20609
20610 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
20611 gva_t addr, void *val, unsigned int bytes,
20612 - struct x86_exception *exception);
20613 + struct x86_exception *exception) __size_overflow(2,4);
20614
20615 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
20616 gva_t addr, void *val, unsigned int bytes,
20617 - struct x86_exception *exception);
20618 + struct x86_exception *exception) __size_overflow(2,4);
20619
20620 extern u64 host_xcr0;
20621
20622 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20623 index 642d880..44e0f3f 100644
20624 --- a/arch/x86/lguest/boot.c
20625 +++ b/arch/x86/lguest/boot.c
20626 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20627 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20628 * Launcher to reboot us.
20629 */
20630 -static void lguest_restart(char *reason)
20631 +static __noreturn void lguest_restart(char *reason)
20632 {
20633 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20634 + BUG();
20635 }
20636
20637 /*G:050
20638 diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
20639 index 042f682..c92afb6 100644
20640 --- a/arch/x86/lib/atomic64_32.c
20641 +++ b/arch/x86/lib/atomic64_32.c
20642 @@ -8,18 +8,30 @@
20643
20644 long long atomic64_read_cx8(long long, const atomic64_t *v);
20645 EXPORT_SYMBOL(atomic64_read_cx8);
20646 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20647 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
20648 long long atomic64_set_cx8(long long, const atomic64_t *v);
20649 EXPORT_SYMBOL(atomic64_set_cx8);
20650 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
20651 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
20652 long long atomic64_xchg_cx8(long long, unsigned high);
20653 EXPORT_SYMBOL(atomic64_xchg_cx8);
20654 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
20655 EXPORT_SYMBOL(atomic64_add_return_cx8);
20656 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20657 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
20658 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
20659 EXPORT_SYMBOL(atomic64_sub_return_cx8);
20660 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20661 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
20662 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
20663 EXPORT_SYMBOL(atomic64_inc_return_cx8);
20664 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20665 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
20666 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
20667 EXPORT_SYMBOL(atomic64_dec_return_cx8);
20668 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
20669 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
20670 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
20671 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
20672 int atomic64_inc_not_zero_cx8(atomic64_t *v);
20673 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
20674 #ifndef CONFIG_X86_CMPXCHG64
20675 long long atomic64_read_386(long long, const atomic64_t *v);
20676 EXPORT_SYMBOL(atomic64_read_386);
20677 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
20678 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
20679 long long atomic64_set_386(long long, const atomic64_t *v);
20680 EXPORT_SYMBOL(atomic64_set_386);
20681 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
20682 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
20683 long long atomic64_xchg_386(long long, unsigned high);
20684 EXPORT_SYMBOL(atomic64_xchg_386);
20685 long long atomic64_add_return_386(long long a, atomic64_t *v);
20686 EXPORT_SYMBOL(atomic64_add_return_386);
20687 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20688 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
20689 long long atomic64_sub_return_386(long long a, atomic64_t *v);
20690 EXPORT_SYMBOL(atomic64_sub_return_386);
20691 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20692 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
20693 long long atomic64_inc_return_386(long long a, atomic64_t *v);
20694 EXPORT_SYMBOL(atomic64_inc_return_386);
20695 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20696 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
20697 long long atomic64_dec_return_386(long long a, atomic64_t *v);
20698 EXPORT_SYMBOL(atomic64_dec_return_386);
20699 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
20700 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
20701 long long atomic64_add_386(long long a, atomic64_t *v);
20702 EXPORT_SYMBOL(atomic64_add_386);
20703 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
20704 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
20705 long long atomic64_sub_386(long long a, atomic64_t *v);
20706 EXPORT_SYMBOL(atomic64_sub_386);
20707 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
20708 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
20709 long long atomic64_inc_386(long long a, atomic64_t *v);
20710 EXPORT_SYMBOL(atomic64_inc_386);
20711 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
20712 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
20713 long long atomic64_dec_386(long long a, atomic64_t *v);
20714 EXPORT_SYMBOL(atomic64_dec_386);
20715 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
20716 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
20717 long long atomic64_dec_if_positive_386(atomic64_t *v);
20718 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
20719 int atomic64_inc_not_zero_386(atomic64_t *v);
20720 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20721 index e8e7e0d..56fd1b0 100644
20722 --- a/arch/x86/lib/atomic64_386_32.S
20723 +++ b/arch/x86/lib/atomic64_386_32.S
20724 @@ -48,6 +48,10 @@ BEGIN(read)
20725 movl (v), %eax
20726 movl 4(v), %edx
20727 RET_ENDP
20728 +BEGIN(read_unchecked)
20729 + movl (v), %eax
20730 + movl 4(v), %edx
20731 +RET_ENDP
20732 #undef v
20733
20734 #define v %esi
20735 @@ -55,6 +59,10 @@ BEGIN(set)
20736 movl %ebx, (v)
20737 movl %ecx, 4(v)
20738 RET_ENDP
20739 +BEGIN(set_unchecked)
20740 + movl %ebx, (v)
20741 + movl %ecx, 4(v)
20742 +RET_ENDP
20743 #undef v
20744
20745 #define v %esi
20746 @@ -70,6 +78,20 @@ RET_ENDP
20747 BEGIN(add)
20748 addl %eax, (v)
20749 adcl %edx, 4(v)
20750 +
20751 +#ifdef CONFIG_PAX_REFCOUNT
20752 + jno 0f
20753 + subl %eax, (v)
20754 + sbbl %edx, 4(v)
20755 + int $4
20756 +0:
20757 + _ASM_EXTABLE(0b, 0b)
20758 +#endif
20759 +
20760 +RET_ENDP
20761 +BEGIN(add_unchecked)
20762 + addl %eax, (v)
20763 + adcl %edx, 4(v)
20764 RET_ENDP
20765 #undef v
20766
20767 @@ -77,6 +99,24 @@ RET_ENDP
20768 BEGIN(add_return)
20769 addl (v), %eax
20770 adcl 4(v), %edx
20771 +
20772 +#ifdef CONFIG_PAX_REFCOUNT
20773 + into
20774 +1234:
20775 + _ASM_EXTABLE(1234b, 2f)
20776 +#endif
20777 +
20778 + movl %eax, (v)
20779 + movl %edx, 4(v)
20780 +
20781 +#ifdef CONFIG_PAX_REFCOUNT
20782 +2:
20783 +#endif
20784 +
20785 +RET_ENDP
20786 +BEGIN(add_return_unchecked)
20787 + addl (v), %eax
20788 + adcl 4(v), %edx
20789 movl %eax, (v)
20790 movl %edx, 4(v)
20791 RET_ENDP
20792 @@ -86,6 +126,20 @@ RET_ENDP
20793 BEGIN(sub)
20794 subl %eax, (v)
20795 sbbl %edx, 4(v)
20796 +
20797 +#ifdef CONFIG_PAX_REFCOUNT
20798 + jno 0f
20799 + addl %eax, (v)
20800 + adcl %edx, 4(v)
20801 + int $4
20802 +0:
20803 + _ASM_EXTABLE(0b, 0b)
20804 +#endif
20805 +
20806 +RET_ENDP
20807 +BEGIN(sub_unchecked)
20808 + subl %eax, (v)
20809 + sbbl %edx, 4(v)
20810 RET_ENDP
20811 #undef v
20812
20813 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20814 sbbl $0, %edx
20815 addl (v), %eax
20816 adcl 4(v), %edx
20817 +
20818 +#ifdef CONFIG_PAX_REFCOUNT
20819 + into
20820 +1234:
20821 + _ASM_EXTABLE(1234b, 2f)
20822 +#endif
20823 +
20824 + movl %eax, (v)
20825 + movl %edx, 4(v)
20826 +
20827 +#ifdef CONFIG_PAX_REFCOUNT
20828 +2:
20829 +#endif
20830 +
20831 +RET_ENDP
20832 +BEGIN(sub_return_unchecked)
20833 + negl %edx
20834 + negl %eax
20835 + sbbl $0, %edx
20836 + addl (v), %eax
20837 + adcl 4(v), %edx
20838 movl %eax, (v)
20839 movl %edx, 4(v)
20840 RET_ENDP
20841 @@ -105,6 +180,20 @@ RET_ENDP
20842 BEGIN(inc)
20843 addl $1, (v)
20844 adcl $0, 4(v)
20845 +
20846 +#ifdef CONFIG_PAX_REFCOUNT
20847 + jno 0f
20848 + subl $1, (v)
20849 + sbbl $0, 4(v)
20850 + int $4
20851 +0:
20852 + _ASM_EXTABLE(0b, 0b)
20853 +#endif
20854 +
20855 +RET_ENDP
20856 +BEGIN(inc_unchecked)
20857 + addl $1, (v)
20858 + adcl $0, 4(v)
20859 RET_ENDP
20860 #undef v
20861
20862 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20863 movl 4(v), %edx
20864 addl $1, %eax
20865 adcl $0, %edx
20866 +
20867 +#ifdef CONFIG_PAX_REFCOUNT
20868 + into
20869 +1234:
20870 + _ASM_EXTABLE(1234b, 2f)
20871 +#endif
20872 +
20873 + movl %eax, (v)
20874 + movl %edx, 4(v)
20875 +
20876 +#ifdef CONFIG_PAX_REFCOUNT
20877 +2:
20878 +#endif
20879 +
20880 +RET_ENDP
20881 +BEGIN(inc_return_unchecked)
20882 + movl (v), %eax
20883 + movl 4(v), %edx
20884 + addl $1, %eax
20885 + adcl $0, %edx
20886 movl %eax, (v)
20887 movl %edx, 4(v)
20888 RET_ENDP
20889 @@ -123,6 +232,20 @@ RET_ENDP
20890 BEGIN(dec)
20891 subl $1, (v)
20892 sbbl $0, 4(v)
20893 +
20894 +#ifdef CONFIG_PAX_REFCOUNT
20895 + jno 0f
20896 + addl $1, (v)
20897 + adcl $0, 4(v)
20898 + int $4
20899 +0:
20900 + _ASM_EXTABLE(0b, 0b)
20901 +#endif
20902 +
20903 +RET_ENDP
20904 +BEGIN(dec_unchecked)
20905 + subl $1, (v)
20906 + sbbl $0, 4(v)
20907 RET_ENDP
20908 #undef v
20909
20910 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20911 movl 4(v), %edx
20912 subl $1, %eax
20913 sbbl $0, %edx
20914 +
20915 +#ifdef CONFIG_PAX_REFCOUNT
20916 + into
20917 +1234:
20918 + _ASM_EXTABLE(1234b, 2f)
20919 +#endif
20920 +
20921 + movl %eax, (v)
20922 + movl %edx, 4(v)
20923 +
20924 +#ifdef CONFIG_PAX_REFCOUNT
20925 +2:
20926 +#endif
20927 +
20928 +RET_ENDP
20929 +BEGIN(dec_return_unchecked)
20930 + movl (v), %eax
20931 + movl 4(v), %edx
20932 + subl $1, %eax
20933 + sbbl $0, %edx
20934 movl %eax, (v)
20935 movl %edx, 4(v)
20936 RET_ENDP
20937 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20938 adcl %edx, %edi
20939 addl (v), %eax
20940 adcl 4(v), %edx
20941 +
20942 +#ifdef CONFIG_PAX_REFCOUNT
20943 + into
20944 +1234:
20945 + _ASM_EXTABLE(1234b, 2f)
20946 +#endif
20947 +
20948 cmpl %eax, %esi
20949 je 3f
20950 1:
20951 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20952 1:
20953 addl $1, %eax
20954 adcl $0, %edx
20955 +
20956 +#ifdef CONFIG_PAX_REFCOUNT
20957 + into
20958 +1234:
20959 + _ASM_EXTABLE(1234b, 2f)
20960 +#endif
20961 +
20962 movl %eax, (v)
20963 movl %edx, 4(v)
20964 movl $1, %eax
20965 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20966 movl 4(v), %edx
20967 subl $1, %eax
20968 sbbl $0, %edx
20969 +
20970 +#ifdef CONFIG_PAX_REFCOUNT
20971 + into
20972 +1234:
20973 + _ASM_EXTABLE(1234b, 1f)
20974 +#endif
20975 +
20976 js 1f
20977 movl %eax, (v)
20978 movl %edx, 4(v)
20979 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20980 index 391a083..3a2cf39 100644
20981 --- a/arch/x86/lib/atomic64_cx8_32.S
20982 +++ b/arch/x86/lib/atomic64_cx8_32.S
20983 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20984 CFI_STARTPROC
20985
20986 read64 %ecx
20987 + pax_force_retaddr
20988 ret
20989 CFI_ENDPROC
20990 ENDPROC(atomic64_read_cx8)
20991
20992 +ENTRY(atomic64_read_unchecked_cx8)
20993 + CFI_STARTPROC
20994 +
20995 + read64 %ecx
20996 + pax_force_retaddr
20997 + ret
20998 + CFI_ENDPROC
20999 +ENDPROC(atomic64_read_unchecked_cx8)
21000 +
21001 ENTRY(atomic64_set_cx8)
21002 CFI_STARTPROC
21003
21004 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
21005 cmpxchg8b (%esi)
21006 jne 1b
21007
21008 + pax_force_retaddr
21009 ret
21010 CFI_ENDPROC
21011 ENDPROC(atomic64_set_cx8)
21012
21013 +ENTRY(atomic64_set_unchecked_cx8)
21014 + CFI_STARTPROC
21015 +
21016 +1:
21017 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
21018 + * are atomic on 586 and newer */
21019 + cmpxchg8b (%esi)
21020 + jne 1b
21021 +
21022 + pax_force_retaddr
21023 + ret
21024 + CFI_ENDPROC
21025 +ENDPROC(atomic64_set_unchecked_cx8)
21026 +
21027 ENTRY(atomic64_xchg_cx8)
21028 CFI_STARTPROC
21029
21030 @@ -62,12 +87,13 @@ ENTRY(atomic64_xchg_cx8)
21031 cmpxchg8b (%esi)
21032 jne 1b
21033
21034 + pax_force_retaddr
21035 ret
21036 CFI_ENDPROC
21037 ENDPROC(atomic64_xchg_cx8)
21038
21039 -.macro addsub_return func ins insc
21040 -ENTRY(atomic64_\func\()_return_cx8)
21041 +.macro addsub_return func ins insc unchecked=""
21042 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21043 CFI_STARTPROC
21044 SAVE ebp
21045 SAVE ebx
21046 @@ -84,27 +110,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21047 movl %edx, %ecx
21048 \ins\()l %esi, %ebx
21049 \insc\()l %edi, %ecx
21050 +
21051 +.ifb \unchecked
21052 +#ifdef CONFIG_PAX_REFCOUNT
21053 + into
21054 +2:
21055 + _ASM_EXTABLE(2b, 3f)
21056 +#endif
21057 +.endif
21058 +
21059 LOCK_PREFIX
21060 cmpxchg8b (%ebp)
21061 jne 1b
21062 -
21063 -10:
21064 movl %ebx, %eax
21065 movl %ecx, %edx
21066 +
21067 +.ifb \unchecked
21068 +#ifdef CONFIG_PAX_REFCOUNT
21069 +3:
21070 +#endif
21071 +.endif
21072 +
21073 RESTORE edi
21074 RESTORE esi
21075 RESTORE ebx
21076 RESTORE ebp
21077 + pax_force_retaddr
21078 ret
21079 CFI_ENDPROC
21080 -ENDPROC(atomic64_\func\()_return_cx8)
21081 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21082 .endm
21083
21084 addsub_return add add adc
21085 addsub_return sub sub sbb
21086 +addsub_return add add adc _unchecked
21087 +addsub_return sub sub sbb _unchecked
21088
21089 -.macro incdec_return func ins insc
21090 -ENTRY(atomic64_\func\()_return_cx8)
21091 +.macro incdec_return func ins insc unchecked=""
21092 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21093 CFI_STARTPROC
21094 SAVE ebx
21095
21096 @@ -114,21 +157,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21097 movl %edx, %ecx
21098 \ins\()l $1, %ebx
21099 \insc\()l $0, %ecx
21100 +
21101 +.ifb \unchecked
21102 +#ifdef CONFIG_PAX_REFCOUNT
21103 + into
21104 +2:
21105 + _ASM_EXTABLE(2b, 3f)
21106 +#endif
21107 +.endif
21108 +
21109 LOCK_PREFIX
21110 cmpxchg8b (%esi)
21111 jne 1b
21112
21113 -10:
21114 movl %ebx, %eax
21115 movl %ecx, %edx
21116 +
21117 +.ifb \unchecked
21118 +#ifdef CONFIG_PAX_REFCOUNT
21119 +3:
21120 +#endif
21121 +.endif
21122 +
21123 RESTORE ebx
21124 + pax_force_retaddr
21125 ret
21126 CFI_ENDPROC
21127 -ENDPROC(atomic64_\func\()_return_cx8)
21128 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21129 .endm
21130
21131 incdec_return inc add adc
21132 incdec_return dec sub sbb
21133 +incdec_return inc add adc _unchecked
21134 +incdec_return dec sub sbb _unchecked
21135
21136 ENTRY(atomic64_dec_if_positive_cx8)
21137 CFI_STARTPROC
21138 @@ -140,6 +201,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21139 movl %edx, %ecx
21140 subl $1, %ebx
21141 sbb $0, %ecx
21142 +
21143 +#ifdef CONFIG_PAX_REFCOUNT
21144 + into
21145 +1234:
21146 + _ASM_EXTABLE(1234b, 2f)
21147 +#endif
21148 +
21149 js 2f
21150 LOCK_PREFIX
21151 cmpxchg8b (%esi)
21152 @@ -149,6 +217,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21153 movl %ebx, %eax
21154 movl %ecx, %edx
21155 RESTORE ebx
21156 + pax_force_retaddr
21157 ret
21158 CFI_ENDPROC
21159 ENDPROC(atomic64_dec_if_positive_cx8)
21160 @@ -174,6 +243,13 @@ ENTRY(atomic64_add_unless_cx8)
21161 movl %edx, %ecx
21162 addl %esi, %ebx
21163 adcl %edi, %ecx
21164 +
21165 +#ifdef CONFIG_PAX_REFCOUNT
21166 + into
21167 +1234:
21168 + _ASM_EXTABLE(1234b, 3f)
21169 +#endif
21170 +
21171 LOCK_PREFIX
21172 cmpxchg8b (%ebp)
21173 jne 1b
21174 @@ -184,6 +260,7 @@ ENTRY(atomic64_add_unless_cx8)
21175 CFI_ADJUST_CFA_OFFSET -8
21176 RESTORE ebx
21177 RESTORE ebp
21178 + pax_force_retaddr
21179 ret
21180 4:
21181 cmpl %edx, 4(%esp)
21182 @@ -206,6 +283,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21183 movl %edx, %ecx
21184 addl $1, %ebx
21185 adcl $0, %ecx
21186 +
21187 +#ifdef CONFIG_PAX_REFCOUNT
21188 + into
21189 +1234:
21190 + _ASM_EXTABLE(1234b, 3f)
21191 +#endif
21192 +
21193 LOCK_PREFIX
21194 cmpxchg8b (%esi)
21195 jne 1b
21196 @@ -213,6 +297,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21197 movl $1, %eax
21198 3:
21199 RESTORE ebx
21200 + pax_force_retaddr
21201 ret
21202 4:
21203 testl %edx, %edx
21204 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21205 index 78d16a5..fbcf666 100644
21206 --- a/arch/x86/lib/checksum_32.S
21207 +++ b/arch/x86/lib/checksum_32.S
21208 @@ -28,7 +28,8 @@
21209 #include <linux/linkage.h>
21210 #include <asm/dwarf2.h>
21211 #include <asm/errno.h>
21212 -
21213 +#include <asm/segment.h>
21214 +
21215 /*
21216 * computes a partial checksum, e.g. for TCP/UDP fragments
21217 */
21218 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21219
21220 #define ARGBASE 16
21221 #define FP 12
21222 -
21223 -ENTRY(csum_partial_copy_generic)
21224 +
21225 +ENTRY(csum_partial_copy_generic_to_user)
21226 CFI_STARTPROC
21227 +
21228 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21229 + pushl_cfi %gs
21230 + popl_cfi %es
21231 + jmp csum_partial_copy_generic
21232 +#endif
21233 +
21234 +ENTRY(csum_partial_copy_generic_from_user)
21235 +
21236 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21237 + pushl_cfi %gs
21238 + popl_cfi %ds
21239 +#endif
21240 +
21241 +ENTRY(csum_partial_copy_generic)
21242 subl $4,%esp
21243 CFI_ADJUST_CFA_OFFSET 4
21244 pushl_cfi %edi
21245 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21246 jmp 4f
21247 SRC(1: movw (%esi), %bx )
21248 addl $2, %esi
21249 -DST( movw %bx, (%edi) )
21250 +DST( movw %bx, %es:(%edi) )
21251 addl $2, %edi
21252 addw %bx, %ax
21253 adcl $0, %eax
21254 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21255 SRC(1: movl (%esi), %ebx )
21256 SRC( movl 4(%esi), %edx )
21257 adcl %ebx, %eax
21258 -DST( movl %ebx, (%edi) )
21259 +DST( movl %ebx, %es:(%edi) )
21260 adcl %edx, %eax
21261 -DST( movl %edx, 4(%edi) )
21262 +DST( movl %edx, %es:4(%edi) )
21263
21264 SRC( movl 8(%esi), %ebx )
21265 SRC( movl 12(%esi), %edx )
21266 adcl %ebx, %eax
21267 -DST( movl %ebx, 8(%edi) )
21268 +DST( movl %ebx, %es:8(%edi) )
21269 adcl %edx, %eax
21270 -DST( movl %edx, 12(%edi) )
21271 +DST( movl %edx, %es:12(%edi) )
21272
21273 SRC( movl 16(%esi), %ebx )
21274 SRC( movl 20(%esi), %edx )
21275 adcl %ebx, %eax
21276 -DST( movl %ebx, 16(%edi) )
21277 +DST( movl %ebx, %es:16(%edi) )
21278 adcl %edx, %eax
21279 -DST( movl %edx, 20(%edi) )
21280 +DST( movl %edx, %es:20(%edi) )
21281
21282 SRC( movl 24(%esi), %ebx )
21283 SRC( movl 28(%esi), %edx )
21284 adcl %ebx, %eax
21285 -DST( movl %ebx, 24(%edi) )
21286 +DST( movl %ebx, %es:24(%edi) )
21287 adcl %edx, %eax
21288 -DST( movl %edx, 28(%edi) )
21289 +DST( movl %edx, %es:28(%edi) )
21290
21291 lea 32(%esi), %esi
21292 lea 32(%edi), %edi
21293 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21294 shrl $2, %edx # This clears CF
21295 SRC(3: movl (%esi), %ebx )
21296 adcl %ebx, %eax
21297 -DST( movl %ebx, (%edi) )
21298 +DST( movl %ebx, %es:(%edi) )
21299 lea 4(%esi), %esi
21300 lea 4(%edi), %edi
21301 dec %edx
21302 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21303 jb 5f
21304 SRC( movw (%esi), %cx )
21305 leal 2(%esi), %esi
21306 -DST( movw %cx, (%edi) )
21307 +DST( movw %cx, %es:(%edi) )
21308 leal 2(%edi), %edi
21309 je 6f
21310 shll $16,%ecx
21311 SRC(5: movb (%esi), %cl )
21312 -DST( movb %cl, (%edi) )
21313 +DST( movb %cl, %es:(%edi) )
21314 6: addl %ecx, %eax
21315 adcl $0, %eax
21316 7:
21317 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21318
21319 6001:
21320 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21321 - movl $-EFAULT, (%ebx)
21322 + movl $-EFAULT, %ss:(%ebx)
21323
21324 # zero the complete destination - computing the rest
21325 # is too much work
21326 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21327
21328 6002:
21329 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21330 - movl $-EFAULT,(%ebx)
21331 + movl $-EFAULT,%ss:(%ebx)
21332 jmp 5000b
21333
21334 .previous
21335
21336 + pushl_cfi %ss
21337 + popl_cfi %ds
21338 + pushl_cfi %ss
21339 + popl_cfi %es
21340 popl_cfi %ebx
21341 CFI_RESTORE ebx
21342 popl_cfi %esi
21343 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21344 popl_cfi %ecx # equivalent to addl $4,%esp
21345 ret
21346 CFI_ENDPROC
21347 -ENDPROC(csum_partial_copy_generic)
21348 +ENDPROC(csum_partial_copy_generic_to_user)
21349
21350 #else
21351
21352 /* Version for PentiumII/PPro */
21353
21354 #define ROUND1(x) \
21355 + nop; nop; nop; \
21356 SRC(movl x(%esi), %ebx ) ; \
21357 addl %ebx, %eax ; \
21358 - DST(movl %ebx, x(%edi) ) ;
21359 + DST(movl %ebx, %es:x(%edi)) ;
21360
21361 #define ROUND(x) \
21362 + nop; nop; nop; \
21363 SRC(movl x(%esi), %ebx ) ; \
21364 adcl %ebx, %eax ; \
21365 - DST(movl %ebx, x(%edi) ) ;
21366 + DST(movl %ebx, %es:x(%edi)) ;
21367
21368 #define ARGBASE 12
21369 -
21370 -ENTRY(csum_partial_copy_generic)
21371 +
21372 +ENTRY(csum_partial_copy_generic_to_user)
21373 CFI_STARTPROC
21374 +
21375 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21376 + pushl_cfi %gs
21377 + popl_cfi %es
21378 + jmp csum_partial_copy_generic
21379 +#endif
21380 +
21381 +ENTRY(csum_partial_copy_generic_from_user)
21382 +
21383 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21384 + pushl_cfi %gs
21385 + popl_cfi %ds
21386 +#endif
21387 +
21388 +ENTRY(csum_partial_copy_generic)
21389 pushl_cfi %ebx
21390 CFI_REL_OFFSET ebx, 0
21391 pushl_cfi %edi
21392 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21393 subl %ebx, %edi
21394 lea -1(%esi),%edx
21395 andl $-32,%edx
21396 - lea 3f(%ebx,%ebx), %ebx
21397 + lea 3f(%ebx,%ebx,2), %ebx
21398 testl %esi, %esi
21399 jmp *%ebx
21400 1: addl $64,%esi
21401 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21402 jb 5f
21403 SRC( movw (%esi), %dx )
21404 leal 2(%esi), %esi
21405 -DST( movw %dx, (%edi) )
21406 +DST( movw %dx, %es:(%edi) )
21407 leal 2(%edi), %edi
21408 je 6f
21409 shll $16,%edx
21410 5:
21411 SRC( movb (%esi), %dl )
21412 -DST( movb %dl, (%edi) )
21413 +DST( movb %dl, %es:(%edi) )
21414 6: addl %edx, %eax
21415 adcl $0, %eax
21416 7:
21417 .section .fixup, "ax"
21418 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21419 - movl $-EFAULT, (%ebx)
21420 + movl $-EFAULT, %ss:(%ebx)
21421 # zero the complete destination (computing the rest is too much work)
21422 movl ARGBASE+8(%esp),%edi # dst
21423 movl ARGBASE+12(%esp),%ecx # len
21424 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21425 rep; stosb
21426 jmp 7b
21427 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21428 - movl $-EFAULT, (%ebx)
21429 + movl $-EFAULT, %ss:(%ebx)
21430 jmp 7b
21431 .previous
21432
21433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21434 + pushl_cfi %ss
21435 + popl_cfi %ds
21436 + pushl_cfi %ss
21437 + popl_cfi %es
21438 +#endif
21439 +
21440 popl_cfi %esi
21441 CFI_RESTORE esi
21442 popl_cfi %edi
21443 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21444 CFI_RESTORE ebx
21445 ret
21446 CFI_ENDPROC
21447 -ENDPROC(csum_partial_copy_generic)
21448 +ENDPROC(csum_partial_copy_generic_to_user)
21449
21450 #undef ROUND
21451 #undef ROUND1
21452 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21453 index f2145cf..cea889d 100644
21454 --- a/arch/x86/lib/clear_page_64.S
21455 +++ b/arch/x86/lib/clear_page_64.S
21456 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21457 movl $4096/8,%ecx
21458 xorl %eax,%eax
21459 rep stosq
21460 + pax_force_retaddr
21461 ret
21462 CFI_ENDPROC
21463 ENDPROC(clear_page_c)
21464 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21465 movl $4096,%ecx
21466 xorl %eax,%eax
21467 rep stosb
21468 + pax_force_retaddr
21469 ret
21470 CFI_ENDPROC
21471 ENDPROC(clear_page_c_e)
21472 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21473 leaq 64(%rdi),%rdi
21474 jnz .Lloop
21475 nop
21476 + pax_force_retaddr
21477 ret
21478 CFI_ENDPROC
21479 .Lclear_page_end:
21480 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21481
21482 #include <asm/cpufeature.h>
21483
21484 - .section .altinstr_replacement,"ax"
21485 + .section .altinstr_replacement,"a"
21486 1: .byte 0xeb /* jmp <disp8> */
21487 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21488 2: .byte 0xeb /* jmp <disp8> */
21489 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21490 index 1e572c5..2a162cd 100644
21491 --- a/arch/x86/lib/cmpxchg16b_emu.S
21492 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21493 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21494
21495 popf
21496 mov $1, %al
21497 + pax_force_retaddr
21498 ret
21499
21500 not_same:
21501 popf
21502 xor %al,%al
21503 + pax_force_retaddr
21504 ret
21505
21506 CFI_ENDPROC
21507 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21508 index 01c805b..dccb07f 100644
21509 --- a/arch/x86/lib/copy_page_64.S
21510 +++ b/arch/x86/lib/copy_page_64.S
21511 @@ -9,6 +9,7 @@ copy_page_c:
21512 CFI_STARTPROC
21513 movl $4096/8,%ecx
21514 rep movsq
21515 + pax_force_retaddr
21516 ret
21517 CFI_ENDPROC
21518 ENDPROC(copy_page_c)
21519 @@ -39,7 +40,7 @@ ENTRY(copy_page)
21520 movq 16 (%rsi), %rdx
21521 movq 24 (%rsi), %r8
21522 movq 32 (%rsi), %r9
21523 - movq 40 (%rsi), %r10
21524 + movq 40 (%rsi), %r13
21525 movq 48 (%rsi), %r11
21526 movq 56 (%rsi), %r12
21527
21528 @@ -50,7 +51,7 @@ ENTRY(copy_page)
21529 movq %rdx, 16 (%rdi)
21530 movq %r8, 24 (%rdi)
21531 movq %r9, 32 (%rdi)
21532 - movq %r10, 40 (%rdi)
21533 + movq %r13, 40 (%rdi)
21534 movq %r11, 48 (%rdi)
21535 movq %r12, 56 (%rdi)
21536
21537 @@ -69,7 +70,7 @@ ENTRY(copy_page)
21538 movq 16 (%rsi), %rdx
21539 movq 24 (%rsi), %r8
21540 movq 32 (%rsi), %r9
21541 - movq 40 (%rsi), %r10
21542 + movq 40 (%rsi), %r13
21543 movq 48 (%rsi), %r11
21544 movq 56 (%rsi), %r12
21545
21546 @@ -78,7 +79,7 @@ ENTRY(copy_page)
21547 movq %rdx, 16 (%rdi)
21548 movq %r8, 24 (%rdi)
21549 movq %r9, 32 (%rdi)
21550 - movq %r10, 40 (%rdi)
21551 + movq %r13, 40 (%rdi)
21552 movq %r11, 48 (%rdi)
21553 movq %r12, 56 (%rdi)
21554
21555 @@ -95,6 +96,7 @@ ENTRY(copy_page)
21556 CFI_RESTORE r13
21557 addq $3*8,%rsp
21558 CFI_ADJUST_CFA_OFFSET -3*8
21559 + pax_force_retaddr
21560 ret
21561 .Lcopy_page_end:
21562 CFI_ENDPROC
21563 @@ -105,7 +107,7 @@ ENDPROC(copy_page)
21564
21565 #include <asm/cpufeature.h>
21566
21567 - .section .altinstr_replacement,"ax"
21568 + .section .altinstr_replacement,"a"
21569 1: .byte 0xeb /* jmp <disp8> */
21570 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21571 2:
21572 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21573 index 0248402..821c786 100644
21574 --- a/arch/x86/lib/copy_user_64.S
21575 +++ b/arch/x86/lib/copy_user_64.S
21576 @@ -16,6 +16,7 @@
21577 #include <asm/thread_info.h>
21578 #include <asm/cpufeature.h>
21579 #include <asm/alternative-asm.h>
21580 +#include <asm/pgtable.h>
21581
21582 /*
21583 * By placing feature2 after feature1 in altinstructions section, we logically
21584 @@ -29,7 +30,7 @@
21585 .byte 0xe9 /* 32bit jump */
21586 .long \orig-1f /* by default jump to orig */
21587 1:
21588 - .section .altinstr_replacement,"ax"
21589 + .section .altinstr_replacement,"a"
21590 2: .byte 0xe9 /* near jump with 32bit immediate */
21591 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21592 3: .byte 0xe9 /* near jump with 32bit immediate */
21593 @@ -71,47 +72,20 @@
21594 #endif
21595 .endm
21596
21597 -/* Standard copy_to_user with segment limit checking */
21598 -ENTRY(_copy_to_user)
21599 - CFI_STARTPROC
21600 - GET_THREAD_INFO(%rax)
21601 - movq %rdi,%rcx
21602 - addq %rdx,%rcx
21603 - jc bad_to_user
21604 - cmpq TI_addr_limit(%rax),%rcx
21605 - ja bad_to_user
21606 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21607 - copy_user_generic_unrolled,copy_user_generic_string, \
21608 - copy_user_enhanced_fast_string
21609 - CFI_ENDPROC
21610 -ENDPROC(_copy_to_user)
21611 -
21612 -/* Standard copy_from_user with segment limit checking */
21613 -ENTRY(_copy_from_user)
21614 - CFI_STARTPROC
21615 - GET_THREAD_INFO(%rax)
21616 - movq %rsi,%rcx
21617 - addq %rdx,%rcx
21618 - jc bad_from_user
21619 - cmpq TI_addr_limit(%rax),%rcx
21620 - ja bad_from_user
21621 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21622 - copy_user_generic_unrolled,copy_user_generic_string, \
21623 - copy_user_enhanced_fast_string
21624 - CFI_ENDPROC
21625 -ENDPROC(_copy_from_user)
21626 -
21627 .section .fixup,"ax"
21628 /* must zero dest */
21629 ENTRY(bad_from_user)
21630 bad_from_user:
21631 CFI_STARTPROC
21632 + testl %edx,%edx
21633 + js bad_to_user
21634 movl %edx,%ecx
21635 xorl %eax,%eax
21636 rep
21637 stosb
21638 bad_to_user:
21639 movl %edx,%eax
21640 + pax_force_retaddr
21641 ret
21642 CFI_ENDPROC
21643 ENDPROC(bad_from_user)
21644 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21645 jz 17f
21646 1: movq (%rsi),%r8
21647 2: movq 1*8(%rsi),%r9
21648 -3: movq 2*8(%rsi),%r10
21649 +3: movq 2*8(%rsi),%rax
21650 4: movq 3*8(%rsi),%r11
21651 5: movq %r8,(%rdi)
21652 6: movq %r9,1*8(%rdi)
21653 -7: movq %r10,2*8(%rdi)
21654 +7: movq %rax,2*8(%rdi)
21655 8: movq %r11,3*8(%rdi)
21656 9: movq 4*8(%rsi),%r8
21657 10: movq 5*8(%rsi),%r9
21658 -11: movq 6*8(%rsi),%r10
21659 +11: movq 6*8(%rsi),%rax
21660 12: movq 7*8(%rsi),%r11
21661 13: movq %r8,4*8(%rdi)
21662 14: movq %r9,5*8(%rdi)
21663 -15: movq %r10,6*8(%rdi)
21664 +15: movq %rax,6*8(%rdi)
21665 16: movq %r11,7*8(%rdi)
21666 leaq 64(%rsi),%rsi
21667 leaq 64(%rdi),%rdi
21668 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21669 decl %ecx
21670 jnz 21b
21671 23: xor %eax,%eax
21672 + pax_force_retaddr
21673 ret
21674
21675 .section .fixup,"ax"
21676 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21677 3: rep
21678 movsb
21679 4: xorl %eax,%eax
21680 + pax_force_retaddr
21681 ret
21682
21683 .section .fixup,"ax"
21684 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21685 1: rep
21686 movsb
21687 2: xorl %eax,%eax
21688 + pax_force_retaddr
21689 ret
21690
21691 .section .fixup,"ax"
21692 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21693 index cb0c112..e3a6895 100644
21694 --- a/arch/x86/lib/copy_user_nocache_64.S
21695 +++ b/arch/x86/lib/copy_user_nocache_64.S
21696 @@ -8,12 +8,14 @@
21697
21698 #include <linux/linkage.h>
21699 #include <asm/dwarf2.h>
21700 +#include <asm/alternative-asm.h>
21701
21702 #define FIX_ALIGNMENT 1
21703
21704 #include <asm/current.h>
21705 #include <asm/asm-offsets.h>
21706 #include <asm/thread_info.h>
21707 +#include <asm/pgtable.h>
21708
21709 .macro ALIGN_DESTINATION
21710 #ifdef FIX_ALIGNMENT
21711 @@ -50,6 +52,15 @@
21712 */
21713 ENTRY(__copy_user_nocache)
21714 CFI_STARTPROC
21715 +
21716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21717 + mov $PAX_USER_SHADOW_BASE,%rcx
21718 + cmp %rcx,%rsi
21719 + jae 1f
21720 + add %rcx,%rsi
21721 +1:
21722 +#endif
21723 +
21724 cmpl $8,%edx
21725 jb 20f /* less then 8 bytes, go to byte copy loop */
21726 ALIGN_DESTINATION
21727 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21728 jz 17f
21729 1: movq (%rsi),%r8
21730 2: movq 1*8(%rsi),%r9
21731 -3: movq 2*8(%rsi),%r10
21732 +3: movq 2*8(%rsi),%rax
21733 4: movq 3*8(%rsi),%r11
21734 5: movnti %r8,(%rdi)
21735 6: movnti %r9,1*8(%rdi)
21736 -7: movnti %r10,2*8(%rdi)
21737 +7: movnti %rax,2*8(%rdi)
21738 8: movnti %r11,3*8(%rdi)
21739 9: movq 4*8(%rsi),%r8
21740 10: movq 5*8(%rsi),%r9
21741 -11: movq 6*8(%rsi),%r10
21742 +11: movq 6*8(%rsi),%rax
21743 12: movq 7*8(%rsi),%r11
21744 13: movnti %r8,4*8(%rdi)
21745 14: movnti %r9,5*8(%rdi)
21746 -15: movnti %r10,6*8(%rdi)
21747 +15: movnti %rax,6*8(%rdi)
21748 16: movnti %r11,7*8(%rdi)
21749 leaq 64(%rsi),%rsi
21750 leaq 64(%rdi),%rdi
21751 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21752 jnz 21b
21753 23: xorl %eax,%eax
21754 sfence
21755 + pax_force_retaddr
21756 ret
21757
21758 .section .fixup,"ax"
21759 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21760 index fb903b7..c92b7f7 100644
21761 --- a/arch/x86/lib/csum-copy_64.S
21762 +++ b/arch/x86/lib/csum-copy_64.S
21763 @@ -8,6 +8,7 @@
21764 #include <linux/linkage.h>
21765 #include <asm/dwarf2.h>
21766 #include <asm/errno.h>
21767 +#include <asm/alternative-asm.h>
21768
21769 /*
21770 * Checksum copy with exception handling.
21771 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21772 CFI_RESTORE rbp
21773 addq $7*8, %rsp
21774 CFI_ADJUST_CFA_OFFSET -7*8
21775 + pax_force_retaddr 0, 1
21776 ret
21777 CFI_RESTORE_STATE
21778
21779 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21780 index 459b58a..9570bc7 100644
21781 --- a/arch/x86/lib/csum-wrappers_64.c
21782 +++ b/arch/x86/lib/csum-wrappers_64.c
21783 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21784 len -= 2;
21785 }
21786 }
21787 - isum = csum_partial_copy_generic((__force const void *)src,
21788 +
21789 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21790 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21791 + src += PAX_USER_SHADOW_BASE;
21792 +#endif
21793 +
21794 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21795 dst, len, isum, errp, NULL);
21796 if (unlikely(*errp))
21797 goto out_err;
21798 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21799 }
21800
21801 *errp = 0;
21802 - return csum_partial_copy_generic(src, (void __force *)dst,
21803 +
21804 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21805 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21806 + dst += PAX_USER_SHADOW_BASE;
21807 +#endif
21808 +
21809 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21810 len, isum, NULL, errp);
21811 }
21812 EXPORT_SYMBOL(csum_partial_copy_to_user);
21813 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21814 index 51f1504..ddac4c1 100644
21815 --- a/arch/x86/lib/getuser.S
21816 +++ b/arch/x86/lib/getuser.S
21817 @@ -33,15 +33,38 @@
21818 #include <asm/asm-offsets.h>
21819 #include <asm/thread_info.h>
21820 #include <asm/asm.h>
21821 +#include <asm/segment.h>
21822 +#include <asm/pgtable.h>
21823 +#include <asm/alternative-asm.h>
21824 +
21825 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21826 +#define __copyuser_seg gs;
21827 +#else
21828 +#define __copyuser_seg
21829 +#endif
21830
21831 .text
21832 ENTRY(__get_user_1)
21833 CFI_STARTPROC
21834 +
21835 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21836 GET_THREAD_INFO(%_ASM_DX)
21837 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21838 jae bad_get_user
21839 -1: movzb (%_ASM_AX),%edx
21840 +
21841 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21842 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21843 + cmp %_ASM_DX,%_ASM_AX
21844 + jae 1234f
21845 + add %_ASM_DX,%_ASM_AX
21846 +1234:
21847 +#endif
21848 +
21849 +#endif
21850 +
21851 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21852 xor %eax,%eax
21853 + pax_force_retaddr
21854 ret
21855 CFI_ENDPROC
21856 ENDPROC(__get_user_1)
21857 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21858 ENTRY(__get_user_2)
21859 CFI_STARTPROC
21860 add $1,%_ASM_AX
21861 +
21862 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21863 jc bad_get_user
21864 GET_THREAD_INFO(%_ASM_DX)
21865 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21866 jae bad_get_user
21867 -2: movzwl -1(%_ASM_AX),%edx
21868 +
21869 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21870 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21871 + cmp %_ASM_DX,%_ASM_AX
21872 + jae 1234f
21873 + add %_ASM_DX,%_ASM_AX
21874 +1234:
21875 +#endif
21876 +
21877 +#endif
21878 +
21879 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21880 xor %eax,%eax
21881 + pax_force_retaddr
21882 ret
21883 CFI_ENDPROC
21884 ENDPROC(__get_user_2)
21885 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21886 ENTRY(__get_user_4)
21887 CFI_STARTPROC
21888 add $3,%_ASM_AX
21889 +
21890 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21891 jc bad_get_user
21892 GET_THREAD_INFO(%_ASM_DX)
21893 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21894 jae bad_get_user
21895 -3: mov -3(%_ASM_AX),%edx
21896 +
21897 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21898 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21899 + cmp %_ASM_DX,%_ASM_AX
21900 + jae 1234f
21901 + add %_ASM_DX,%_ASM_AX
21902 +1234:
21903 +#endif
21904 +
21905 +#endif
21906 +
21907 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21908 xor %eax,%eax
21909 + pax_force_retaddr
21910 ret
21911 CFI_ENDPROC
21912 ENDPROC(__get_user_4)
21913 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21914 GET_THREAD_INFO(%_ASM_DX)
21915 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21916 jae bad_get_user
21917 +
21918 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21919 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21920 + cmp %_ASM_DX,%_ASM_AX
21921 + jae 1234f
21922 + add %_ASM_DX,%_ASM_AX
21923 +1234:
21924 +#endif
21925 +
21926 4: movq -7(%_ASM_AX),%_ASM_DX
21927 xor %eax,%eax
21928 + pax_force_retaddr
21929 ret
21930 CFI_ENDPROC
21931 ENDPROC(__get_user_8)
21932 @@ -91,6 +152,7 @@ bad_get_user:
21933 CFI_STARTPROC
21934 xor %edx,%edx
21935 mov $(-EFAULT),%_ASM_AX
21936 + pax_force_retaddr
21937 ret
21938 CFI_ENDPROC
21939 END(bad_get_user)
21940 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21941 index 5a1f9f3..ba9f577 100644
21942 --- a/arch/x86/lib/insn.c
21943 +++ b/arch/x86/lib/insn.c
21944 @@ -21,6 +21,11 @@
21945 #include <linux/string.h>
21946 #include <asm/inat.h>
21947 #include <asm/insn.h>
21948 +#ifdef __KERNEL__
21949 +#include <asm/pgtable_types.h>
21950 +#else
21951 +#define ktla_ktva(addr) addr
21952 +#endif
21953
21954 /* Verify next sizeof(t) bytes can be on the same instruction */
21955 #define validate_next(t, insn, n) \
21956 @@ -49,8 +54,8 @@
21957 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21958 {
21959 memset(insn, 0, sizeof(*insn));
21960 - insn->kaddr = kaddr;
21961 - insn->next_byte = kaddr;
21962 + insn->kaddr = ktla_ktva(kaddr);
21963 + insn->next_byte = ktla_ktva(kaddr);
21964 insn->x86_64 = x86_64 ? 1 : 0;
21965 insn->opnd_bytes = 4;
21966 if (x86_64)
21967 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21968 index 05a95e7..326f2fa 100644
21969 --- a/arch/x86/lib/iomap_copy_64.S
21970 +++ b/arch/x86/lib/iomap_copy_64.S
21971 @@ -17,6 +17,7 @@
21972
21973 #include <linux/linkage.h>
21974 #include <asm/dwarf2.h>
21975 +#include <asm/alternative-asm.h>
21976
21977 /*
21978 * override generic version in lib/iomap_copy.c
21979 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21980 CFI_STARTPROC
21981 movl %edx,%ecx
21982 rep movsd
21983 + pax_force_retaddr
21984 ret
21985 CFI_ENDPROC
21986 ENDPROC(__iowrite32_copy)
21987 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21988 index efbf2a0..8893637 100644
21989 --- a/arch/x86/lib/memcpy_64.S
21990 +++ b/arch/x86/lib/memcpy_64.S
21991 @@ -34,6 +34,7 @@
21992 rep movsq
21993 movl %edx, %ecx
21994 rep movsb
21995 + pax_force_retaddr
21996 ret
21997 .Lmemcpy_e:
21998 .previous
21999 @@ -51,6 +52,7 @@
22000
22001 movl %edx, %ecx
22002 rep movsb
22003 + pax_force_retaddr
22004 ret
22005 .Lmemcpy_e_e:
22006 .previous
22007 @@ -81,13 +83,13 @@ ENTRY(memcpy)
22008 */
22009 movq 0*8(%rsi), %r8
22010 movq 1*8(%rsi), %r9
22011 - movq 2*8(%rsi), %r10
22012 + movq 2*8(%rsi), %rcx
22013 movq 3*8(%rsi), %r11
22014 leaq 4*8(%rsi), %rsi
22015
22016 movq %r8, 0*8(%rdi)
22017 movq %r9, 1*8(%rdi)
22018 - movq %r10, 2*8(%rdi)
22019 + movq %rcx, 2*8(%rdi)
22020 movq %r11, 3*8(%rdi)
22021 leaq 4*8(%rdi), %rdi
22022 jae .Lcopy_forward_loop
22023 @@ -110,12 +112,12 @@ ENTRY(memcpy)
22024 subq $0x20, %rdx
22025 movq -1*8(%rsi), %r8
22026 movq -2*8(%rsi), %r9
22027 - movq -3*8(%rsi), %r10
22028 + movq -3*8(%rsi), %rcx
22029 movq -4*8(%rsi), %r11
22030 leaq -4*8(%rsi), %rsi
22031 movq %r8, -1*8(%rdi)
22032 movq %r9, -2*8(%rdi)
22033 - movq %r10, -3*8(%rdi)
22034 + movq %rcx, -3*8(%rdi)
22035 movq %r11, -4*8(%rdi)
22036 leaq -4*8(%rdi), %rdi
22037 jae .Lcopy_backward_loop
22038 @@ -135,12 +137,13 @@ ENTRY(memcpy)
22039 */
22040 movq 0*8(%rsi), %r8
22041 movq 1*8(%rsi), %r9
22042 - movq -2*8(%rsi, %rdx), %r10
22043 + movq -2*8(%rsi, %rdx), %rcx
22044 movq -1*8(%rsi, %rdx), %r11
22045 movq %r8, 0*8(%rdi)
22046 movq %r9, 1*8(%rdi)
22047 - movq %r10, -2*8(%rdi, %rdx)
22048 + movq %rcx, -2*8(%rdi, %rdx)
22049 movq %r11, -1*8(%rdi, %rdx)
22050 + pax_force_retaddr
22051 retq
22052 .p2align 4
22053 .Lless_16bytes:
22054 @@ -153,6 +156,7 @@ ENTRY(memcpy)
22055 movq -1*8(%rsi, %rdx), %r9
22056 movq %r8, 0*8(%rdi)
22057 movq %r9, -1*8(%rdi, %rdx)
22058 + pax_force_retaddr
22059 retq
22060 .p2align 4
22061 .Lless_8bytes:
22062 @@ -166,6 +170,7 @@ ENTRY(memcpy)
22063 movl -4(%rsi, %rdx), %r8d
22064 movl %ecx, (%rdi)
22065 movl %r8d, -4(%rdi, %rdx)
22066 + pax_force_retaddr
22067 retq
22068 .p2align 4
22069 .Lless_3bytes:
22070 @@ -183,6 +188,7 @@ ENTRY(memcpy)
22071 jnz .Lloop_1
22072
22073 .Lend:
22074 + pax_force_retaddr
22075 retq
22076 CFI_ENDPROC
22077 ENDPROC(memcpy)
22078 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22079 index ee16461..c39c199 100644
22080 --- a/arch/x86/lib/memmove_64.S
22081 +++ b/arch/x86/lib/memmove_64.S
22082 @@ -61,13 +61,13 @@ ENTRY(memmove)
22083 5:
22084 sub $0x20, %rdx
22085 movq 0*8(%rsi), %r11
22086 - movq 1*8(%rsi), %r10
22087 + movq 1*8(%rsi), %rcx
22088 movq 2*8(%rsi), %r9
22089 movq 3*8(%rsi), %r8
22090 leaq 4*8(%rsi), %rsi
22091
22092 movq %r11, 0*8(%rdi)
22093 - movq %r10, 1*8(%rdi)
22094 + movq %rcx, 1*8(%rdi)
22095 movq %r9, 2*8(%rdi)
22096 movq %r8, 3*8(%rdi)
22097 leaq 4*8(%rdi), %rdi
22098 @@ -81,10 +81,10 @@ ENTRY(memmove)
22099 4:
22100 movq %rdx, %rcx
22101 movq -8(%rsi, %rdx), %r11
22102 - lea -8(%rdi, %rdx), %r10
22103 + lea -8(%rdi, %rdx), %r9
22104 shrq $3, %rcx
22105 rep movsq
22106 - movq %r11, (%r10)
22107 + movq %r11, (%r9)
22108 jmp 13f
22109 .Lmemmove_end_forward:
22110
22111 @@ -95,14 +95,14 @@ ENTRY(memmove)
22112 7:
22113 movq %rdx, %rcx
22114 movq (%rsi), %r11
22115 - movq %rdi, %r10
22116 + movq %rdi, %r9
22117 leaq -8(%rsi, %rdx), %rsi
22118 leaq -8(%rdi, %rdx), %rdi
22119 shrq $3, %rcx
22120 std
22121 rep movsq
22122 cld
22123 - movq %r11, (%r10)
22124 + movq %r11, (%r9)
22125 jmp 13f
22126
22127 /*
22128 @@ -127,13 +127,13 @@ ENTRY(memmove)
22129 8:
22130 subq $0x20, %rdx
22131 movq -1*8(%rsi), %r11
22132 - movq -2*8(%rsi), %r10
22133 + movq -2*8(%rsi), %rcx
22134 movq -3*8(%rsi), %r9
22135 movq -4*8(%rsi), %r8
22136 leaq -4*8(%rsi), %rsi
22137
22138 movq %r11, -1*8(%rdi)
22139 - movq %r10, -2*8(%rdi)
22140 + movq %rcx, -2*8(%rdi)
22141 movq %r9, -3*8(%rdi)
22142 movq %r8, -4*8(%rdi)
22143 leaq -4*8(%rdi), %rdi
22144 @@ -151,11 +151,11 @@ ENTRY(memmove)
22145 * Move data from 16 bytes to 31 bytes.
22146 */
22147 movq 0*8(%rsi), %r11
22148 - movq 1*8(%rsi), %r10
22149 + movq 1*8(%rsi), %rcx
22150 movq -2*8(%rsi, %rdx), %r9
22151 movq -1*8(%rsi, %rdx), %r8
22152 movq %r11, 0*8(%rdi)
22153 - movq %r10, 1*8(%rdi)
22154 + movq %rcx, 1*8(%rdi)
22155 movq %r9, -2*8(%rdi, %rdx)
22156 movq %r8, -1*8(%rdi, %rdx)
22157 jmp 13f
22158 @@ -167,9 +167,9 @@ ENTRY(memmove)
22159 * Move data from 8 bytes to 15 bytes.
22160 */
22161 movq 0*8(%rsi), %r11
22162 - movq -1*8(%rsi, %rdx), %r10
22163 + movq -1*8(%rsi, %rdx), %r9
22164 movq %r11, 0*8(%rdi)
22165 - movq %r10, -1*8(%rdi, %rdx)
22166 + movq %r9, -1*8(%rdi, %rdx)
22167 jmp 13f
22168 10:
22169 cmpq $4, %rdx
22170 @@ -178,9 +178,9 @@ ENTRY(memmove)
22171 * Move data from 4 bytes to 7 bytes.
22172 */
22173 movl (%rsi), %r11d
22174 - movl -4(%rsi, %rdx), %r10d
22175 + movl -4(%rsi, %rdx), %r9d
22176 movl %r11d, (%rdi)
22177 - movl %r10d, -4(%rdi, %rdx)
22178 + movl %r9d, -4(%rdi, %rdx)
22179 jmp 13f
22180 11:
22181 cmp $2, %rdx
22182 @@ -189,9 +189,9 @@ ENTRY(memmove)
22183 * Move data from 2 bytes to 3 bytes.
22184 */
22185 movw (%rsi), %r11w
22186 - movw -2(%rsi, %rdx), %r10w
22187 + movw -2(%rsi, %rdx), %r9w
22188 movw %r11w, (%rdi)
22189 - movw %r10w, -2(%rdi, %rdx)
22190 + movw %r9w, -2(%rdi, %rdx)
22191 jmp 13f
22192 12:
22193 cmp $1, %rdx
22194 @@ -202,6 +202,7 @@ ENTRY(memmove)
22195 movb (%rsi), %r11b
22196 movb %r11b, (%rdi)
22197 13:
22198 + pax_force_retaddr
22199 retq
22200 CFI_ENDPROC
22201
22202 @@ -210,6 +211,7 @@ ENTRY(memmove)
22203 /* Forward moving data. */
22204 movq %rdx, %rcx
22205 rep movsb
22206 + pax_force_retaddr
22207 retq
22208 .Lmemmove_end_forward_efs:
22209 .previous
22210 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22211 index 79bd454..dff325a 100644
22212 --- a/arch/x86/lib/memset_64.S
22213 +++ b/arch/x86/lib/memset_64.S
22214 @@ -31,6 +31,7 @@
22215 movl %r8d,%ecx
22216 rep stosb
22217 movq %r9,%rax
22218 + pax_force_retaddr
22219 ret
22220 .Lmemset_e:
22221 .previous
22222 @@ -53,6 +54,7 @@
22223 movl %edx,%ecx
22224 rep stosb
22225 movq %r9,%rax
22226 + pax_force_retaddr
22227 ret
22228 .Lmemset_e_e:
22229 .previous
22230 @@ -60,13 +62,13 @@
22231 ENTRY(memset)
22232 ENTRY(__memset)
22233 CFI_STARTPROC
22234 - movq %rdi,%r10
22235 movq %rdx,%r11
22236
22237 /* expand byte value */
22238 movzbl %sil,%ecx
22239 movabs $0x0101010101010101,%rax
22240 mul %rcx /* with rax, clobbers rdx */
22241 + movq %rdi,%rdx
22242
22243 /* align dst */
22244 movl %edi,%r9d
22245 @@ -120,7 +122,8 @@ ENTRY(__memset)
22246 jnz .Lloop_1
22247
22248 .Lende:
22249 - movq %r10,%rax
22250 + movq %rdx,%rax
22251 + pax_force_retaddr
22252 ret
22253
22254 CFI_RESTORE_STATE
22255 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22256 index c9f2d9b..e7fd2c0 100644
22257 --- a/arch/x86/lib/mmx_32.c
22258 +++ b/arch/x86/lib/mmx_32.c
22259 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22260 {
22261 void *p;
22262 int i;
22263 + unsigned long cr0;
22264
22265 if (unlikely(in_interrupt()))
22266 return __memcpy(to, from, len);
22267 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22268 kernel_fpu_begin();
22269
22270 __asm__ __volatile__ (
22271 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22272 - " prefetch 64(%0)\n"
22273 - " prefetch 128(%0)\n"
22274 - " prefetch 192(%0)\n"
22275 - " prefetch 256(%0)\n"
22276 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22277 + " prefetch 64(%1)\n"
22278 + " prefetch 128(%1)\n"
22279 + " prefetch 192(%1)\n"
22280 + " prefetch 256(%1)\n"
22281 "2: \n"
22282 ".section .fixup, \"ax\"\n"
22283 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22284 + "3: \n"
22285 +
22286 +#ifdef CONFIG_PAX_KERNEXEC
22287 + " movl %%cr0, %0\n"
22288 + " movl %0, %%eax\n"
22289 + " andl $0xFFFEFFFF, %%eax\n"
22290 + " movl %%eax, %%cr0\n"
22291 +#endif
22292 +
22293 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22294 +
22295 +#ifdef CONFIG_PAX_KERNEXEC
22296 + " movl %0, %%cr0\n"
22297 +#endif
22298 +
22299 " jmp 2b\n"
22300 ".previous\n"
22301 _ASM_EXTABLE(1b, 3b)
22302 - : : "r" (from));
22303 + : "=&r" (cr0) : "r" (from) : "ax");
22304
22305 for ( ; i > 5; i--) {
22306 __asm__ __volatile__ (
22307 - "1: prefetch 320(%0)\n"
22308 - "2: movq (%0), %%mm0\n"
22309 - " movq 8(%0), %%mm1\n"
22310 - " movq 16(%0), %%mm2\n"
22311 - " movq 24(%0), %%mm3\n"
22312 - " movq %%mm0, (%1)\n"
22313 - " movq %%mm1, 8(%1)\n"
22314 - " movq %%mm2, 16(%1)\n"
22315 - " movq %%mm3, 24(%1)\n"
22316 - " movq 32(%0), %%mm0\n"
22317 - " movq 40(%0), %%mm1\n"
22318 - " movq 48(%0), %%mm2\n"
22319 - " movq 56(%0), %%mm3\n"
22320 - " movq %%mm0, 32(%1)\n"
22321 - " movq %%mm1, 40(%1)\n"
22322 - " movq %%mm2, 48(%1)\n"
22323 - " movq %%mm3, 56(%1)\n"
22324 + "1: prefetch 320(%1)\n"
22325 + "2: movq (%1), %%mm0\n"
22326 + " movq 8(%1), %%mm1\n"
22327 + " movq 16(%1), %%mm2\n"
22328 + " movq 24(%1), %%mm3\n"
22329 + " movq %%mm0, (%2)\n"
22330 + " movq %%mm1, 8(%2)\n"
22331 + " movq %%mm2, 16(%2)\n"
22332 + " movq %%mm3, 24(%2)\n"
22333 + " movq 32(%1), %%mm0\n"
22334 + " movq 40(%1), %%mm1\n"
22335 + " movq 48(%1), %%mm2\n"
22336 + " movq 56(%1), %%mm3\n"
22337 + " movq %%mm0, 32(%2)\n"
22338 + " movq %%mm1, 40(%2)\n"
22339 + " movq %%mm2, 48(%2)\n"
22340 + " movq %%mm3, 56(%2)\n"
22341 ".section .fixup, \"ax\"\n"
22342 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22343 + "3:\n"
22344 +
22345 +#ifdef CONFIG_PAX_KERNEXEC
22346 + " movl %%cr0, %0\n"
22347 + " movl %0, %%eax\n"
22348 + " andl $0xFFFEFFFF, %%eax\n"
22349 + " movl %%eax, %%cr0\n"
22350 +#endif
22351 +
22352 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22353 +
22354 +#ifdef CONFIG_PAX_KERNEXEC
22355 + " movl %0, %%cr0\n"
22356 +#endif
22357 +
22358 " jmp 2b\n"
22359 ".previous\n"
22360 _ASM_EXTABLE(1b, 3b)
22361 - : : "r" (from), "r" (to) : "memory");
22362 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22363
22364 from += 64;
22365 to += 64;
22366 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22367 static void fast_copy_page(void *to, void *from)
22368 {
22369 int i;
22370 + unsigned long cr0;
22371
22372 kernel_fpu_begin();
22373
22374 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22375 * but that is for later. -AV
22376 */
22377 __asm__ __volatile__(
22378 - "1: prefetch (%0)\n"
22379 - " prefetch 64(%0)\n"
22380 - " prefetch 128(%0)\n"
22381 - " prefetch 192(%0)\n"
22382 - " prefetch 256(%0)\n"
22383 + "1: prefetch (%1)\n"
22384 + " prefetch 64(%1)\n"
22385 + " prefetch 128(%1)\n"
22386 + " prefetch 192(%1)\n"
22387 + " prefetch 256(%1)\n"
22388 "2: \n"
22389 ".section .fixup, \"ax\"\n"
22390 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22391 + "3: \n"
22392 +
22393 +#ifdef CONFIG_PAX_KERNEXEC
22394 + " movl %%cr0, %0\n"
22395 + " movl %0, %%eax\n"
22396 + " andl $0xFFFEFFFF, %%eax\n"
22397 + " movl %%eax, %%cr0\n"
22398 +#endif
22399 +
22400 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22401 +
22402 +#ifdef CONFIG_PAX_KERNEXEC
22403 + " movl %0, %%cr0\n"
22404 +#endif
22405 +
22406 " jmp 2b\n"
22407 ".previous\n"
22408 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22409 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22410
22411 for (i = 0; i < (4096-320)/64; i++) {
22412 __asm__ __volatile__ (
22413 - "1: prefetch 320(%0)\n"
22414 - "2: movq (%0), %%mm0\n"
22415 - " movntq %%mm0, (%1)\n"
22416 - " movq 8(%0), %%mm1\n"
22417 - " movntq %%mm1, 8(%1)\n"
22418 - " movq 16(%0), %%mm2\n"
22419 - " movntq %%mm2, 16(%1)\n"
22420 - " movq 24(%0), %%mm3\n"
22421 - " movntq %%mm3, 24(%1)\n"
22422 - " movq 32(%0), %%mm4\n"
22423 - " movntq %%mm4, 32(%1)\n"
22424 - " movq 40(%0), %%mm5\n"
22425 - " movntq %%mm5, 40(%1)\n"
22426 - " movq 48(%0), %%mm6\n"
22427 - " movntq %%mm6, 48(%1)\n"
22428 - " movq 56(%0), %%mm7\n"
22429 - " movntq %%mm7, 56(%1)\n"
22430 + "1: prefetch 320(%1)\n"
22431 + "2: movq (%1), %%mm0\n"
22432 + " movntq %%mm0, (%2)\n"
22433 + " movq 8(%1), %%mm1\n"
22434 + " movntq %%mm1, 8(%2)\n"
22435 + " movq 16(%1), %%mm2\n"
22436 + " movntq %%mm2, 16(%2)\n"
22437 + " movq 24(%1), %%mm3\n"
22438 + " movntq %%mm3, 24(%2)\n"
22439 + " movq 32(%1), %%mm4\n"
22440 + " movntq %%mm4, 32(%2)\n"
22441 + " movq 40(%1), %%mm5\n"
22442 + " movntq %%mm5, 40(%2)\n"
22443 + " movq 48(%1), %%mm6\n"
22444 + " movntq %%mm6, 48(%2)\n"
22445 + " movq 56(%1), %%mm7\n"
22446 + " movntq %%mm7, 56(%2)\n"
22447 ".section .fixup, \"ax\"\n"
22448 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22449 + "3:\n"
22450 +
22451 +#ifdef CONFIG_PAX_KERNEXEC
22452 + " movl %%cr0, %0\n"
22453 + " movl %0, %%eax\n"
22454 + " andl $0xFFFEFFFF, %%eax\n"
22455 + " movl %%eax, %%cr0\n"
22456 +#endif
22457 +
22458 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22459 +
22460 +#ifdef CONFIG_PAX_KERNEXEC
22461 + " movl %0, %%cr0\n"
22462 +#endif
22463 +
22464 " jmp 2b\n"
22465 ".previous\n"
22466 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22467 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22468
22469 from += 64;
22470 to += 64;
22471 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22472 static void fast_copy_page(void *to, void *from)
22473 {
22474 int i;
22475 + unsigned long cr0;
22476
22477 kernel_fpu_begin();
22478
22479 __asm__ __volatile__ (
22480 - "1: prefetch (%0)\n"
22481 - " prefetch 64(%0)\n"
22482 - " prefetch 128(%0)\n"
22483 - " prefetch 192(%0)\n"
22484 - " prefetch 256(%0)\n"
22485 + "1: prefetch (%1)\n"
22486 + " prefetch 64(%1)\n"
22487 + " prefetch 128(%1)\n"
22488 + " prefetch 192(%1)\n"
22489 + " prefetch 256(%1)\n"
22490 "2: \n"
22491 ".section .fixup, \"ax\"\n"
22492 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22493 + "3: \n"
22494 +
22495 +#ifdef CONFIG_PAX_KERNEXEC
22496 + " movl %%cr0, %0\n"
22497 + " movl %0, %%eax\n"
22498 + " andl $0xFFFEFFFF, %%eax\n"
22499 + " movl %%eax, %%cr0\n"
22500 +#endif
22501 +
22502 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22503 +
22504 +#ifdef CONFIG_PAX_KERNEXEC
22505 + " movl %0, %%cr0\n"
22506 +#endif
22507 +
22508 " jmp 2b\n"
22509 ".previous\n"
22510 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22511 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22512
22513 for (i = 0; i < 4096/64; i++) {
22514 __asm__ __volatile__ (
22515 - "1: prefetch 320(%0)\n"
22516 - "2: movq (%0), %%mm0\n"
22517 - " movq 8(%0), %%mm1\n"
22518 - " movq 16(%0), %%mm2\n"
22519 - " movq 24(%0), %%mm3\n"
22520 - " movq %%mm0, (%1)\n"
22521 - " movq %%mm1, 8(%1)\n"
22522 - " movq %%mm2, 16(%1)\n"
22523 - " movq %%mm3, 24(%1)\n"
22524 - " movq 32(%0), %%mm0\n"
22525 - " movq 40(%0), %%mm1\n"
22526 - " movq 48(%0), %%mm2\n"
22527 - " movq 56(%0), %%mm3\n"
22528 - " movq %%mm0, 32(%1)\n"
22529 - " movq %%mm1, 40(%1)\n"
22530 - " movq %%mm2, 48(%1)\n"
22531 - " movq %%mm3, 56(%1)\n"
22532 + "1: prefetch 320(%1)\n"
22533 + "2: movq (%1), %%mm0\n"
22534 + " movq 8(%1), %%mm1\n"
22535 + " movq 16(%1), %%mm2\n"
22536 + " movq 24(%1), %%mm3\n"
22537 + " movq %%mm0, (%2)\n"
22538 + " movq %%mm1, 8(%2)\n"
22539 + " movq %%mm2, 16(%2)\n"
22540 + " movq %%mm3, 24(%2)\n"
22541 + " movq 32(%1), %%mm0\n"
22542 + " movq 40(%1), %%mm1\n"
22543 + " movq 48(%1), %%mm2\n"
22544 + " movq 56(%1), %%mm3\n"
22545 + " movq %%mm0, 32(%2)\n"
22546 + " movq %%mm1, 40(%2)\n"
22547 + " movq %%mm2, 48(%2)\n"
22548 + " movq %%mm3, 56(%2)\n"
22549 ".section .fixup, \"ax\"\n"
22550 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22551 + "3:\n"
22552 +
22553 +#ifdef CONFIG_PAX_KERNEXEC
22554 + " movl %%cr0, %0\n"
22555 + " movl %0, %%eax\n"
22556 + " andl $0xFFFEFFFF, %%eax\n"
22557 + " movl %%eax, %%cr0\n"
22558 +#endif
22559 +
22560 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22561 +
22562 +#ifdef CONFIG_PAX_KERNEXEC
22563 + " movl %0, %%cr0\n"
22564 +#endif
22565 +
22566 " jmp 2b\n"
22567 ".previous\n"
22568 _ASM_EXTABLE(1b, 3b)
22569 - : : "r" (from), "r" (to) : "memory");
22570 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22571
22572 from += 64;
22573 to += 64;
22574 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22575 index 69fa106..adda88b 100644
22576 --- a/arch/x86/lib/msr-reg.S
22577 +++ b/arch/x86/lib/msr-reg.S
22578 @@ -3,6 +3,7 @@
22579 #include <asm/dwarf2.h>
22580 #include <asm/asm.h>
22581 #include <asm/msr.h>
22582 +#include <asm/alternative-asm.h>
22583
22584 #ifdef CONFIG_X86_64
22585 /*
22586 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22587 CFI_STARTPROC
22588 pushq_cfi %rbx
22589 pushq_cfi %rbp
22590 - movq %rdi, %r10 /* Save pointer */
22591 + movq %rdi, %r9 /* Save pointer */
22592 xorl %r11d, %r11d /* Return value */
22593 movl (%rdi), %eax
22594 movl 4(%rdi), %ecx
22595 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22596 movl 28(%rdi), %edi
22597 CFI_REMEMBER_STATE
22598 1: \op
22599 -2: movl %eax, (%r10)
22600 +2: movl %eax, (%r9)
22601 movl %r11d, %eax /* Return value */
22602 - movl %ecx, 4(%r10)
22603 - movl %edx, 8(%r10)
22604 - movl %ebx, 12(%r10)
22605 - movl %ebp, 20(%r10)
22606 - movl %esi, 24(%r10)
22607 - movl %edi, 28(%r10)
22608 + movl %ecx, 4(%r9)
22609 + movl %edx, 8(%r9)
22610 + movl %ebx, 12(%r9)
22611 + movl %ebp, 20(%r9)
22612 + movl %esi, 24(%r9)
22613 + movl %edi, 28(%r9)
22614 popq_cfi %rbp
22615 popq_cfi %rbx
22616 + pax_force_retaddr
22617 ret
22618 3:
22619 CFI_RESTORE_STATE
22620 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22621 index 36b0d15..d381858 100644
22622 --- a/arch/x86/lib/putuser.S
22623 +++ b/arch/x86/lib/putuser.S
22624 @@ -15,7 +15,9 @@
22625 #include <asm/thread_info.h>
22626 #include <asm/errno.h>
22627 #include <asm/asm.h>
22628 -
22629 +#include <asm/segment.h>
22630 +#include <asm/pgtable.h>
22631 +#include <asm/alternative-asm.h>
22632
22633 /*
22634 * __put_user_X
22635 @@ -29,52 +31,119 @@
22636 * as they get called from within inline assembly.
22637 */
22638
22639 -#define ENTER CFI_STARTPROC ; \
22640 - GET_THREAD_INFO(%_ASM_BX)
22641 -#define EXIT ret ; \
22642 +#define ENTER CFI_STARTPROC
22643 +#define EXIT pax_force_retaddr; ret ; \
22644 CFI_ENDPROC
22645
22646 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22647 +#define _DEST %_ASM_CX,%_ASM_BX
22648 +#else
22649 +#define _DEST %_ASM_CX
22650 +#endif
22651 +
22652 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22653 +#define __copyuser_seg gs;
22654 +#else
22655 +#define __copyuser_seg
22656 +#endif
22657 +
22658 .text
22659 ENTRY(__put_user_1)
22660 ENTER
22661 +
22662 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22663 + GET_THREAD_INFO(%_ASM_BX)
22664 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22665 jae bad_put_user
22666 -1: movb %al,(%_ASM_CX)
22667 +
22668 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22669 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22670 + cmp %_ASM_BX,%_ASM_CX
22671 + jb 1234f
22672 + xor %ebx,%ebx
22673 +1234:
22674 +#endif
22675 +
22676 +#endif
22677 +
22678 +1: __copyuser_seg movb %al,(_DEST)
22679 xor %eax,%eax
22680 EXIT
22681 ENDPROC(__put_user_1)
22682
22683 ENTRY(__put_user_2)
22684 ENTER
22685 +
22686 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22687 + GET_THREAD_INFO(%_ASM_BX)
22688 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22689 sub $1,%_ASM_BX
22690 cmp %_ASM_BX,%_ASM_CX
22691 jae bad_put_user
22692 -2: movw %ax,(%_ASM_CX)
22693 +
22694 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22695 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22696 + cmp %_ASM_BX,%_ASM_CX
22697 + jb 1234f
22698 + xor %ebx,%ebx
22699 +1234:
22700 +#endif
22701 +
22702 +#endif
22703 +
22704 +2: __copyuser_seg movw %ax,(_DEST)
22705 xor %eax,%eax
22706 EXIT
22707 ENDPROC(__put_user_2)
22708
22709 ENTRY(__put_user_4)
22710 ENTER
22711 +
22712 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22713 + GET_THREAD_INFO(%_ASM_BX)
22714 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22715 sub $3,%_ASM_BX
22716 cmp %_ASM_BX,%_ASM_CX
22717 jae bad_put_user
22718 -3: movl %eax,(%_ASM_CX)
22719 +
22720 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22721 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22722 + cmp %_ASM_BX,%_ASM_CX
22723 + jb 1234f
22724 + xor %ebx,%ebx
22725 +1234:
22726 +#endif
22727 +
22728 +#endif
22729 +
22730 +3: __copyuser_seg movl %eax,(_DEST)
22731 xor %eax,%eax
22732 EXIT
22733 ENDPROC(__put_user_4)
22734
22735 ENTRY(__put_user_8)
22736 ENTER
22737 +
22738 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22739 + GET_THREAD_INFO(%_ASM_BX)
22740 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22741 sub $7,%_ASM_BX
22742 cmp %_ASM_BX,%_ASM_CX
22743 jae bad_put_user
22744 -4: mov %_ASM_AX,(%_ASM_CX)
22745 +
22746 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22747 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22748 + cmp %_ASM_BX,%_ASM_CX
22749 + jb 1234f
22750 + xor %ebx,%ebx
22751 +1234:
22752 +#endif
22753 +
22754 +#endif
22755 +
22756 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22757 #ifdef CONFIG_X86_32
22758 -5: movl %edx,4(%_ASM_CX)
22759 +5: __copyuser_seg movl %edx,4(_DEST)
22760 #endif
22761 xor %eax,%eax
22762 EXIT
22763 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22764 index 1cad221..de671ee 100644
22765 --- a/arch/x86/lib/rwlock.S
22766 +++ b/arch/x86/lib/rwlock.S
22767 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22768 FRAME
22769 0: LOCK_PREFIX
22770 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22771 +
22772 +#ifdef CONFIG_PAX_REFCOUNT
22773 + jno 1234f
22774 + LOCK_PREFIX
22775 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22776 + int $4
22777 +1234:
22778 + _ASM_EXTABLE(1234b, 1234b)
22779 +#endif
22780 +
22781 1: rep; nop
22782 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22783 jne 1b
22784 LOCK_PREFIX
22785 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22786 +
22787 +#ifdef CONFIG_PAX_REFCOUNT
22788 + jno 1234f
22789 + LOCK_PREFIX
22790 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22791 + int $4
22792 +1234:
22793 + _ASM_EXTABLE(1234b, 1234b)
22794 +#endif
22795 +
22796 jnz 0b
22797 ENDFRAME
22798 + pax_force_retaddr
22799 ret
22800 CFI_ENDPROC
22801 END(__write_lock_failed)
22802 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22803 FRAME
22804 0: LOCK_PREFIX
22805 READ_LOCK_SIZE(inc) (%__lock_ptr)
22806 +
22807 +#ifdef CONFIG_PAX_REFCOUNT
22808 + jno 1234f
22809 + LOCK_PREFIX
22810 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22811 + int $4
22812 +1234:
22813 + _ASM_EXTABLE(1234b, 1234b)
22814 +#endif
22815 +
22816 1: rep; nop
22817 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22818 js 1b
22819 LOCK_PREFIX
22820 READ_LOCK_SIZE(dec) (%__lock_ptr)
22821 +
22822 +#ifdef CONFIG_PAX_REFCOUNT
22823 + jno 1234f
22824 + LOCK_PREFIX
22825 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22826 + int $4
22827 +1234:
22828 + _ASM_EXTABLE(1234b, 1234b)
22829 +#endif
22830 +
22831 js 0b
22832 ENDFRAME
22833 + pax_force_retaddr
22834 ret
22835 CFI_ENDPROC
22836 END(__read_lock_failed)
22837 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22838 index 5dff5f0..cadebf4 100644
22839 --- a/arch/x86/lib/rwsem.S
22840 +++ b/arch/x86/lib/rwsem.S
22841 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22842 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22843 CFI_RESTORE __ASM_REG(dx)
22844 restore_common_regs
22845 + pax_force_retaddr
22846 ret
22847 CFI_ENDPROC
22848 ENDPROC(call_rwsem_down_read_failed)
22849 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22850 movq %rax,%rdi
22851 call rwsem_down_write_failed
22852 restore_common_regs
22853 + pax_force_retaddr
22854 ret
22855 CFI_ENDPROC
22856 ENDPROC(call_rwsem_down_write_failed)
22857 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22858 movq %rax,%rdi
22859 call rwsem_wake
22860 restore_common_regs
22861 -1: ret
22862 +1: pax_force_retaddr
22863 + ret
22864 CFI_ENDPROC
22865 ENDPROC(call_rwsem_wake)
22866
22867 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22868 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22869 CFI_RESTORE __ASM_REG(dx)
22870 restore_common_regs
22871 + pax_force_retaddr
22872 ret
22873 CFI_ENDPROC
22874 ENDPROC(call_rwsem_downgrade_wake)
22875 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22876 index a63efd6..ccecad8 100644
22877 --- a/arch/x86/lib/thunk_64.S
22878 +++ b/arch/x86/lib/thunk_64.S
22879 @@ -8,6 +8,7 @@
22880 #include <linux/linkage.h>
22881 #include <asm/dwarf2.h>
22882 #include <asm/calling.h>
22883 +#include <asm/alternative-asm.h>
22884
22885 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22886 .macro THUNK name, func, put_ret_addr_in_rdi=0
22887 @@ -41,5 +42,6 @@
22888 SAVE_ARGS
22889 restore:
22890 RESTORE_ARGS
22891 + pax_force_retaddr
22892 ret
22893 CFI_ENDPROC
22894 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22895 index e218d5d..a99a1eb 100644
22896 --- a/arch/x86/lib/usercopy_32.c
22897 +++ b/arch/x86/lib/usercopy_32.c
22898 @@ -43,7 +43,7 @@ do { \
22899 __asm__ __volatile__( \
22900 " testl %1,%1\n" \
22901 " jz 2f\n" \
22902 - "0: lodsb\n" \
22903 + "0: "__copyuser_seg"lodsb\n" \
22904 " stosb\n" \
22905 " testb %%al,%%al\n" \
22906 " jz 1f\n" \
22907 @@ -128,10 +128,12 @@ do { \
22908 int __d0; \
22909 might_fault(); \
22910 __asm__ __volatile__( \
22911 + __COPYUSER_SET_ES \
22912 "0: rep; stosl\n" \
22913 " movl %2,%0\n" \
22914 "1: rep; stosb\n" \
22915 "2:\n" \
22916 + __COPYUSER_RESTORE_ES \
22917 ".section .fixup,\"ax\"\n" \
22918 "3: lea 0(%2,%0,4),%0\n" \
22919 " jmp 2b\n" \
22920 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
22921 might_fault();
22922
22923 __asm__ __volatile__(
22924 + __COPYUSER_SET_ES
22925 " testl %0, %0\n"
22926 " jz 3f\n"
22927 " andl %0,%%ecx\n"
22928 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
22929 " subl %%ecx,%0\n"
22930 " addl %0,%%eax\n"
22931 "1:\n"
22932 + __COPYUSER_RESTORE_ES
22933 ".section .fixup,\"ax\"\n"
22934 "2: xorl %%eax,%%eax\n"
22935 " jmp 1b\n"
22936 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
22937
22938 #ifdef CONFIG_X86_INTEL_USERCOPY
22939 static unsigned long
22940 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22941 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22942 {
22943 int d0, d1;
22944 __asm__ __volatile__(
22945 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22946 " .align 2,0x90\n"
22947 "3: movl 0(%4), %%eax\n"
22948 "4: movl 4(%4), %%edx\n"
22949 - "5: movl %%eax, 0(%3)\n"
22950 - "6: movl %%edx, 4(%3)\n"
22951 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22952 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22953 "7: movl 8(%4), %%eax\n"
22954 "8: movl 12(%4),%%edx\n"
22955 - "9: movl %%eax, 8(%3)\n"
22956 - "10: movl %%edx, 12(%3)\n"
22957 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22958 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22959 "11: movl 16(%4), %%eax\n"
22960 "12: movl 20(%4), %%edx\n"
22961 - "13: movl %%eax, 16(%3)\n"
22962 - "14: movl %%edx, 20(%3)\n"
22963 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22964 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22965 "15: movl 24(%4), %%eax\n"
22966 "16: movl 28(%4), %%edx\n"
22967 - "17: movl %%eax, 24(%3)\n"
22968 - "18: movl %%edx, 28(%3)\n"
22969 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22970 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22971 "19: movl 32(%4), %%eax\n"
22972 "20: movl 36(%4), %%edx\n"
22973 - "21: movl %%eax, 32(%3)\n"
22974 - "22: movl %%edx, 36(%3)\n"
22975 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22976 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22977 "23: movl 40(%4), %%eax\n"
22978 "24: movl 44(%4), %%edx\n"
22979 - "25: movl %%eax, 40(%3)\n"
22980 - "26: movl %%edx, 44(%3)\n"
22981 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22982 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22983 "27: movl 48(%4), %%eax\n"
22984 "28: movl 52(%4), %%edx\n"
22985 - "29: movl %%eax, 48(%3)\n"
22986 - "30: movl %%edx, 52(%3)\n"
22987 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22988 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22989 "31: movl 56(%4), %%eax\n"
22990 "32: movl 60(%4), %%edx\n"
22991 - "33: movl %%eax, 56(%3)\n"
22992 - "34: movl %%edx, 60(%3)\n"
22993 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22994 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22995 " addl $-64, %0\n"
22996 " addl $64, %4\n"
22997 " addl $64, %3\n"
22998 @@ -278,10 +282,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22999 " shrl $2, %0\n"
23000 " andl $3, %%eax\n"
23001 " cld\n"
23002 + __COPYUSER_SET_ES
23003 "99: rep; movsl\n"
23004 "36: movl %%eax, %0\n"
23005 "37: rep; movsb\n"
23006 "100:\n"
23007 + __COPYUSER_RESTORE_ES
23008 ".section .fixup,\"ax\"\n"
23009 "101: lea 0(%%eax,%0,4),%0\n"
23010 " jmp 100b\n"
23011 @@ -334,46 +340,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23012 }
23013
23014 static unsigned long
23015 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23016 +{
23017 + int d0, d1;
23018 + __asm__ __volatile__(
23019 + " .align 2,0x90\n"
23020 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23021 + " cmpl $67, %0\n"
23022 + " jbe 3f\n"
23023 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23024 + " .align 2,0x90\n"
23025 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23026 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23027 + "5: movl %%eax, 0(%3)\n"
23028 + "6: movl %%edx, 4(%3)\n"
23029 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23030 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23031 + "9: movl %%eax, 8(%3)\n"
23032 + "10: movl %%edx, 12(%3)\n"
23033 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23034 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23035 + "13: movl %%eax, 16(%3)\n"
23036 + "14: movl %%edx, 20(%3)\n"
23037 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23038 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23039 + "17: movl %%eax, 24(%3)\n"
23040 + "18: movl %%edx, 28(%3)\n"
23041 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23042 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23043 + "21: movl %%eax, 32(%3)\n"
23044 + "22: movl %%edx, 36(%3)\n"
23045 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23046 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23047 + "25: movl %%eax, 40(%3)\n"
23048 + "26: movl %%edx, 44(%3)\n"
23049 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23050 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23051 + "29: movl %%eax, 48(%3)\n"
23052 + "30: movl %%edx, 52(%3)\n"
23053 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23054 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23055 + "33: movl %%eax, 56(%3)\n"
23056 + "34: movl %%edx, 60(%3)\n"
23057 + " addl $-64, %0\n"
23058 + " addl $64, %4\n"
23059 + " addl $64, %3\n"
23060 + " cmpl $63, %0\n"
23061 + " ja 1b\n"
23062 + "35: movl %0, %%eax\n"
23063 + " shrl $2, %0\n"
23064 + " andl $3, %%eax\n"
23065 + " cld\n"
23066 + "99: rep; "__copyuser_seg" movsl\n"
23067 + "36: movl %%eax, %0\n"
23068 + "37: rep; "__copyuser_seg" movsb\n"
23069 + "100:\n"
23070 + ".section .fixup,\"ax\"\n"
23071 + "101: lea 0(%%eax,%0,4),%0\n"
23072 + " jmp 100b\n"
23073 + ".previous\n"
23074 + ".section __ex_table,\"a\"\n"
23075 + " .align 4\n"
23076 + " .long 1b,100b\n"
23077 + " .long 2b,100b\n"
23078 + " .long 3b,100b\n"
23079 + " .long 4b,100b\n"
23080 + " .long 5b,100b\n"
23081 + " .long 6b,100b\n"
23082 + " .long 7b,100b\n"
23083 + " .long 8b,100b\n"
23084 + " .long 9b,100b\n"
23085 + " .long 10b,100b\n"
23086 + " .long 11b,100b\n"
23087 + " .long 12b,100b\n"
23088 + " .long 13b,100b\n"
23089 + " .long 14b,100b\n"
23090 + " .long 15b,100b\n"
23091 + " .long 16b,100b\n"
23092 + " .long 17b,100b\n"
23093 + " .long 18b,100b\n"
23094 + " .long 19b,100b\n"
23095 + " .long 20b,100b\n"
23096 + " .long 21b,100b\n"
23097 + " .long 22b,100b\n"
23098 + " .long 23b,100b\n"
23099 + " .long 24b,100b\n"
23100 + " .long 25b,100b\n"
23101 + " .long 26b,100b\n"
23102 + " .long 27b,100b\n"
23103 + " .long 28b,100b\n"
23104 + " .long 29b,100b\n"
23105 + " .long 30b,100b\n"
23106 + " .long 31b,100b\n"
23107 + " .long 32b,100b\n"
23108 + " .long 33b,100b\n"
23109 + " .long 34b,100b\n"
23110 + " .long 35b,100b\n"
23111 + " .long 36b,100b\n"
23112 + " .long 37b,100b\n"
23113 + " .long 99b,101b\n"
23114 + ".previous"
23115 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23116 + : "1"(to), "2"(from), "0"(size)
23117 + : "eax", "edx", "memory");
23118 + return size;
23119 +}
23120 +
23121 +static unsigned long
23122 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23123 +static unsigned long
23124 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23125 {
23126 int d0, d1;
23127 __asm__ __volatile__(
23128 " .align 2,0x90\n"
23129 - "0: movl 32(%4), %%eax\n"
23130 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23131 " cmpl $67, %0\n"
23132 " jbe 2f\n"
23133 - "1: movl 64(%4), %%eax\n"
23134 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23135 " .align 2,0x90\n"
23136 - "2: movl 0(%4), %%eax\n"
23137 - "21: movl 4(%4), %%edx\n"
23138 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23139 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23140 " movl %%eax, 0(%3)\n"
23141 " movl %%edx, 4(%3)\n"
23142 - "3: movl 8(%4), %%eax\n"
23143 - "31: movl 12(%4),%%edx\n"
23144 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23145 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23146 " movl %%eax, 8(%3)\n"
23147 " movl %%edx, 12(%3)\n"
23148 - "4: movl 16(%4), %%eax\n"
23149 - "41: movl 20(%4), %%edx\n"
23150 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23151 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23152 " movl %%eax, 16(%3)\n"
23153 " movl %%edx, 20(%3)\n"
23154 - "10: movl 24(%4), %%eax\n"
23155 - "51: movl 28(%4), %%edx\n"
23156 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23157 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23158 " movl %%eax, 24(%3)\n"
23159 " movl %%edx, 28(%3)\n"
23160 - "11: movl 32(%4), %%eax\n"
23161 - "61: movl 36(%4), %%edx\n"
23162 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23163 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23164 " movl %%eax, 32(%3)\n"
23165 " movl %%edx, 36(%3)\n"
23166 - "12: movl 40(%4), %%eax\n"
23167 - "71: movl 44(%4), %%edx\n"
23168 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23169 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23170 " movl %%eax, 40(%3)\n"
23171 " movl %%edx, 44(%3)\n"
23172 - "13: movl 48(%4), %%eax\n"
23173 - "81: movl 52(%4), %%edx\n"
23174 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23175 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23176 " movl %%eax, 48(%3)\n"
23177 " movl %%edx, 52(%3)\n"
23178 - "14: movl 56(%4), %%eax\n"
23179 - "91: movl 60(%4), %%edx\n"
23180 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23181 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23182 " movl %%eax, 56(%3)\n"
23183 " movl %%edx, 60(%3)\n"
23184 " addl $-64, %0\n"
23185 @@ -385,9 +500,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23186 " shrl $2, %0\n"
23187 " andl $3, %%eax\n"
23188 " cld\n"
23189 - "6: rep; movsl\n"
23190 + "6: rep; "__copyuser_seg" movsl\n"
23191 " movl %%eax,%0\n"
23192 - "7: rep; movsb\n"
23193 + "7: rep; "__copyuser_seg" movsb\n"
23194 "8:\n"
23195 ".section .fixup,\"ax\"\n"
23196 "9: lea 0(%%eax,%0,4),%0\n"
23197 @@ -434,47 +549,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23198 */
23199
23200 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23201 + const void __user *from, unsigned long size) __size_overflow(3);
23202 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23203 const void __user *from, unsigned long size)
23204 {
23205 int d0, d1;
23206
23207 __asm__ __volatile__(
23208 " .align 2,0x90\n"
23209 - "0: movl 32(%4), %%eax\n"
23210 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23211 " cmpl $67, %0\n"
23212 " jbe 2f\n"
23213 - "1: movl 64(%4), %%eax\n"
23214 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23215 " .align 2,0x90\n"
23216 - "2: movl 0(%4), %%eax\n"
23217 - "21: movl 4(%4), %%edx\n"
23218 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23219 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23220 " movnti %%eax, 0(%3)\n"
23221 " movnti %%edx, 4(%3)\n"
23222 - "3: movl 8(%4), %%eax\n"
23223 - "31: movl 12(%4),%%edx\n"
23224 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23225 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23226 " movnti %%eax, 8(%3)\n"
23227 " movnti %%edx, 12(%3)\n"
23228 - "4: movl 16(%4), %%eax\n"
23229 - "41: movl 20(%4), %%edx\n"
23230 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23231 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23232 " movnti %%eax, 16(%3)\n"
23233 " movnti %%edx, 20(%3)\n"
23234 - "10: movl 24(%4), %%eax\n"
23235 - "51: movl 28(%4), %%edx\n"
23236 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23237 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23238 " movnti %%eax, 24(%3)\n"
23239 " movnti %%edx, 28(%3)\n"
23240 - "11: movl 32(%4), %%eax\n"
23241 - "61: movl 36(%4), %%edx\n"
23242 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23243 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23244 " movnti %%eax, 32(%3)\n"
23245 " movnti %%edx, 36(%3)\n"
23246 - "12: movl 40(%4), %%eax\n"
23247 - "71: movl 44(%4), %%edx\n"
23248 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23249 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23250 " movnti %%eax, 40(%3)\n"
23251 " movnti %%edx, 44(%3)\n"
23252 - "13: movl 48(%4), %%eax\n"
23253 - "81: movl 52(%4), %%edx\n"
23254 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23255 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23256 " movnti %%eax, 48(%3)\n"
23257 " movnti %%edx, 52(%3)\n"
23258 - "14: movl 56(%4), %%eax\n"
23259 - "91: movl 60(%4), %%edx\n"
23260 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23261 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23262 " movnti %%eax, 56(%3)\n"
23263 " movnti %%edx, 60(%3)\n"
23264 " addl $-64, %0\n"
23265 @@ -487,9 +604,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23266 " shrl $2, %0\n"
23267 " andl $3, %%eax\n"
23268 " cld\n"
23269 - "6: rep; movsl\n"
23270 + "6: rep; "__copyuser_seg" movsl\n"
23271 " movl %%eax,%0\n"
23272 - "7: rep; movsb\n"
23273 + "7: rep; "__copyuser_seg" movsb\n"
23274 "8:\n"
23275 ".section .fixup,\"ax\"\n"
23276 "9: lea 0(%%eax,%0,4),%0\n"
23277 @@ -531,47 +648,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23278 }
23279
23280 static unsigned long __copy_user_intel_nocache(void *to,
23281 + const void __user *from, unsigned long size) __size_overflow(3);
23282 +static unsigned long __copy_user_intel_nocache(void *to,
23283 const void __user *from, unsigned long size)
23284 {
23285 int d0, d1;
23286
23287 __asm__ __volatile__(
23288 " .align 2,0x90\n"
23289 - "0: movl 32(%4), %%eax\n"
23290 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23291 " cmpl $67, %0\n"
23292 " jbe 2f\n"
23293 - "1: movl 64(%4), %%eax\n"
23294 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23295 " .align 2,0x90\n"
23296 - "2: movl 0(%4), %%eax\n"
23297 - "21: movl 4(%4), %%edx\n"
23298 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23299 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23300 " movnti %%eax, 0(%3)\n"
23301 " movnti %%edx, 4(%3)\n"
23302 - "3: movl 8(%4), %%eax\n"
23303 - "31: movl 12(%4),%%edx\n"
23304 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23305 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23306 " movnti %%eax, 8(%3)\n"
23307 " movnti %%edx, 12(%3)\n"
23308 - "4: movl 16(%4), %%eax\n"
23309 - "41: movl 20(%4), %%edx\n"
23310 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23311 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23312 " movnti %%eax, 16(%3)\n"
23313 " movnti %%edx, 20(%3)\n"
23314 - "10: movl 24(%4), %%eax\n"
23315 - "51: movl 28(%4), %%edx\n"
23316 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23317 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23318 " movnti %%eax, 24(%3)\n"
23319 " movnti %%edx, 28(%3)\n"
23320 - "11: movl 32(%4), %%eax\n"
23321 - "61: movl 36(%4), %%edx\n"
23322 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23323 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23324 " movnti %%eax, 32(%3)\n"
23325 " movnti %%edx, 36(%3)\n"
23326 - "12: movl 40(%4), %%eax\n"
23327 - "71: movl 44(%4), %%edx\n"
23328 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23329 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23330 " movnti %%eax, 40(%3)\n"
23331 " movnti %%edx, 44(%3)\n"
23332 - "13: movl 48(%4), %%eax\n"
23333 - "81: movl 52(%4), %%edx\n"
23334 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23335 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23336 " movnti %%eax, 48(%3)\n"
23337 " movnti %%edx, 52(%3)\n"
23338 - "14: movl 56(%4), %%eax\n"
23339 - "91: movl 60(%4), %%edx\n"
23340 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23341 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23342 " movnti %%eax, 56(%3)\n"
23343 " movnti %%edx, 60(%3)\n"
23344 " addl $-64, %0\n"
23345 @@ -584,9 +703,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23346 " shrl $2, %0\n"
23347 " andl $3, %%eax\n"
23348 " cld\n"
23349 - "6: rep; movsl\n"
23350 + "6: rep; "__copyuser_seg" movsl\n"
23351 " movl %%eax,%0\n"
23352 - "7: rep; movsb\n"
23353 + "7: rep; "__copyuser_seg" movsb\n"
23354 "8:\n"
23355 ".section .fixup,\"ax\"\n"
23356 "9: lea 0(%%eax,%0,4),%0\n"
23357 @@ -629,32 +748,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23358 */
23359 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23360 unsigned long size);
23361 -unsigned long __copy_user_intel(void __user *to, const void *from,
23362 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23363 + unsigned long size);
23364 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23365 unsigned long size);
23366 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23367 const void __user *from, unsigned long size);
23368 #endif /* CONFIG_X86_INTEL_USERCOPY */
23369
23370 /* Generic arbitrary sized copy. */
23371 -#define __copy_user(to, from, size) \
23372 +#define __copy_user(to, from, size, prefix, set, restore) \
23373 do { \
23374 int __d0, __d1, __d2; \
23375 __asm__ __volatile__( \
23376 + set \
23377 " cmp $7,%0\n" \
23378 " jbe 1f\n" \
23379 " movl %1,%0\n" \
23380 " negl %0\n" \
23381 " andl $7,%0\n" \
23382 " subl %0,%3\n" \
23383 - "4: rep; movsb\n" \
23384 + "4: rep; "prefix"movsb\n" \
23385 " movl %3,%0\n" \
23386 " shrl $2,%0\n" \
23387 " andl $3,%3\n" \
23388 " .align 2,0x90\n" \
23389 - "0: rep; movsl\n" \
23390 + "0: rep; "prefix"movsl\n" \
23391 " movl %3,%0\n" \
23392 - "1: rep; movsb\n" \
23393 + "1: rep; "prefix"movsb\n" \
23394 "2:\n" \
23395 + restore \
23396 ".section .fixup,\"ax\"\n" \
23397 "5: addl %3,%0\n" \
23398 " jmp 2b\n" \
23399 @@ -682,14 +805,14 @@ do { \
23400 " negl %0\n" \
23401 " andl $7,%0\n" \
23402 " subl %0,%3\n" \
23403 - "4: rep; movsb\n" \
23404 + "4: rep; "__copyuser_seg"movsb\n" \
23405 " movl %3,%0\n" \
23406 " shrl $2,%0\n" \
23407 " andl $3,%3\n" \
23408 " .align 2,0x90\n" \
23409 - "0: rep; movsl\n" \
23410 + "0: rep; "__copyuser_seg"movsl\n" \
23411 " movl %3,%0\n" \
23412 - "1: rep; movsb\n" \
23413 + "1: rep; "__copyuser_seg"movsb\n" \
23414 "2:\n" \
23415 ".section .fixup,\"ax\"\n" \
23416 "5: addl %3,%0\n" \
23417 @@ -775,9 +898,9 @@ survive:
23418 }
23419 #endif
23420 if (movsl_is_ok(to, from, n))
23421 - __copy_user(to, from, n);
23422 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23423 else
23424 - n = __copy_user_intel(to, from, n);
23425 + n = __generic_copy_to_user_intel(to, from, n);
23426 return n;
23427 }
23428 EXPORT_SYMBOL(__copy_to_user_ll);
23429 @@ -797,10 +920,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23430 unsigned long n)
23431 {
23432 if (movsl_is_ok(to, from, n))
23433 - __copy_user(to, from, n);
23434 + __copy_user(to, from, n, __copyuser_seg, "", "");
23435 else
23436 - n = __copy_user_intel((void __user *)to,
23437 - (const void *)from, n);
23438 + n = __generic_copy_from_user_intel(to, from, n);
23439 return n;
23440 }
23441 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23442 @@ -827,65 +949,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23443 if (n > 64 && cpu_has_xmm2)
23444 n = __copy_user_intel_nocache(to, from, n);
23445 else
23446 - __copy_user(to, from, n);
23447 + __copy_user(to, from, n, __copyuser_seg, "", "");
23448 #else
23449 - __copy_user(to, from, n);
23450 + __copy_user(to, from, n, __copyuser_seg, "", "");
23451 #endif
23452 return n;
23453 }
23454 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23455
23456 -/**
23457 - * copy_to_user: - Copy a block of data into user space.
23458 - * @to: Destination address, in user space.
23459 - * @from: Source address, in kernel space.
23460 - * @n: Number of bytes to copy.
23461 - *
23462 - * Context: User context only. This function may sleep.
23463 - *
23464 - * Copy data from kernel space to user space.
23465 - *
23466 - * Returns number of bytes that could not be copied.
23467 - * On success, this will be zero.
23468 - */
23469 -unsigned long
23470 -copy_to_user(void __user *to, const void *from, unsigned long n)
23471 -{
23472 - if (access_ok(VERIFY_WRITE, to, n))
23473 - n = __copy_to_user(to, from, n);
23474 - return n;
23475 -}
23476 -EXPORT_SYMBOL(copy_to_user);
23477 -
23478 -/**
23479 - * copy_from_user: - Copy a block of data from user space.
23480 - * @to: Destination address, in kernel space.
23481 - * @from: Source address, in user space.
23482 - * @n: Number of bytes to copy.
23483 - *
23484 - * Context: User context only. This function may sleep.
23485 - *
23486 - * Copy data from user space to kernel space.
23487 - *
23488 - * Returns number of bytes that could not be copied.
23489 - * On success, this will be zero.
23490 - *
23491 - * If some data could not be copied, this function will pad the copied
23492 - * data to the requested size using zero bytes.
23493 - */
23494 -unsigned long
23495 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23496 -{
23497 - if (access_ok(VERIFY_READ, from, n))
23498 - n = __copy_from_user(to, from, n);
23499 - else
23500 - memset(to, 0, n);
23501 - return n;
23502 -}
23503 -EXPORT_SYMBOL(_copy_from_user);
23504 -
23505 void copy_from_user_overflow(void)
23506 {
23507 WARN(1, "Buffer overflow detected!\n");
23508 }
23509 EXPORT_SYMBOL(copy_from_user_overflow);
23510 +
23511 +void copy_to_user_overflow(void)
23512 +{
23513 + WARN(1, "Buffer overflow detected!\n");
23514 +}
23515 +EXPORT_SYMBOL(copy_to_user_overflow);
23516 +
23517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23518 +void __set_fs(mm_segment_t x)
23519 +{
23520 + switch (x.seg) {
23521 + case 0:
23522 + loadsegment(gs, 0);
23523 + break;
23524 + case TASK_SIZE_MAX:
23525 + loadsegment(gs, __USER_DS);
23526 + break;
23527 + case -1UL:
23528 + loadsegment(gs, __KERNEL_DS);
23529 + break;
23530 + default:
23531 + BUG();
23532 + }
23533 + return;
23534 +}
23535 +EXPORT_SYMBOL(__set_fs);
23536 +
23537 +void set_fs(mm_segment_t x)
23538 +{
23539 + current_thread_info()->addr_limit = x;
23540 + __set_fs(x);
23541 +}
23542 +EXPORT_SYMBOL(set_fs);
23543 +#endif
23544 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23545 index b7c2849..8633ad8 100644
23546 --- a/arch/x86/lib/usercopy_64.c
23547 +++ b/arch/x86/lib/usercopy_64.c
23548 @@ -42,6 +42,12 @@ long
23549 __strncpy_from_user(char *dst, const char __user *src, long count)
23550 {
23551 long res;
23552 +
23553 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23554 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23555 + src += PAX_USER_SHADOW_BASE;
23556 +#endif
23557 +
23558 __do_strncpy_from_user(dst, src, count, res);
23559 return res;
23560 }
23561 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23562 {
23563 long __d0;
23564 might_fault();
23565 +
23566 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23567 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23568 + addr += PAX_USER_SHADOW_BASE;
23569 +#endif
23570 +
23571 /* no memory constraint because it doesn't change any memory gcc knows
23572 about */
23573 asm volatile(
23574 @@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23575 }
23576 EXPORT_SYMBOL(strlen_user);
23577
23578 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23579 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23580 {
23581 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23582 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23583 - }
23584 - return len;
23585 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23586 +
23587 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23588 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23589 + to += PAX_USER_SHADOW_BASE;
23590 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23591 + from += PAX_USER_SHADOW_BASE;
23592 +#endif
23593 +
23594 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23595 + }
23596 + return len;
23597 }
23598 EXPORT_SYMBOL(copy_in_user);
23599
23600 @@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23601 * it is not necessary to optimize tail handling.
23602 */
23603 unsigned long
23604 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23605 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23606 {
23607 char c;
23608 unsigned zero_len;
23609 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23610 index 1fb85db..8b3540b 100644
23611 --- a/arch/x86/mm/extable.c
23612 +++ b/arch/x86/mm/extable.c
23613 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23614 const struct exception_table_entry *fixup;
23615
23616 #ifdef CONFIG_PNPBIOS
23617 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23618 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23619 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23620 extern u32 pnp_bios_is_utter_crap;
23621 pnp_bios_is_utter_crap = 1;
23622 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23623 index f0b4caf..d92fd42 100644
23624 --- a/arch/x86/mm/fault.c
23625 +++ b/arch/x86/mm/fault.c
23626 @@ -13,11 +13,18 @@
23627 #include <linux/perf_event.h> /* perf_sw_event */
23628 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23629 #include <linux/prefetch.h> /* prefetchw */
23630 +#include <linux/unistd.h>
23631 +#include <linux/compiler.h>
23632
23633 #include <asm/traps.h> /* dotraplinkage, ... */
23634 #include <asm/pgalloc.h> /* pgd_*(), ... */
23635 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23636 #include <asm/fixmap.h> /* VSYSCALL_START */
23637 +#include <asm/tlbflush.h>
23638 +
23639 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23640 +#include <asm/stacktrace.h>
23641 +#endif
23642
23643 /*
23644 * Page fault error code bits:
23645 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23646 int ret = 0;
23647
23648 /* kprobe_running() needs smp_processor_id() */
23649 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23650 + if (kprobes_built_in() && !user_mode(regs)) {
23651 preempt_disable();
23652 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23653 ret = 1;
23654 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23655 return !instr_lo || (instr_lo>>1) == 1;
23656 case 0x00:
23657 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23658 - if (probe_kernel_address(instr, opcode))
23659 + if (user_mode(regs)) {
23660 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23661 + return 0;
23662 + } else if (probe_kernel_address(instr, opcode))
23663 return 0;
23664
23665 *prefetch = (instr_lo == 0xF) &&
23666 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23667 while (instr < max_instr) {
23668 unsigned char opcode;
23669
23670 - if (probe_kernel_address(instr, opcode))
23671 + if (user_mode(regs)) {
23672 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23673 + break;
23674 + } else if (probe_kernel_address(instr, opcode))
23675 break;
23676
23677 instr++;
23678 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23679 force_sig_info(si_signo, &info, tsk);
23680 }
23681
23682 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23683 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23684 +#endif
23685 +
23686 +#ifdef CONFIG_PAX_EMUTRAMP
23687 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23688 +#endif
23689 +
23690 +#ifdef CONFIG_PAX_PAGEEXEC
23691 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23692 +{
23693 + pgd_t *pgd;
23694 + pud_t *pud;
23695 + pmd_t *pmd;
23696 +
23697 + pgd = pgd_offset(mm, address);
23698 + if (!pgd_present(*pgd))
23699 + return NULL;
23700 + pud = pud_offset(pgd, address);
23701 + if (!pud_present(*pud))
23702 + return NULL;
23703 + pmd = pmd_offset(pud, address);
23704 + if (!pmd_present(*pmd))
23705 + return NULL;
23706 + return pmd;
23707 +}
23708 +#endif
23709 +
23710 DEFINE_SPINLOCK(pgd_lock);
23711 LIST_HEAD(pgd_list);
23712
23713 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23714 for (address = VMALLOC_START & PMD_MASK;
23715 address >= TASK_SIZE && address < FIXADDR_TOP;
23716 address += PMD_SIZE) {
23717 +
23718 +#ifdef CONFIG_PAX_PER_CPU_PGD
23719 + unsigned long cpu;
23720 +#else
23721 struct page *page;
23722 +#endif
23723
23724 spin_lock(&pgd_lock);
23725 +
23726 +#ifdef CONFIG_PAX_PER_CPU_PGD
23727 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23728 + pgd_t *pgd = get_cpu_pgd(cpu);
23729 + pmd_t *ret;
23730 +#else
23731 list_for_each_entry(page, &pgd_list, lru) {
23732 + pgd_t *pgd = page_address(page);
23733 spinlock_t *pgt_lock;
23734 pmd_t *ret;
23735
23736 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23737 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23738
23739 spin_lock(pgt_lock);
23740 - ret = vmalloc_sync_one(page_address(page), address);
23741 +#endif
23742 +
23743 + ret = vmalloc_sync_one(pgd, address);
23744 +
23745 +#ifndef CONFIG_PAX_PER_CPU_PGD
23746 spin_unlock(pgt_lock);
23747 +#endif
23748
23749 if (!ret)
23750 break;
23751 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23752 * an interrupt in the middle of a task switch..
23753 */
23754 pgd_paddr = read_cr3();
23755 +
23756 +#ifdef CONFIG_PAX_PER_CPU_PGD
23757 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23758 +#endif
23759 +
23760 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23761 if (!pmd_k)
23762 return -1;
23763 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23764 * happen within a race in page table update. In the later
23765 * case just flush:
23766 */
23767 +
23768 +#ifdef CONFIG_PAX_PER_CPU_PGD
23769 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23770 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23771 +#else
23772 pgd = pgd_offset(current->active_mm, address);
23773 +#endif
23774 +
23775 pgd_ref = pgd_offset_k(address);
23776 if (pgd_none(*pgd_ref))
23777 return -1;
23778 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23779 static int is_errata100(struct pt_regs *regs, unsigned long address)
23780 {
23781 #ifdef CONFIG_X86_64
23782 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23783 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23784 return 1;
23785 #endif
23786 return 0;
23787 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23788 }
23789
23790 static const char nx_warning[] = KERN_CRIT
23791 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23792 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23793
23794 static void
23795 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23796 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23797 if (!oops_may_print())
23798 return;
23799
23800 - if (error_code & PF_INSTR) {
23801 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23802 unsigned int level;
23803
23804 pte_t *pte = lookup_address(address, &level);
23805
23806 if (pte && pte_present(*pte) && !pte_exec(*pte))
23807 - printk(nx_warning, current_uid());
23808 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23809 }
23810
23811 +#ifdef CONFIG_PAX_KERNEXEC
23812 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23813 + if (current->signal->curr_ip)
23814 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23815 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23816 + else
23817 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23818 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23819 + }
23820 +#endif
23821 +
23822 printk(KERN_ALERT "BUG: unable to handle kernel ");
23823 if (address < PAGE_SIZE)
23824 printk(KERN_CONT "NULL pointer dereference");
23825 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23826 }
23827 #endif
23828
23829 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23830 + if (pax_is_fetch_fault(regs, error_code, address)) {
23831 +
23832 +#ifdef CONFIG_PAX_EMUTRAMP
23833 + switch (pax_handle_fetch_fault(regs)) {
23834 + case 2:
23835 + return;
23836 + }
23837 +#endif
23838 +
23839 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23840 + do_group_exit(SIGKILL);
23841 + }
23842 +#endif
23843 +
23844 if (unlikely(show_unhandled_signals))
23845 show_signal_msg(regs, error_code, address, tsk);
23846
23847 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23848 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23849 printk(KERN_ERR
23850 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23851 - tsk->comm, tsk->pid, address);
23852 + tsk->comm, task_pid_nr(tsk), address);
23853 code = BUS_MCEERR_AR;
23854 }
23855 #endif
23856 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23857 return 1;
23858 }
23859
23860 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23861 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23862 +{
23863 + pte_t *pte;
23864 + pmd_t *pmd;
23865 + spinlock_t *ptl;
23866 + unsigned char pte_mask;
23867 +
23868 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23869 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23870 + return 0;
23871 +
23872 + /* PaX: it's our fault, let's handle it if we can */
23873 +
23874 + /* PaX: take a look at read faults before acquiring any locks */
23875 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23876 + /* instruction fetch attempt from a protected page in user mode */
23877 + up_read(&mm->mmap_sem);
23878 +
23879 +#ifdef CONFIG_PAX_EMUTRAMP
23880 + switch (pax_handle_fetch_fault(regs)) {
23881 + case 2:
23882 + return 1;
23883 + }
23884 +#endif
23885 +
23886 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23887 + do_group_exit(SIGKILL);
23888 + }
23889 +
23890 + pmd = pax_get_pmd(mm, address);
23891 + if (unlikely(!pmd))
23892 + return 0;
23893 +
23894 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23895 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23896 + pte_unmap_unlock(pte, ptl);
23897 + return 0;
23898 + }
23899 +
23900 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23901 + /* write attempt to a protected page in user mode */
23902 + pte_unmap_unlock(pte, ptl);
23903 + return 0;
23904 + }
23905 +
23906 +#ifdef CONFIG_SMP
23907 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23908 +#else
23909 + if (likely(address > get_limit(regs->cs)))
23910 +#endif
23911 + {
23912 + set_pte(pte, pte_mkread(*pte));
23913 + __flush_tlb_one(address);
23914 + pte_unmap_unlock(pte, ptl);
23915 + up_read(&mm->mmap_sem);
23916 + return 1;
23917 + }
23918 +
23919 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23920 +
23921 + /*
23922 + * PaX: fill DTLB with user rights and retry
23923 + */
23924 + __asm__ __volatile__ (
23925 + "orb %2,(%1)\n"
23926 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23927 +/*
23928 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23929 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23930 + * page fault when examined during a TLB load attempt. this is true not only
23931 + * for PTEs holding a non-present entry but also present entries that will
23932 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23933 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23934 + * for our target pages since their PTEs are simply not in the TLBs at all.
23935 +
23936 + * the best thing in omitting it is that we gain around 15-20% speed in the
23937 + * fast path of the page fault handler and can get rid of tracing since we
23938 + * can no longer flush unintended entries.
23939 + */
23940 + "invlpg (%0)\n"
23941 +#endif
23942 + __copyuser_seg"testb $0,(%0)\n"
23943 + "xorb %3,(%1)\n"
23944 + :
23945 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23946 + : "memory", "cc");
23947 + pte_unmap_unlock(pte, ptl);
23948 + up_read(&mm->mmap_sem);
23949 + return 1;
23950 +}
23951 +#endif
23952 +
23953 /*
23954 * Handle a spurious fault caused by a stale TLB entry.
23955 *
23956 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23957 static inline int
23958 access_error(unsigned long error_code, struct vm_area_struct *vma)
23959 {
23960 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23961 + return 1;
23962 +
23963 if (error_code & PF_WRITE) {
23964 /* write, present and write, not present: */
23965 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23966 @@ -1005,18 +1197,32 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23967 {
23968 struct vm_area_struct *vma;
23969 struct task_struct *tsk;
23970 - unsigned long address;
23971 struct mm_struct *mm;
23972 int fault;
23973 int write = error_code & PF_WRITE;
23974 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23975 (write ? FAULT_FLAG_WRITE : 0);
23976
23977 - tsk = current;
23978 - mm = tsk->mm;
23979 -
23980 /* Get the faulting address: */
23981 - address = read_cr2();
23982 + unsigned long address = read_cr2();
23983 +
23984 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23985 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23986 + if (!search_exception_tables(regs->ip)) {
23987 + bad_area_nosemaphore(regs, error_code, address);
23988 + return;
23989 + }
23990 + if (address < PAX_USER_SHADOW_BASE) {
23991 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23992 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23993 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23994 + } else
23995 + address -= PAX_USER_SHADOW_BASE;
23996 + }
23997 +#endif
23998 +
23999 + tsk = current;
24000 + mm = tsk->mm;
24001
24002 /*
24003 * Detect and handle instructions that would cause a page fault for
24004 @@ -1077,7 +1283,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24005 * User-mode registers count as a user access even for any
24006 * potential system fault or CPU buglet:
24007 */
24008 - if (user_mode_vm(regs)) {
24009 + if (user_mode(regs)) {
24010 local_irq_enable();
24011 error_code |= PF_USER;
24012 } else {
24013 @@ -1132,6 +1338,11 @@ retry:
24014 might_sleep();
24015 }
24016
24017 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24018 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24019 + return;
24020 +#endif
24021 +
24022 vma = find_vma(mm, address);
24023 if (unlikely(!vma)) {
24024 bad_area(regs, error_code, address);
24025 @@ -1143,18 +1354,24 @@ retry:
24026 bad_area(regs, error_code, address);
24027 return;
24028 }
24029 - if (error_code & PF_USER) {
24030 - /*
24031 - * Accessing the stack below %sp is always a bug.
24032 - * The large cushion allows instructions like enter
24033 - * and pusha to work. ("enter $65535, $31" pushes
24034 - * 32 pointers and then decrements %sp by 65535.)
24035 - */
24036 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24037 - bad_area(regs, error_code, address);
24038 - return;
24039 - }
24040 + /*
24041 + * Accessing the stack below %sp is always a bug.
24042 + * The large cushion allows instructions like enter
24043 + * and pusha to work. ("enter $65535, $31" pushes
24044 + * 32 pointers and then decrements %sp by 65535.)
24045 + */
24046 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24047 + bad_area(regs, error_code, address);
24048 + return;
24049 }
24050 +
24051 +#ifdef CONFIG_PAX_SEGMEXEC
24052 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24053 + bad_area(regs, error_code, address);
24054 + return;
24055 + }
24056 +#endif
24057 +
24058 if (unlikely(expand_stack(vma, address))) {
24059 bad_area(regs, error_code, address);
24060 return;
24061 @@ -1209,3 +1426,292 @@ good_area:
24062
24063 up_read(&mm->mmap_sem);
24064 }
24065 +
24066 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24067 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24068 +{
24069 + struct mm_struct *mm = current->mm;
24070 + unsigned long ip = regs->ip;
24071 +
24072 + if (v8086_mode(regs))
24073 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24074 +
24075 +#ifdef CONFIG_PAX_PAGEEXEC
24076 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24077 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24078 + return true;
24079 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24080 + return true;
24081 + return false;
24082 + }
24083 +#endif
24084 +
24085 +#ifdef CONFIG_PAX_SEGMEXEC
24086 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24087 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24088 + return true;
24089 + return false;
24090 + }
24091 +#endif
24092 +
24093 + return false;
24094 +}
24095 +#endif
24096 +
24097 +#ifdef CONFIG_PAX_EMUTRAMP
24098 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24099 +{
24100 + int err;
24101 +
24102 + do { /* PaX: libffi trampoline emulation */
24103 + unsigned char mov, jmp;
24104 + unsigned int addr1, addr2;
24105 +
24106 +#ifdef CONFIG_X86_64
24107 + if ((regs->ip + 9) >> 32)
24108 + break;
24109 +#endif
24110 +
24111 + err = get_user(mov, (unsigned char __user *)regs->ip);
24112 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24113 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24114 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24115 +
24116 + if (err)
24117 + break;
24118 +
24119 + if (mov == 0xB8 && jmp == 0xE9) {
24120 + regs->ax = addr1;
24121 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24122 + return 2;
24123 + }
24124 + } while (0);
24125 +
24126 + do { /* PaX: gcc trampoline emulation #1 */
24127 + unsigned char mov1, mov2;
24128 + unsigned short jmp;
24129 + unsigned int addr1, addr2;
24130 +
24131 +#ifdef CONFIG_X86_64
24132 + if ((regs->ip + 11) >> 32)
24133 + break;
24134 +#endif
24135 +
24136 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24137 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24138 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24139 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24140 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24141 +
24142 + if (err)
24143 + break;
24144 +
24145 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24146 + regs->cx = addr1;
24147 + regs->ax = addr2;
24148 + regs->ip = addr2;
24149 + return 2;
24150 + }
24151 + } while (0);
24152 +
24153 + do { /* PaX: gcc trampoline emulation #2 */
24154 + unsigned char mov, jmp;
24155 + unsigned int addr1, addr2;
24156 +
24157 +#ifdef CONFIG_X86_64
24158 + if ((regs->ip + 9) >> 32)
24159 + break;
24160 +#endif
24161 +
24162 + err = get_user(mov, (unsigned char __user *)regs->ip);
24163 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24164 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24165 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24166 +
24167 + if (err)
24168 + break;
24169 +
24170 + if (mov == 0xB9 && jmp == 0xE9) {
24171 + regs->cx = addr1;
24172 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24173 + return 2;
24174 + }
24175 + } while (0);
24176 +
24177 + return 1; /* PaX in action */
24178 +}
24179 +
24180 +#ifdef CONFIG_X86_64
24181 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24182 +{
24183 + int err;
24184 +
24185 + do { /* PaX: libffi trampoline emulation */
24186 + unsigned short mov1, mov2, jmp1;
24187 + unsigned char stcclc, jmp2;
24188 + unsigned long addr1, addr2;
24189 +
24190 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24191 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24192 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24193 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24194 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24195 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24196 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24197 +
24198 + if (err)
24199 + break;
24200 +
24201 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24202 + regs->r11 = addr1;
24203 + regs->r10 = addr2;
24204 + if (stcclc == 0xF8)
24205 + regs->flags &= ~X86_EFLAGS_CF;
24206 + else
24207 + regs->flags |= X86_EFLAGS_CF;
24208 + regs->ip = addr1;
24209 + return 2;
24210 + }
24211 + } while (0);
24212 +
24213 + do { /* PaX: gcc trampoline emulation #1 */
24214 + unsigned short mov1, mov2, jmp1;
24215 + unsigned char jmp2;
24216 + unsigned int addr1;
24217 + unsigned long addr2;
24218 +
24219 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24220 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24221 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24222 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24223 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24224 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24225 +
24226 + if (err)
24227 + break;
24228 +
24229 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24230 + regs->r11 = addr1;
24231 + regs->r10 = addr2;
24232 + regs->ip = addr1;
24233 + return 2;
24234 + }
24235 + } while (0);
24236 +
24237 + do { /* PaX: gcc trampoline emulation #2 */
24238 + unsigned short mov1, mov2, jmp1;
24239 + unsigned char jmp2;
24240 + unsigned long addr1, addr2;
24241 +
24242 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24243 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24244 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24245 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24246 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24247 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24248 +
24249 + if (err)
24250 + break;
24251 +
24252 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24253 + regs->r11 = addr1;
24254 + regs->r10 = addr2;
24255 + regs->ip = addr1;
24256 + return 2;
24257 + }
24258 + } while (0);
24259 +
24260 + return 1; /* PaX in action */
24261 +}
24262 +#endif
24263 +
24264 +/*
24265 + * PaX: decide what to do with offenders (regs->ip = fault address)
24266 + *
24267 + * returns 1 when task should be killed
24268 + * 2 when gcc trampoline was detected
24269 + */
24270 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24271 +{
24272 + if (v8086_mode(regs))
24273 + return 1;
24274 +
24275 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24276 + return 1;
24277 +
24278 +#ifdef CONFIG_X86_32
24279 + return pax_handle_fetch_fault_32(regs);
24280 +#else
24281 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24282 + return pax_handle_fetch_fault_32(regs);
24283 + else
24284 + return pax_handle_fetch_fault_64(regs);
24285 +#endif
24286 +}
24287 +#endif
24288 +
24289 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24290 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24291 +{
24292 + long i;
24293 +
24294 + printk(KERN_ERR "PAX: bytes at PC: ");
24295 + for (i = 0; i < 20; i++) {
24296 + unsigned char c;
24297 + if (get_user(c, (unsigned char __force_user *)pc+i))
24298 + printk(KERN_CONT "?? ");
24299 + else
24300 + printk(KERN_CONT "%02x ", c);
24301 + }
24302 + printk("\n");
24303 +
24304 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24305 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24306 + unsigned long c;
24307 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24308 +#ifdef CONFIG_X86_32
24309 + printk(KERN_CONT "???????? ");
24310 +#else
24311 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24312 + printk(KERN_CONT "???????? ???????? ");
24313 + else
24314 + printk(KERN_CONT "???????????????? ");
24315 +#endif
24316 + } else {
24317 +#ifdef CONFIG_X86_64
24318 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24319 + printk(KERN_CONT "%08x ", (unsigned int)c);
24320 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24321 + } else
24322 +#endif
24323 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24324 + }
24325 + }
24326 + printk("\n");
24327 +}
24328 +#endif
24329 +
24330 +/**
24331 + * probe_kernel_write(): safely attempt to write to a location
24332 + * @dst: address to write to
24333 + * @src: pointer to the data that shall be written
24334 + * @size: size of the data chunk
24335 + *
24336 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24337 + * happens, handle that and return -EFAULT.
24338 + */
24339 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24340 +{
24341 + long ret;
24342 + mm_segment_t old_fs = get_fs();
24343 +
24344 + set_fs(KERNEL_DS);
24345 + pagefault_disable();
24346 + pax_open_kernel();
24347 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24348 + pax_close_kernel();
24349 + pagefault_enable();
24350 + set_fs(old_fs);
24351 +
24352 + return ret ? -EFAULT : 0;
24353 +}
24354 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24355 index dd74e46..7d26398 100644
24356 --- a/arch/x86/mm/gup.c
24357 +++ b/arch/x86/mm/gup.c
24358 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24359 addr = start;
24360 len = (unsigned long) nr_pages << PAGE_SHIFT;
24361 end = start + len;
24362 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24363 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24364 (void __user *)start, len)))
24365 return 0;
24366
24367 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24368 index f4f29b1..5cac4fb 100644
24369 --- a/arch/x86/mm/highmem_32.c
24370 +++ b/arch/x86/mm/highmem_32.c
24371 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24372 idx = type + KM_TYPE_NR*smp_processor_id();
24373 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24374 BUG_ON(!pte_none(*(kmap_pte-idx)));
24375 +
24376 + pax_open_kernel();
24377 set_pte(kmap_pte-idx, mk_pte(page, prot));
24378 + pax_close_kernel();
24379 +
24380 arch_flush_lazy_mmu_mode();
24381
24382 return (void *)vaddr;
24383 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24384 index 8ecbb4b..a269cab 100644
24385 --- a/arch/x86/mm/hugetlbpage.c
24386 +++ b/arch/x86/mm/hugetlbpage.c
24387 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24388 struct hstate *h = hstate_file(file);
24389 struct mm_struct *mm = current->mm;
24390 struct vm_area_struct *vma;
24391 - unsigned long start_addr;
24392 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24393 +
24394 +#ifdef CONFIG_PAX_SEGMEXEC
24395 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24396 + pax_task_size = SEGMEXEC_TASK_SIZE;
24397 +#endif
24398 +
24399 + pax_task_size -= PAGE_SIZE;
24400
24401 if (len > mm->cached_hole_size) {
24402 - start_addr = mm->free_area_cache;
24403 + start_addr = mm->free_area_cache;
24404 } else {
24405 - start_addr = TASK_UNMAPPED_BASE;
24406 - mm->cached_hole_size = 0;
24407 + start_addr = mm->mmap_base;
24408 + mm->cached_hole_size = 0;
24409 }
24410
24411 full_search:
24412 @@ -280,26 +287,27 @@ full_search:
24413
24414 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24415 /* At this point: (!vma || addr < vma->vm_end). */
24416 - if (TASK_SIZE - len < addr) {
24417 + if (pax_task_size - len < addr) {
24418 /*
24419 * Start a new search - just in case we missed
24420 * some holes.
24421 */
24422 - if (start_addr != TASK_UNMAPPED_BASE) {
24423 - start_addr = TASK_UNMAPPED_BASE;
24424 + if (start_addr != mm->mmap_base) {
24425 + start_addr = mm->mmap_base;
24426 mm->cached_hole_size = 0;
24427 goto full_search;
24428 }
24429 return -ENOMEM;
24430 }
24431 - if (!vma || addr + len <= vma->vm_start) {
24432 - mm->free_area_cache = addr + len;
24433 - return addr;
24434 - }
24435 + if (check_heap_stack_gap(vma, addr, len))
24436 + break;
24437 if (addr + mm->cached_hole_size < vma->vm_start)
24438 mm->cached_hole_size = vma->vm_start - addr;
24439 addr = ALIGN(vma->vm_end, huge_page_size(h));
24440 }
24441 +
24442 + mm->free_area_cache = addr + len;
24443 + return addr;
24444 }
24445
24446 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24447 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24448 {
24449 struct hstate *h = hstate_file(file);
24450 struct mm_struct *mm = current->mm;
24451 - struct vm_area_struct *vma, *prev_vma;
24452 - unsigned long base = mm->mmap_base, addr = addr0;
24453 + struct vm_area_struct *vma;
24454 + unsigned long base = mm->mmap_base, addr;
24455 unsigned long largest_hole = mm->cached_hole_size;
24456 - int first_time = 1;
24457
24458 /* don't allow allocations above current base */
24459 if (mm->free_area_cache > base)
24460 @@ -321,14 +328,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24461 largest_hole = 0;
24462 mm->free_area_cache = base;
24463 }
24464 -try_again:
24465 +
24466 /* make sure it can fit in the remaining address space */
24467 if (mm->free_area_cache < len)
24468 goto fail;
24469
24470 /* either no address requested or can't fit in requested address hole */
24471 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24472 + addr = (mm->free_area_cache - len);
24473 do {
24474 + addr &= huge_page_mask(h);
24475 /*
24476 * Lookup failure means no vma is above this address,
24477 * i.e. return with success:
24478 @@ -341,46 +349,47 @@ try_again:
24479 * new region fits between prev_vma->vm_end and
24480 * vma->vm_start, use it:
24481 */
24482 - prev_vma = vma->vm_prev;
24483 - if (addr + len <= vma->vm_start &&
24484 - (!prev_vma || (addr >= prev_vma->vm_end))) {
24485 + if (check_heap_stack_gap(vma, addr, len)) {
24486 /* remember the address as a hint for next time */
24487 - mm->cached_hole_size = largest_hole;
24488 - return (mm->free_area_cache = addr);
24489 - } else {
24490 - /* pull free_area_cache down to the first hole */
24491 - if (mm->free_area_cache == vma->vm_end) {
24492 - mm->free_area_cache = vma->vm_start;
24493 - mm->cached_hole_size = largest_hole;
24494 - }
24495 + mm->cached_hole_size = largest_hole;
24496 + return (mm->free_area_cache = addr);
24497 + }
24498 + /* pull free_area_cache down to the first hole */
24499 + if (mm->free_area_cache == vma->vm_end) {
24500 + mm->free_area_cache = vma->vm_start;
24501 + mm->cached_hole_size = largest_hole;
24502 }
24503
24504 /* remember the largest hole we saw so far */
24505 if (addr + largest_hole < vma->vm_start)
24506 - largest_hole = vma->vm_start - addr;
24507 + largest_hole = vma->vm_start - addr;
24508
24509 /* try just below the current vma->vm_start */
24510 - addr = (vma->vm_start - len) & huge_page_mask(h);
24511 - } while (len <= vma->vm_start);
24512 + addr = skip_heap_stack_gap(vma, len);
24513 + } while (!IS_ERR_VALUE(addr));
24514
24515 fail:
24516 /*
24517 - * if hint left us with no space for the requested
24518 - * mapping then try again:
24519 - */
24520 - if (first_time) {
24521 - mm->free_area_cache = base;
24522 - largest_hole = 0;
24523 - first_time = 0;
24524 - goto try_again;
24525 - }
24526 - /*
24527 * A failed mmap() very likely causes application failure,
24528 * so fall back to the bottom-up function here. This scenario
24529 * can happen with large stack limits and large mmap()
24530 * allocations.
24531 */
24532 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24533 +
24534 +#ifdef CONFIG_PAX_SEGMEXEC
24535 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24536 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24537 + else
24538 +#endif
24539 +
24540 + mm->mmap_base = TASK_UNMAPPED_BASE;
24541 +
24542 +#ifdef CONFIG_PAX_RANDMMAP
24543 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24544 + mm->mmap_base += mm->delta_mmap;
24545 +#endif
24546 +
24547 + mm->free_area_cache = mm->mmap_base;
24548 mm->cached_hole_size = ~0UL;
24549 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24550 len, pgoff, flags);
24551 @@ -388,6 +397,7 @@ fail:
24552 /*
24553 * Restore the topdown base:
24554 */
24555 + mm->mmap_base = base;
24556 mm->free_area_cache = base;
24557 mm->cached_hole_size = ~0UL;
24558
24559 @@ -401,10 +411,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24560 struct hstate *h = hstate_file(file);
24561 struct mm_struct *mm = current->mm;
24562 struct vm_area_struct *vma;
24563 + unsigned long pax_task_size = TASK_SIZE;
24564
24565 if (len & ~huge_page_mask(h))
24566 return -EINVAL;
24567 - if (len > TASK_SIZE)
24568 +
24569 +#ifdef CONFIG_PAX_SEGMEXEC
24570 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24571 + pax_task_size = SEGMEXEC_TASK_SIZE;
24572 +#endif
24573 +
24574 + pax_task_size -= PAGE_SIZE;
24575 +
24576 + if (len > pax_task_size)
24577 return -ENOMEM;
24578
24579 if (flags & MAP_FIXED) {
24580 @@ -416,8 +435,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24581 if (addr) {
24582 addr = ALIGN(addr, huge_page_size(h));
24583 vma = find_vma(mm, addr);
24584 - if (TASK_SIZE - len >= addr &&
24585 - (!vma || addr + len <= vma->vm_start))
24586 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24587 return addr;
24588 }
24589 if (mm->get_unmapped_area == arch_get_unmapped_area)
24590 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24591 index 6cabf65..77e9c1c 100644
24592 --- a/arch/x86/mm/init.c
24593 +++ b/arch/x86/mm/init.c
24594 @@ -17,6 +17,7 @@
24595 #include <asm/tlb.h>
24596 #include <asm/proto.h>
24597 #include <asm/dma.h> /* for MAX_DMA_PFN */
24598 +#include <asm/desc.h>
24599
24600 unsigned long __initdata pgt_buf_start;
24601 unsigned long __meminitdata pgt_buf_end;
24602 @@ -33,7 +34,7 @@ int direct_gbpages
24603 static void __init find_early_table_space(unsigned long end, int use_pse,
24604 int use_gbpages)
24605 {
24606 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24607 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24608 phys_addr_t base;
24609
24610 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24611 @@ -314,8 +315,29 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24612 */
24613 int devmem_is_allowed(unsigned long pagenr)
24614 {
24615 +#ifdef CONFIG_GRKERNSEC_KMEM
24616 + /* allow BDA */
24617 + if (!pagenr)
24618 + return 1;
24619 + /* allow EBDA */
24620 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24621 + return 1;
24622 +#else
24623 + if (!pagenr)
24624 + return 1;
24625 +#ifdef CONFIG_VM86
24626 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24627 + return 1;
24628 +#endif
24629 +#endif
24630 +
24631 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24632 + return 1;
24633 +#ifdef CONFIG_GRKERNSEC_KMEM
24634 + /* throw out everything else below 1MB */
24635 if (pagenr <= 256)
24636 - return 1;
24637 + return 0;
24638 +#endif
24639 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24640 return 0;
24641 if (!page_is_ram(pagenr))
24642 @@ -374,6 +396,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24643
24644 void free_initmem(void)
24645 {
24646 +
24647 +#ifdef CONFIG_PAX_KERNEXEC
24648 +#ifdef CONFIG_X86_32
24649 + /* PaX: limit KERNEL_CS to actual size */
24650 + unsigned long addr, limit;
24651 + struct desc_struct d;
24652 + int cpu;
24653 +
24654 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24655 + limit = (limit - 1UL) >> PAGE_SHIFT;
24656 +
24657 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24658 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24659 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24660 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24661 + }
24662 +
24663 + /* PaX: make KERNEL_CS read-only */
24664 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24665 + if (!paravirt_enabled())
24666 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24667 +/*
24668 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24669 + pgd = pgd_offset_k(addr);
24670 + pud = pud_offset(pgd, addr);
24671 + pmd = pmd_offset(pud, addr);
24672 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24673 + }
24674 +*/
24675 +#ifdef CONFIG_X86_PAE
24676 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24677 +/*
24678 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24679 + pgd = pgd_offset_k(addr);
24680 + pud = pud_offset(pgd, addr);
24681 + pmd = pmd_offset(pud, addr);
24682 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24683 + }
24684 +*/
24685 +#endif
24686 +
24687 +#ifdef CONFIG_MODULES
24688 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24689 +#endif
24690 +
24691 +#else
24692 + pgd_t *pgd;
24693 + pud_t *pud;
24694 + pmd_t *pmd;
24695 + unsigned long addr, end;
24696 +
24697 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24698 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24699 + pgd = pgd_offset_k(addr);
24700 + pud = pud_offset(pgd, addr);
24701 + pmd = pmd_offset(pud, addr);
24702 + if (!pmd_present(*pmd))
24703 + continue;
24704 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24705 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24706 + else
24707 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24708 + }
24709 +
24710 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24711 + end = addr + KERNEL_IMAGE_SIZE;
24712 + for (; addr < end; addr += PMD_SIZE) {
24713 + pgd = pgd_offset_k(addr);
24714 + pud = pud_offset(pgd, addr);
24715 + pmd = pmd_offset(pud, addr);
24716 + if (!pmd_present(*pmd))
24717 + continue;
24718 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24719 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24720 + }
24721 +#endif
24722 +
24723 + flush_tlb_all();
24724 +#endif
24725 +
24726 free_init_pages("unused kernel memory",
24727 (unsigned long)(&__init_begin),
24728 (unsigned long)(&__init_end));
24729 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24730 index 8663f6c..829ae76 100644
24731 --- a/arch/x86/mm/init_32.c
24732 +++ b/arch/x86/mm/init_32.c
24733 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
24734 }
24735
24736 /*
24737 - * Creates a middle page table and puts a pointer to it in the
24738 - * given global directory entry. This only returns the gd entry
24739 - * in non-PAE compilation mode, since the middle layer is folded.
24740 - */
24741 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24742 -{
24743 - pud_t *pud;
24744 - pmd_t *pmd_table;
24745 -
24746 -#ifdef CONFIG_X86_PAE
24747 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24748 - if (after_bootmem)
24749 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24750 - else
24751 - pmd_table = (pmd_t *)alloc_low_page();
24752 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24753 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24754 - pud = pud_offset(pgd, 0);
24755 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24756 -
24757 - return pmd_table;
24758 - }
24759 -#endif
24760 - pud = pud_offset(pgd, 0);
24761 - pmd_table = pmd_offset(pud, 0);
24762 -
24763 - return pmd_table;
24764 -}
24765 -
24766 -/*
24767 * Create a page table and place a pointer to it in a middle page
24768 * directory entry:
24769 */
24770 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24771 page_table = (pte_t *)alloc_low_page();
24772
24773 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24774 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24775 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24776 +#else
24777 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24778 +#endif
24779 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24780 }
24781
24782 return pte_offset_kernel(pmd, 0);
24783 }
24784
24785 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24786 +{
24787 + pud_t *pud;
24788 + pmd_t *pmd_table;
24789 +
24790 + pud = pud_offset(pgd, 0);
24791 + pmd_table = pmd_offset(pud, 0);
24792 +
24793 + return pmd_table;
24794 +}
24795 +
24796 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24797 {
24798 int pgd_idx = pgd_index(vaddr);
24799 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24800 int pgd_idx, pmd_idx;
24801 unsigned long vaddr;
24802 pgd_t *pgd;
24803 + pud_t *pud;
24804 pmd_t *pmd;
24805 pte_t *pte = NULL;
24806
24807 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24808 pgd = pgd_base + pgd_idx;
24809
24810 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24811 - pmd = one_md_table_init(pgd);
24812 - pmd = pmd + pmd_index(vaddr);
24813 + pud = pud_offset(pgd, vaddr);
24814 + pmd = pmd_offset(pud, vaddr);
24815 +
24816 +#ifdef CONFIG_X86_PAE
24817 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24818 +#endif
24819 +
24820 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24821 pmd++, pmd_idx++) {
24822 pte = page_table_kmap_check(one_page_table_init(pmd),
24823 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24824 }
24825 }
24826
24827 -static inline int is_kernel_text(unsigned long addr)
24828 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24829 {
24830 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24831 - return 1;
24832 - return 0;
24833 + if ((start > ktla_ktva((unsigned long)_etext) ||
24834 + end <= ktla_ktva((unsigned long)_stext)) &&
24835 + (start > ktla_ktva((unsigned long)_einittext) ||
24836 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24837 +
24838 +#ifdef CONFIG_ACPI_SLEEP
24839 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24840 +#endif
24841 +
24842 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24843 + return 0;
24844 + return 1;
24845 }
24846
24847 /*
24848 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned long start,
24849 unsigned long last_map_addr = end;
24850 unsigned long start_pfn, end_pfn;
24851 pgd_t *pgd_base = swapper_pg_dir;
24852 - int pgd_idx, pmd_idx, pte_ofs;
24853 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24854 unsigned long pfn;
24855 pgd_t *pgd;
24856 + pud_t *pud;
24857 pmd_t *pmd;
24858 pte_t *pte;
24859 unsigned pages_2m, pages_4k;
24860 @@ -281,8 +282,13 @@ repeat:
24861 pfn = start_pfn;
24862 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24863 pgd = pgd_base + pgd_idx;
24864 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24865 - pmd = one_md_table_init(pgd);
24866 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24867 + pud = pud_offset(pgd, 0);
24868 + pmd = pmd_offset(pud, 0);
24869 +
24870 +#ifdef CONFIG_X86_PAE
24871 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24872 +#endif
24873
24874 if (pfn >= end_pfn)
24875 continue;
24876 @@ -294,14 +300,13 @@ repeat:
24877 #endif
24878 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24879 pmd++, pmd_idx++) {
24880 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24881 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24882
24883 /*
24884 * Map with big pages if possible, otherwise
24885 * create normal page tables:
24886 */
24887 if (use_pse) {
24888 - unsigned int addr2;
24889 pgprot_t prot = PAGE_KERNEL_LARGE;
24890 /*
24891 * first pass will use the same initial
24892 @@ -311,11 +316,7 @@ repeat:
24893 __pgprot(PTE_IDENT_ATTR |
24894 _PAGE_PSE);
24895
24896 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24897 - PAGE_OFFSET + PAGE_SIZE-1;
24898 -
24899 - if (is_kernel_text(addr) ||
24900 - is_kernel_text(addr2))
24901 + if (is_kernel_text(address, address + PMD_SIZE))
24902 prot = PAGE_KERNEL_LARGE_EXEC;
24903
24904 pages_2m++;
24905 @@ -332,7 +333,7 @@ repeat:
24906 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24907 pte += pte_ofs;
24908 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24909 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24910 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24911 pgprot_t prot = PAGE_KERNEL;
24912 /*
24913 * first pass will use the same initial
24914 @@ -340,7 +341,7 @@ repeat:
24915 */
24916 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24917
24918 - if (is_kernel_text(addr))
24919 + if (is_kernel_text(address, address + PAGE_SIZE))
24920 prot = PAGE_KERNEL_EXEC;
24921
24922 pages_4k++;
24923 @@ -466,7 +467,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24924
24925 pud = pud_offset(pgd, va);
24926 pmd = pmd_offset(pud, va);
24927 - if (!pmd_present(*pmd))
24928 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24929 break;
24930
24931 pte = pte_offset_kernel(pmd, va);
24932 @@ -518,12 +519,10 @@ void __init early_ioremap_page_table_range_init(void)
24933
24934 static void __init pagetable_init(void)
24935 {
24936 - pgd_t *pgd_base = swapper_pg_dir;
24937 -
24938 - permanent_kmaps_init(pgd_base);
24939 + permanent_kmaps_init(swapper_pg_dir);
24940 }
24941
24942 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24943 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24944 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24945
24946 /* user-defined highmem size */
24947 @@ -735,6 +734,12 @@ void __init mem_init(void)
24948
24949 pci_iommu_alloc();
24950
24951 +#ifdef CONFIG_PAX_PER_CPU_PGD
24952 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24953 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24954 + KERNEL_PGD_PTRS);
24955 +#endif
24956 +
24957 #ifdef CONFIG_FLATMEM
24958 BUG_ON(!mem_map);
24959 #endif
24960 @@ -761,7 +766,7 @@ void __init mem_init(void)
24961 reservedpages++;
24962
24963 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24964 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24965 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24966 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24967
24968 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24969 @@ -802,10 +807,10 @@ void __init mem_init(void)
24970 ((unsigned long)&__init_end -
24971 (unsigned long)&__init_begin) >> 10,
24972
24973 - (unsigned long)&_etext, (unsigned long)&_edata,
24974 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24975 + (unsigned long)&_sdata, (unsigned long)&_edata,
24976 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24977
24978 - (unsigned long)&_text, (unsigned long)&_etext,
24979 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24980 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24981
24982 /*
24983 @@ -883,6 +888,7 @@ void set_kernel_text_rw(void)
24984 if (!kernel_set_to_readonly)
24985 return;
24986
24987 + start = ktla_ktva(start);
24988 pr_debug("Set kernel text: %lx - %lx for read write\n",
24989 start, start+size);
24990
24991 @@ -897,6 +903,7 @@ void set_kernel_text_ro(void)
24992 if (!kernel_set_to_readonly)
24993 return;
24994
24995 + start = ktla_ktva(start);
24996 pr_debug("Set kernel text: %lx - %lx for read only\n",
24997 start, start+size);
24998
24999 @@ -925,6 +932,7 @@ void mark_rodata_ro(void)
25000 unsigned long start = PFN_ALIGN(_text);
25001 unsigned long size = PFN_ALIGN(_etext) - start;
25002
25003 + start = ktla_ktva(start);
25004 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25005 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25006 size >> 10);
25007 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25008 index 436a030..4f97ffc 100644
25009 --- a/arch/x86/mm/init_64.c
25010 +++ b/arch/x86/mm/init_64.c
25011 @@ -75,7 +75,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25012 * around without checking the pgd every time.
25013 */
25014
25015 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25016 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25017 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25018
25019 int force_personality32;
25020 @@ -108,12 +108,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25021
25022 for (address = start; address <= end; address += PGDIR_SIZE) {
25023 const pgd_t *pgd_ref = pgd_offset_k(address);
25024 +
25025 +#ifdef CONFIG_PAX_PER_CPU_PGD
25026 + unsigned long cpu;
25027 +#else
25028 struct page *page;
25029 +#endif
25030
25031 if (pgd_none(*pgd_ref))
25032 continue;
25033
25034 spin_lock(&pgd_lock);
25035 +
25036 +#ifdef CONFIG_PAX_PER_CPU_PGD
25037 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25038 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
25039 +#else
25040 list_for_each_entry(page, &pgd_list, lru) {
25041 pgd_t *pgd;
25042 spinlock_t *pgt_lock;
25043 @@ -122,6 +132,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25044 /* the pgt_lock only for Xen */
25045 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25046 spin_lock(pgt_lock);
25047 +#endif
25048
25049 if (pgd_none(*pgd))
25050 set_pgd(pgd, *pgd_ref);
25051 @@ -129,7 +140,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25052 BUG_ON(pgd_page_vaddr(*pgd)
25053 != pgd_page_vaddr(*pgd_ref));
25054
25055 +#ifndef CONFIG_PAX_PER_CPU_PGD
25056 spin_unlock(pgt_lock);
25057 +#endif
25058 +
25059 }
25060 spin_unlock(&pgd_lock);
25061 }
25062 @@ -162,7 +176,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25063 {
25064 if (pgd_none(*pgd)) {
25065 pud_t *pud = (pud_t *)spp_getpage();
25066 - pgd_populate(&init_mm, pgd, pud);
25067 + pgd_populate_kernel(&init_mm, pgd, pud);
25068 if (pud != pud_offset(pgd, 0))
25069 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25070 pud, pud_offset(pgd, 0));
25071 @@ -174,7 +188,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25072 {
25073 if (pud_none(*pud)) {
25074 pmd_t *pmd = (pmd_t *) spp_getpage();
25075 - pud_populate(&init_mm, pud, pmd);
25076 + pud_populate_kernel(&init_mm, pud, pmd);
25077 if (pmd != pmd_offset(pud, 0))
25078 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25079 pmd, pmd_offset(pud, 0));
25080 @@ -203,7 +217,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25081 pmd = fill_pmd(pud, vaddr);
25082 pte = fill_pte(pmd, vaddr);
25083
25084 + pax_open_kernel();
25085 set_pte(pte, new_pte);
25086 + pax_close_kernel();
25087
25088 /*
25089 * It's enough to flush this one mapping.
25090 @@ -262,14 +278,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25091 pgd = pgd_offset_k((unsigned long)__va(phys));
25092 if (pgd_none(*pgd)) {
25093 pud = (pud_t *) spp_getpage();
25094 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25095 - _PAGE_USER));
25096 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25097 }
25098 pud = pud_offset(pgd, (unsigned long)__va(phys));
25099 if (pud_none(*pud)) {
25100 pmd = (pmd_t *) spp_getpage();
25101 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25102 - _PAGE_USER));
25103 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25104 }
25105 pmd = pmd_offset(pud, phys);
25106 BUG_ON(!pmd_none(*pmd));
25107 @@ -330,7 +344,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25108 if (pfn >= pgt_buf_top)
25109 panic("alloc_low_page: ran out of memory");
25110
25111 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25112 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25113 clear_page(adr);
25114 *phys = pfn * PAGE_SIZE;
25115 return adr;
25116 @@ -346,7 +360,7 @@ static __ref void *map_low_page(void *virt)
25117
25118 phys = __pa(virt);
25119 left = phys & (PAGE_SIZE - 1);
25120 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25121 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25122 adr = (void *)(((unsigned long)adr) | left);
25123
25124 return adr;
25125 @@ -546,7 +560,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25126 unmap_low_page(pmd);
25127
25128 spin_lock(&init_mm.page_table_lock);
25129 - pud_populate(&init_mm, pud, __va(pmd_phys));
25130 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25131 spin_unlock(&init_mm.page_table_lock);
25132 }
25133 __flush_tlb_all();
25134 @@ -592,7 +606,7 @@ kernel_physical_mapping_init(unsigned long start,
25135 unmap_low_page(pud);
25136
25137 spin_lock(&init_mm.page_table_lock);
25138 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25139 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25140 spin_unlock(&init_mm.page_table_lock);
25141 pgd_changed = true;
25142 }
25143 @@ -684,6 +698,12 @@ void __init mem_init(void)
25144
25145 pci_iommu_alloc();
25146
25147 +#ifdef CONFIG_PAX_PER_CPU_PGD
25148 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25149 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25150 + KERNEL_PGD_PTRS);
25151 +#endif
25152 +
25153 /* clear_bss() already clear the empty_zero_page */
25154
25155 reservedpages = 0;
25156 @@ -844,8 +864,8 @@ int kern_addr_valid(unsigned long addr)
25157 static struct vm_area_struct gate_vma = {
25158 .vm_start = VSYSCALL_START,
25159 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25160 - .vm_page_prot = PAGE_READONLY_EXEC,
25161 - .vm_flags = VM_READ | VM_EXEC
25162 + .vm_page_prot = PAGE_READONLY,
25163 + .vm_flags = VM_READ
25164 };
25165
25166 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25167 @@ -879,7 +899,7 @@ int in_gate_area_no_mm(unsigned long addr)
25168
25169 const char *arch_vma_name(struct vm_area_struct *vma)
25170 {
25171 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25172 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25173 return "[vdso]";
25174 if (vma == &gate_vma)
25175 return "[vsyscall]";
25176 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25177 index 7b179b4..6bd1777 100644
25178 --- a/arch/x86/mm/iomap_32.c
25179 +++ b/arch/x86/mm/iomap_32.c
25180 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25181 type = kmap_atomic_idx_push();
25182 idx = type + KM_TYPE_NR * smp_processor_id();
25183 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25184 +
25185 + pax_open_kernel();
25186 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25187 + pax_close_kernel();
25188 +
25189 arch_flush_lazy_mmu_mode();
25190
25191 return (void *)vaddr;
25192 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25193 index be1ef57..55f0160 100644
25194 --- a/arch/x86/mm/ioremap.c
25195 +++ b/arch/x86/mm/ioremap.c
25196 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25197 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25198 int is_ram = page_is_ram(pfn);
25199
25200 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25201 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25202 return NULL;
25203 WARN_ON_ONCE(is_ram);
25204 }
25205 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25206
25207 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25208 if (page_is_ram(start >> PAGE_SHIFT))
25209 +#ifdef CONFIG_HIGHMEM
25210 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25211 +#endif
25212 return __va(phys);
25213
25214 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25215 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25216 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25217
25218 static __initdata int after_paging_init;
25219 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25220 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25221
25222 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25223 {
25224 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25225 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25226
25227 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25228 - memset(bm_pte, 0, sizeof(bm_pte));
25229 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25230 + pmd_populate_user(&init_mm, pmd, bm_pte);
25231
25232 /*
25233 * The boot-ioremap range spans multiple pmds, for which
25234 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25235 index d87dd6d..bf3fa66 100644
25236 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25237 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25238 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25239 * memory (e.g. tracked pages)? For now, we need this to avoid
25240 * invoking kmemcheck for PnP BIOS calls.
25241 */
25242 - if (regs->flags & X86_VM_MASK)
25243 + if (v8086_mode(regs))
25244 return false;
25245 - if (regs->cs != __KERNEL_CS)
25246 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25247 return false;
25248
25249 pte = kmemcheck_pte_lookup(address);
25250 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25251 index 845df68..1d8d29f 100644
25252 --- a/arch/x86/mm/mmap.c
25253 +++ b/arch/x86/mm/mmap.c
25254 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25255 * Leave an at least ~128 MB hole with possible stack randomization.
25256 */
25257 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25258 -#define MAX_GAP (TASK_SIZE/6*5)
25259 +#define MAX_GAP (pax_task_size/6*5)
25260
25261 static int mmap_is_legacy(void)
25262 {
25263 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25264 return rnd << PAGE_SHIFT;
25265 }
25266
25267 -static unsigned long mmap_base(void)
25268 +static unsigned long mmap_base(struct mm_struct *mm)
25269 {
25270 unsigned long gap = rlimit(RLIMIT_STACK);
25271 + unsigned long pax_task_size = TASK_SIZE;
25272 +
25273 +#ifdef CONFIG_PAX_SEGMEXEC
25274 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25275 + pax_task_size = SEGMEXEC_TASK_SIZE;
25276 +#endif
25277
25278 if (gap < MIN_GAP)
25279 gap = MIN_GAP;
25280 else if (gap > MAX_GAP)
25281 gap = MAX_GAP;
25282
25283 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25284 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25285 }
25286
25287 /*
25288 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25289 * does, but not when emulating X86_32
25290 */
25291 -static unsigned long mmap_legacy_base(void)
25292 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25293 {
25294 - if (mmap_is_ia32())
25295 + if (mmap_is_ia32()) {
25296 +
25297 +#ifdef CONFIG_PAX_SEGMEXEC
25298 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25299 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25300 + else
25301 +#endif
25302 +
25303 return TASK_UNMAPPED_BASE;
25304 - else
25305 + } else
25306 return TASK_UNMAPPED_BASE + mmap_rnd();
25307 }
25308
25309 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25310 void arch_pick_mmap_layout(struct mm_struct *mm)
25311 {
25312 if (mmap_is_legacy()) {
25313 - mm->mmap_base = mmap_legacy_base();
25314 + mm->mmap_base = mmap_legacy_base(mm);
25315 +
25316 +#ifdef CONFIG_PAX_RANDMMAP
25317 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25318 + mm->mmap_base += mm->delta_mmap;
25319 +#endif
25320 +
25321 mm->get_unmapped_area = arch_get_unmapped_area;
25322 mm->unmap_area = arch_unmap_area;
25323 } else {
25324 - mm->mmap_base = mmap_base();
25325 + mm->mmap_base = mmap_base(mm);
25326 +
25327 +#ifdef CONFIG_PAX_RANDMMAP
25328 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25329 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25330 +#endif
25331 +
25332 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25333 mm->unmap_area = arch_unmap_area_topdown;
25334 }
25335 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25336 index dc0b727..dc9d71a 100644
25337 --- a/arch/x86/mm/mmio-mod.c
25338 +++ b/arch/x86/mm/mmio-mod.c
25339 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25340 break;
25341 default:
25342 {
25343 - unsigned char *ip = (unsigned char *)instptr;
25344 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25345 my_trace->opcode = MMIO_UNKNOWN_OP;
25346 my_trace->width = 0;
25347 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25348 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25349 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25350 void __iomem *addr)
25351 {
25352 - static atomic_t next_id;
25353 + static atomic_unchecked_t next_id;
25354 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25355 /* These are page-unaligned. */
25356 struct mmiotrace_map map = {
25357 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25358 .private = trace
25359 },
25360 .phys = offset,
25361 - .id = atomic_inc_return(&next_id)
25362 + .id = atomic_inc_return_unchecked(&next_id)
25363 };
25364 map.map_id = trace->id;
25365
25366 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25367 index b008656..773eac2 100644
25368 --- a/arch/x86/mm/pageattr-test.c
25369 +++ b/arch/x86/mm/pageattr-test.c
25370 @@ -36,7 +36,7 @@ enum {
25371
25372 static int pte_testbit(pte_t pte)
25373 {
25374 - return pte_flags(pte) & _PAGE_UNUSED1;
25375 + return pte_flags(pte) & _PAGE_CPA_TEST;
25376 }
25377
25378 struct split_state {
25379 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25380 index e1ebde3..b1e1db38 100644
25381 --- a/arch/x86/mm/pageattr.c
25382 +++ b/arch/x86/mm/pageattr.c
25383 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25384 */
25385 #ifdef CONFIG_PCI_BIOS
25386 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25387 - pgprot_val(forbidden) |= _PAGE_NX;
25388 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25389 #endif
25390
25391 /*
25392 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25393 * Does not cover __inittext since that is gone later on. On
25394 * 64bit we do not enforce !NX on the low mapping
25395 */
25396 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25397 - pgprot_val(forbidden) |= _PAGE_NX;
25398 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25399 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25400
25401 +#ifdef CONFIG_DEBUG_RODATA
25402 /*
25403 * The .rodata section needs to be read-only. Using the pfn
25404 * catches all aliases.
25405 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25406 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25407 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25408 pgprot_val(forbidden) |= _PAGE_RW;
25409 +#endif
25410
25411 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25412 /*
25413 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25414 }
25415 #endif
25416
25417 +#ifdef CONFIG_PAX_KERNEXEC
25418 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25419 + pgprot_val(forbidden) |= _PAGE_RW;
25420 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25421 + }
25422 +#endif
25423 +
25424 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25425
25426 return prot;
25427 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25428 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25429 {
25430 /* change init_mm */
25431 + pax_open_kernel();
25432 set_pte_atomic(kpte, pte);
25433 +
25434 #ifdef CONFIG_X86_32
25435 if (!SHARED_KERNEL_PMD) {
25436 +
25437 +#ifdef CONFIG_PAX_PER_CPU_PGD
25438 + unsigned long cpu;
25439 +#else
25440 struct page *page;
25441 +#endif
25442
25443 +#ifdef CONFIG_PAX_PER_CPU_PGD
25444 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25445 + pgd_t *pgd = get_cpu_pgd(cpu);
25446 +#else
25447 list_for_each_entry(page, &pgd_list, lru) {
25448 - pgd_t *pgd;
25449 + pgd_t *pgd = (pgd_t *)page_address(page);
25450 +#endif
25451 +
25452 pud_t *pud;
25453 pmd_t *pmd;
25454
25455 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25456 + pgd += pgd_index(address);
25457 pud = pud_offset(pgd, address);
25458 pmd = pmd_offset(pud, address);
25459 set_pte_atomic((pte_t *)pmd, pte);
25460 }
25461 }
25462 #endif
25463 + pax_close_kernel();
25464 }
25465
25466 static int
25467 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25468 index f6ff57b..481690f 100644
25469 --- a/arch/x86/mm/pat.c
25470 +++ b/arch/x86/mm/pat.c
25471 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25472
25473 if (!entry) {
25474 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25475 - current->comm, current->pid, start, end);
25476 + current->comm, task_pid_nr(current), start, end);
25477 return -EINVAL;
25478 }
25479
25480 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25481 while (cursor < to) {
25482 if (!devmem_is_allowed(pfn)) {
25483 printk(KERN_INFO
25484 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25485 - current->comm, from, to);
25486 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25487 + current->comm, from, to, cursor);
25488 return 0;
25489 }
25490 cursor += PAGE_SIZE;
25491 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25492 printk(KERN_INFO
25493 "%s:%d ioremap_change_attr failed %s "
25494 "for %Lx-%Lx\n",
25495 - current->comm, current->pid,
25496 + current->comm, task_pid_nr(current),
25497 cattr_name(flags),
25498 base, (unsigned long long)(base + size));
25499 return -EINVAL;
25500 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25501 if (want_flags != flags) {
25502 printk(KERN_WARNING
25503 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25504 - current->comm, current->pid,
25505 + current->comm, task_pid_nr(current),
25506 cattr_name(want_flags),
25507 (unsigned long long)paddr,
25508 (unsigned long long)(paddr + size),
25509 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25510 free_memtype(paddr, paddr + size);
25511 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25512 " for %Lx-%Lx, got %s\n",
25513 - current->comm, current->pid,
25514 + current->comm, task_pid_nr(current),
25515 cattr_name(want_flags),
25516 (unsigned long long)paddr,
25517 (unsigned long long)(paddr + size),
25518 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25519 index 9f0614d..92ae64a 100644
25520 --- a/arch/x86/mm/pf_in.c
25521 +++ b/arch/x86/mm/pf_in.c
25522 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25523 int i;
25524 enum reason_type rv = OTHERS;
25525
25526 - p = (unsigned char *)ins_addr;
25527 + p = (unsigned char *)ktla_ktva(ins_addr);
25528 p += skip_prefix(p, &prf);
25529 p += get_opcode(p, &opcode);
25530
25531 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25532 struct prefix_bits prf;
25533 int i;
25534
25535 - p = (unsigned char *)ins_addr;
25536 + p = (unsigned char *)ktla_ktva(ins_addr);
25537 p += skip_prefix(p, &prf);
25538 p += get_opcode(p, &opcode);
25539
25540 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25541 struct prefix_bits prf;
25542 int i;
25543
25544 - p = (unsigned char *)ins_addr;
25545 + p = (unsigned char *)ktla_ktva(ins_addr);
25546 p += skip_prefix(p, &prf);
25547 p += get_opcode(p, &opcode);
25548
25549 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25550 struct prefix_bits prf;
25551 int i;
25552
25553 - p = (unsigned char *)ins_addr;
25554 + p = (unsigned char *)ktla_ktva(ins_addr);
25555 p += skip_prefix(p, &prf);
25556 p += get_opcode(p, &opcode);
25557 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25558 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25559 struct prefix_bits prf;
25560 int i;
25561
25562 - p = (unsigned char *)ins_addr;
25563 + p = (unsigned char *)ktla_ktva(ins_addr);
25564 p += skip_prefix(p, &prf);
25565 p += get_opcode(p, &opcode);
25566 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25567 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25568 index 8573b83..7d9628f 100644
25569 --- a/arch/x86/mm/pgtable.c
25570 +++ b/arch/x86/mm/pgtable.c
25571 @@ -84,10 +84,60 @@ static inline void pgd_list_del(pgd_t *pgd)
25572 list_del(&page->lru);
25573 }
25574
25575 -#define UNSHARED_PTRS_PER_PGD \
25576 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25577 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25578 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25579
25580 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25581 +{
25582 + while (count--)
25583 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25584 +}
25585 +#endif
25586
25587 +#ifdef CONFIG_PAX_PER_CPU_PGD
25588 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25589 +{
25590 + while (count--) {
25591 + pgd_t pgd;
25592 +
25593 +#ifdef CONFIG_X86_64
25594 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25595 +#else
25596 + pgd = *src++;
25597 +#endif
25598 +
25599 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25600 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25601 +#endif
25602 +
25603 + *dst++ = pgd;
25604 + }
25605 +
25606 +}
25607 +#endif
25608 +
25609 +#ifdef CONFIG_X86_64
25610 +#define pxd_t pud_t
25611 +#define pyd_t pgd_t
25612 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25613 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25614 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25615 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25616 +#define PYD_SIZE PGDIR_SIZE
25617 +#else
25618 +#define pxd_t pmd_t
25619 +#define pyd_t pud_t
25620 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25621 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25622 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25623 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25624 +#define PYD_SIZE PUD_SIZE
25625 +#endif
25626 +
25627 +#ifdef CONFIG_PAX_PER_CPU_PGD
25628 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25629 +static inline void pgd_dtor(pgd_t *pgd) {}
25630 +#else
25631 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25632 {
25633 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25634 @@ -128,6 +178,7 @@ static void pgd_dtor(pgd_t *pgd)
25635 pgd_list_del(pgd);
25636 spin_unlock(&pgd_lock);
25637 }
25638 +#endif
25639
25640 /*
25641 * List of all pgd's needed for non-PAE so it can invalidate entries
25642 @@ -140,7 +191,7 @@ static void pgd_dtor(pgd_t *pgd)
25643 * -- wli
25644 */
25645
25646 -#ifdef CONFIG_X86_PAE
25647 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25648 /*
25649 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25650 * updating the top-level pagetable entries to guarantee the
25651 @@ -152,7 +203,7 @@ static void pgd_dtor(pgd_t *pgd)
25652 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25653 * and initialize the kernel pmds here.
25654 */
25655 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25656 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25657
25658 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25659 {
25660 @@ -170,36 +221,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25661 */
25662 flush_tlb_mm(mm);
25663 }
25664 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25665 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25666 #else /* !CONFIG_X86_PAE */
25667
25668 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25669 -#define PREALLOCATED_PMDS 0
25670 +#define PREALLOCATED_PXDS 0
25671
25672 #endif /* CONFIG_X86_PAE */
25673
25674 -static void free_pmds(pmd_t *pmds[])
25675 +static void free_pxds(pxd_t *pxds[])
25676 {
25677 int i;
25678
25679 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25680 - if (pmds[i])
25681 - free_page((unsigned long)pmds[i]);
25682 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25683 + if (pxds[i])
25684 + free_page((unsigned long)pxds[i]);
25685 }
25686
25687 -static int preallocate_pmds(pmd_t *pmds[])
25688 +static int preallocate_pxds(pxd_t *pxds[])
25689 {
25690 int i;
25691 bool failed = false;
25692
25693 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25694 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25695 - if (pmd == NULL)
25696 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25697 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25698 + if (pxd == NULL)
25699 failed = true;
25700 - pmds[i] = pmd;
25701 + pxds[i] = pxd;
25702 }
25703
25704 if (failed) {
25705 - free_pmds(pmds);
25706 + free_pxds(pxds);
25707 return -ENOMEM;
25708 }
25709
25710 @@ -212,51 +265,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25711 * preallocate which never got a corresponding vma will need to be
25712 * freed manually.
25713 */
25714 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25715 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25716 {
25717 int i;
25718
25719 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25720 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25721 pgd_t pgd = pgdp[i];
25722
25723 if (pgd_val(pgd) != 0) {
25724 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25725 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25726
25727 - pgdp[i] = native_make_pgd(0);
25728 + set_pgd(pgdp + i, native_make_pgd(0));
25729
25730 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25731 - pmd_free(mm, pmd);
25732 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25733 + pxd_free(mm, pxd);
25734 }
25735 }
25736 }
25737
25738 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25739 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25740 {
25741 - pud_t *pud;
25742 + pyd_t *pyd;
25743 unsigned long addr;
25744 int i;
25745
25746 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25747 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25748 return;
25749
25750 - pud = pud_offset(pgd, 0);
25751 +#ifdef CONFIG_X86_64
25752 + pyd = pyd_offset(mm, 0L);
25753 +#else
25754 + pyd = pyd_offset(pgd, 0L);
25755 +#endif
25756
25757 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25758 - i++, pud++, addr += PUD_SIZE) {
25759 - pmd_t *pmd = pmds[i];
25760 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25761 + i++, pyd++, addr += PYD_SIZE) {
25762 + pxd_t *pxd = pxds[i];
25763
25764 if (i >= KERNEL_PGD_BOUNDARY)
25765 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25766 - sizeof(pmd_t) * PTRS_PER_PMD);
25767 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25768 + sizeof(pxd_t) * PTRS_PER_PMD);
25769
25770 - pud_populate(mm, pud, pmd);
25771 + pyd_populate(mm, pyd, pxd);
25772 }
25773 }
25774
25775 pgd_t *pgd_alloc(struct mm_struct *mm)
25776 {
25777 pgd_t *pgd;
25778 - pmd_t *pmds[PREALLOCATED_PMDS];
25779 + pxd_t *pxds[PREALLOCATED_PXDS];
25780
25781 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25782
25783 @@ -265,11 +322,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25784
25785 mm->pgd = pgd;
25786
25787 - if (preallocate_pmds(pmds) != 0)
25788 + if (preallocate_pxds(pxds) != 0)
25789 goto out_free_pgd;
25790
25791 if (paravirt_pgd_alloc(mm) != 0)
25792 - goto out_free_pmds;
25793 + goto out_free_pxds;
25794
25795 /*
25796 * Make sure that pre-populating the pmds is atomic with
25797 @@ -279,14 +336,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25798 spin_lock(&pgd_lock);
25799
25800 pgd_ctor(mm, pgd);
25801 - pgd_prepopulate_pmd(mm, pgd, pmds);
25802 + pgd_prepopulate_pxd(mm, pgd, pxds);
25803
25804 spin_unlock(&pgd_lock);
25805
25806 return pgd;
25807
25808 -out_free_pmds:
25809 - free_pmds(pmds);
25810 +out_free_pxds:
25811 + free_pxds(pxds);
25812 out_free_pgd:
25813 free_page((unsigned long)pgd);
25814 out:
25815 @@ -295,7 +352,7 @@ out:
25816
25817 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25818 {
25819 - pgd_mop_up_pmds(mm, pgd);
25820 + pgd_mop_up_pxds(mm, pgd);
25821 pgd_dtor(pgd);
25822 paravirt_pgd_free(mm, pgd);
25823 free_page((unsigned long)pgd);
25824 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25825 index cac7184..09a39fa 100644
25826 --- a/arch/x86/mm/pgtable_32.c
25827 +++ b/arch/x86/mm/pgtable_32.c
25828 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25829 return;
25830 }
25831 pte = pte_offset_kernel(pmd, vaddr);
25832 +
25833 + pax_open_kernel();
25834 if (pte_val(pteval))
25835 set_pte_at(&init_mm, vaddr, pte, pteval);
25836 else
25837 pte_clear(&init_mm, vaddr, pte);
25838 + pax_close_kernel();
25839
25840 /*
25841 * It's enough to flush this one mapping.
25842 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25843 index 410531d..0f16030 100644
25844 --- a/arch/x86/mm/setup_nx.c
25845 +++ b/arch/x86/mm/setup_nx.c
25846 @@ -5,8 +5,10 @@
25847 #include <asm/pgtable.h>
25848 #include <asm/proto.h>
25849
25850 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25851 static int disable_nx __cpuinitdata;
25852
25853 +#ifndef CONFIG_PAX_PAGEEXEC
25854 /*
25855 * noexec = on|off
25856 *
25857 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25858 return 0;
25859 }
25860 early_param("noexec", noexec_setup);
25861 +#endif
25862 +
25863 +#endif
25864
25865 void __cpuinit x86_configure_nx(void)
25866 {
25867 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25868 if (cpu_has_nx && !disable_nx)
25869 __supported_pte_mask |= _PAGE_NX;
25870 else
25871 +#endif
25872 __supported_pte_mask &= ~_PAGE_NX;
25873 }
25874
25875 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25876 index d6c0418..06a0ad5 100644
25877 --- a/arch/x86/mm/tlb.c
25878 +++ b/arch/x86/mm/tlb.c
25879 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25880 BUG();
25881 cpumask_clear_cpu(cpu,
25882 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25883 +
25884 +#ifndef CONFIG_PAX_PER_CPU_PGD
25885 load_cr3(swapper_pg_dir);
25886 +#endif
25887 +
25888 }
25889 EXPORT_SYMBOL_GPL(leave_mm);
25890
25891 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25892 index 6687022..ceabcfa 100644
25893 --- a/arch/x86/net/bpf_jit.S
25894 +++ b/arch/x86/net/bpf_jit.S
25895 @@ -9,6 +9,7 @@
25896 */
25897 #include <linux/linkage.h>
25898 #include <asm/dwarf2.h>
25899 +#include <asm/alternative-asm.h>
25900
25901 /*
25902 * Calling convention :
25903 @@ -35,6 +36,7 @@ sk_load_word:
25904 jle bpf_slow_path_word
25905 mov (SKBDATA,%rsi),%eax
25906 bswap %eax /* ntohl() */
25907 + pax_force_retaddr
25908 ret
25909
25910
25911 @@ -53,6 +55,7 @@ sk_load_half:
25912 jle bpf_slow_path_half
25913 movzwl (SKBDATA,%rsi),%eax
25914 rol $8,%ax # ntohs()
25915 + pax_force_retaddr
25916 ret
25917
25918 sk_load_byte_ind:
25919 @@ -66,6 +69,7 @@ sk_load_byte:
25920 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25921 jle bpf_slow_path_byte
25922 movzbl (SKBDATA,%rsi),%eax
25923 + pax_force_retaddr
25924 ret
25925
25926 /**
25927 @@ -82,6 +86,7 @@ ENTRY(sk_load_byte_msh)
25928 movzbl (SKBDATA,%rsi),%ebx
25929 and $15,%bl
25930 shl $2,%bl
25931 + pax_force_retaddr
25932 ret
25933 CFI_ENDPROC
25934 ENDPROC(sk_load_byte_msh)
25935 @@ -91,6 +96,7 @@ bpf_error:
25936 xor %eax,%eax
25937 mov -8(%rbp),%rbx
25938 leaveq
25939 + pax_force_retaddr
25940 ret
25941
25942 /* rsi contains offset and can be scratched */
25943 @@ -113,6 +119,7 @@ bpf_slow_path_word:
25944 js bpf_error
25945 mov -12(%rbp),%eax
25946 bswap %eax
25947 + pax_force_retaddr
25948 ret
25949
25950 bpf_slow_path_half:
25951 @@ -121,12 +128,14 @@ bpf_slow_path_half:
25952 mov -12(%rbp),%ax
25953 rol $8,%ax
25954 movzwl %ax,%eax
25955 + pax_force_retaddr
25956 ret
25957
25958 bpf_slow_path_byte:
25959 bpf_slow_path_common(1)
25960 js bpf_error
25961 movzbl -12(%rbp),%eax
25962 + pax_force_retaddr
25963 ret
25964
25965 bpf_slow_path_byte_msh:
25966 @@ -137,4 +146,5 @@ bpf_slow_path_byte_msh:
25967 and $15,%al
25968 shl $2,%al
25969 xchg %eax,%ebx
25970 + pax_force_retaddr
25971 ret
25972 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
25973 index 5a5b6e4..201d42e 100644
25974 --- a/arch/x86/net/bpf_jit_comp.c
25975 +++ b/arch/x86/net/bpf_jit_comp.c
25976 @@ -117,6 +117,10 @@ static inline void bpf_flush_icache(void *start, void *end)
25977 set_fs(old_fs);
25978 }
25979
25980 +struct bpf_jit_work {
25981 + struct work_struct work;
25982 + void *image;
25983 +};
25984
25985 void bpf_jit_compile(struct sk_filter *fp)
25986 {
25987 @@ -141,6 +145,10 @@ void bpf_jit_compile(struct sk_filter *fp)
25988 if (addrs == NULL)
25989 return;
25990
25991 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
25992 + if (!fp->work)
25993 + goto out;
25994 +
25995 /* Before first pass, make a rough estimation of addrs[]
25996 * each bpf instruction is translated to less than 64 bytes
25997 */
25998 @@ -477,7 +485,7 @@ void bpf_jit_compile(struct sk_filter *fp)
25999 common_load: seen |= SEEN_DATAREF;
26000 if ((int)K < 0) {
26001 /* Abort the JIT because __load_pointer() is needed. */
26002 - goto out;
26003 + goto error;
26004 }
26005 t_offset = func - (image + addrs[i]);
26006 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
26007 @@ -492,7 +500,7 @@ common_load: seen |= SEEN_DATAREF;
26008 case BPF_S_LDX_B_MSH:
26009 if ((int)K < 0) {
26010 /* Abort the JIT because __load_pointer() is needed. */
26011 - goto out;
26012 + goto error;
26013 }
26014 seen |= SEEN_DATAREF | SEEN_XREG;
26015 t_offset = sk_load_byte_msh - (image + addrs[i]);
26016 @@ -582,17 +590,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26017 break;
26018 default:
26019 /* hmm, too complex filter, give up with jit compiler */
26020 - goto out;
26021 + goto error;
26022 }
26023 ilen = prog - temp;
26024 if (image) {
26025 if (unlikely(proglen + ilen > oldproglen)) {
26026 pr_err("bpb_jit_compile fatal error\n");
26027 - kfree(addrs);
26028 - module_free(NULL, image);
26029 - return;
26030 + module_free_exec(NULL, image);
26031 + goto error;
26032 }
26033 + pax_open_kernel();
26034 memcpy(image + proglen, temp, ilen);
26035 + pax_close_kernel();
26036 }
26037 proglen += ilen;
26038 addrs[i] = proglen;
26039 @@ -613,11 +622,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26040 break;
26041 }
26042 if (proglen == oldproglen) {
26043 - image = module_alloc(max_t(unsigned int,
26044 - proglen,
26045 - sizeof(struct work_struct)));
26046 + image = module_alloc_exec(proglen);
26047 if (!image)
26048 - goto out;
26049 + goto error;
26050 }
26051 oldproglen = proglen;
26052 }
26053 @@ -633,7 +640,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26054 bpf_flush_icache(image, image + proglen);
26055
26056 fp->bpf_func = (void *)image;
26057 - }
26058 + } else
26059 +error:
26060 + kfree(fp->work);
26061 +
26062 out:
26063 kfree(addrs);
26064 return;
26065 @@ -641,18 +651,20 @@ out:
26066
26067 static void jit_free_defer(struct work_struct *arg)
26068 {
26069 - module_free(NULL, arg);
26070 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26071 + kfree(arg);
26072 }
26073
26074 /* run from softirq, we must use a work_struct to call
26075 - * module_free() from process context
26076 + * module_free_exec() from process context
26077 */
26078 void bpf_jit_free(struct sk_filter *fp)
26079 {
26080 if (fp->bpf_func != sk_run_filter) {
26081 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
26082 + struct work_struct *work = &fp->work->work;
26083
26084 INIT_WORK(work, jit_free_defer);
26085 + fp->work->image = fp->bpf_func;
26086 schedule_work(work);
26087 }
26088 }
26089 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26090 index bff89df..377758a 100644
26091 --- a/arch/x86/oprofile/backtrace.c
26092 +++ b/arch/x86/oprofile/backtrace.c
26093 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26094 struct stack_frame_ia32 *fp;
26095 unsigned long bytes;
26096
26097 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26098 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26099 if (bytes != sizeof(bufhead))
26100 return NULL;
26101
26102 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26103 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26104
26105 oprofile_add_trace(bufhead[0].return_address);
26106
26107 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26108 struct stack_frame bufhead[2];
26109 unsigned long bytes;
26110
26111 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26112 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26113 if (bytes != sizeof(bufhead))
26114 return NULL;
26115
26116 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26117 {
26118 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26119
26120 - if (!user_mode_vm(regs)) {
26121 + if (!user_mode(regs)) {
26122 unsigned long stack = kernel_stack_pointer(regs);
26123 if (depth)
26124 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26125 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26126 index cb29191..036766d 100644
26127 --- a/arch/x86/pci/mrst.c
26128 +++ b/arch/x86/pci/mrst.c
26129 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
26130 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
26131 pci_mmcfg_late_init();
26132 pcibios_enable_irq = mrst_pci_irq_enable;
26133 - pci_root_ops = pci_mrst_ops;
26134 + pax_open_kernel();
26135 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26136 + pax_close_kernel();
26137 /* Continue with standard init */
26138 return 1;
26139 }
26140 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26141 index da8fe05..7ee6704 100644
26142 --- a/arch/x86/pci/pcbios.c
26143 +++ b/arch/x86/pci/pcbios.c
26144 @@ -79,50 +79,93 @@ union bios32 {
26145 static struct {
26146 unsigned long address;
26147 unsigned short segment;
26148 -} bios32_indirect = { 0, __KERNEL_CS };
26149 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26150
26151 /*
26152 * Returns the entry point for the given service, NULL on error
26153 */
26154
26155 -static unsigned long bios32_service(unsigned long service)
26156 +static unsigned long __devinit bios32_service(unsigned long service)
26157 {
26158 unsigned char return_code; /* %al */
26159 unsigned long address; /* %ebx */
26160 unsigned long length; /* %ecx */
26161 unsigned long entry; /* %edx */
26162 unsigned long flags;
26163 + struct desc_struct d, *gdt;
26164
26165 local_irq_save(flags);
26166 - __asm__("lcall *(%%edi); cld"
26167 +
26168 + gdt = get_cpu_gdt_table(smp_processor_id());
26169 +
26170 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26171 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26172 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26173 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26174 +
26175 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26176 : "=a" (return_code),
26177 "=b" (address),
26178 "=c" (length),
26179 "=d" (entry)
26180 : "0" (service),
26181 "1" (0),
26182 - "D" (&bios32_indirect));
26183 + "D" (&bios32_indirect),
26184 + "r"(__PCIBIOS_DS)
26185 + : "memory");
26186 +
26187 + pax_open_kernel();
26188 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26189 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26190 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26191 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26192 + pax_close_kernel();
26193 +
26194 local_irq_restore(flags);
26195
26196 switch (return_code) {
26197 - case 0:
26198 - return address + entry;
26199 - case 0x80: /* Not present */
26200 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26201 - return 0;
26202 - default: /* Shouldn't happen */
26203 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26204 - service, return_code);
26205 + case 0: {
26206 + int cpu;
26207 + unsigned char flags;
26208 +
26209 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26210 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26211 + printk(KERN_WARNING "bios32_service: not valid\n");
26212 return 0;
26213 + }
26214 + address = address + PAGE_OFFSET;
26215 + length += 16UL; /* some BIOSs underreport this... */
26216 + flags = 4;
26217 + if (length >= 64*1024*1024) {
26218 + length >>= PAGE_SHIFT;
26219 + flags |= 8;
26220 + }
26221 +
26222 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26223 + gdt = get_cpu_gdt_table(cpu);
26224 + pack_descriptor(&d, address, length, 0x9b, flags);
26225 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26226 + pack_descriptor(&d, address, length, 0x93, flags);
26227 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26228 + }
26229 + return entry;
26230 + }
26231 + case 0x80: /* Not present */
26232 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26233 + return 0;
26234 + default: /* Shouldn't happen */
26235 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26236 + service, return_code);
26237 + return 0;
26238 }
26239 }
26240
26241 static struct {
26242 unsigned long address;
26243 unsigned short segment;
26244 -} pci_indirect = { 0, __KERNEL_CS };
26245 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26246
26247 -static int pci_bios_present;
26248 +static int pci_bios_present __read_only;
26249
26250 static int __devinit check_pcibios(void)
26251 {
26252 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26253 unsigned long flags, pcibios_entry;
26254
26255 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26256 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26257 + pci_indirect.address = pcibios_entry;
26258
26259 local_irq_save(flags);
26260 - __asm__(
26261 - "lcall *(%%edi); cld\n\t"
26262 + __asm__("movw %w6, %%ds\n\t"
26263 + "lcall *%%ss:(%%edi); cld\n\t"
26264 + "push %%ss\n\t"
26265 + "pop %%ds\n\t"
26266 "jc 1f\n\t"
26267 "xor %%ah, %%ah\n"
26268 "1:"
26269 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26270 "=b" (ebx),
26271 "=c" (ecx)
26272 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26273 - "D" (&pci_indirect)
26274 + "D" (&pci_indirect),
26275 + "r" (__PCIBIOS_DS)
26276 : "memory");
26277 local_irq_restore(flags);
26278
26279 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26280
26281 switch (len) {
26282 case 1:
26283 - __asm__("lcall *(%%esi); cld\n\t"
26284 + __asm__("movw %w6, %%ds\n\t"
26285 + "lcall *%%ss:(%%esi); cld\n\t"
26286 + "push %%ss\n\t"
26287 + "pop %%ds\n\t"
26288 "jc 1f\n\t"
26289 "xor %%ah, %%ah\n"
26290 "1:"
26291 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26292 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26293 "b" (bx),
26294 "D" ((long)reg),
26295 - "S" (&pci_indirect));
26296 + "S" (&pci_indirect),
26297 + "r" (__PCIBIOS_DS));
26298 /*
26299 * Zero-extend the result beyond 8 bits, do not trust the
26300 * BIOS having done it:
26301 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26302 *value &= 0xff;
26303 break;
26304 case 2:
26305 - __asm__("lcall *(%%esi); cld\n\t"
26306 + __asm__("movw %w6, %%ds\n\t"
26307 + "lcall *%%ss:(%%esi); cld\n\t"
26308 + "push %%ss\n\t"
26309 + "pop %%ds\n\t"
26310 "jc 1f\n\t"
26311 "xor %%ah, %%ah\n"
26312 "1:"
26313 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26314 : "1" (PCIBIOS_READ_CONFIG_WORD),
26315 "b" (bx),
26316 "D" ((long)reg),
26317 - "S" (&pci_indirect));
26318 + "S" (&pci_indirect),
26319 + "r" (__PCIBIOS_DS));
26320 /*
26321 * Zero-extend the result beyond 16 bits, do not trust the
26322 * BIOS having done it:
26323 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26324 *value &= 0xffff;
26325 break;
26326 case 4:
26327 - __asm__("lcall *(%%esi); cld\n\t"
26328 + __asm__("movw %w6, %%ds\n\t"
26329 + "lcall *%%ss:(%%esi); cld\n\t"
26330 + "push %%ss\n\t"
26331 + "pop %%ds\n\t"
26332 "jc 1f\n\t"
26333 "xor %%ah, %%ah\n"
26334 "1:"
26335 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26336 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26337 "b" (bx),
26338 "D" ((long)reg),
26339 - "S" (&pci_indirect));
26340 + "S" (&pci_indirect),
26341 + "r" (__PCIBIOS_DS));
26342 break;
26343 }
26344
26345 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26346
26347 switch (len) {
26348 case 1:
26349 - __asm__("lcall *(%%esi); cld\n\t"
26350 + __asm__("movw %w6, %%ds\n\t"
26351 + "lcall *%%ss:(%%esi); cld\n\t"
26352 + "push %%ss\n\t"
26353 + "pop %%ds\n\t"
26354 "jc 1f\n\t"
26355 "xor %%ah, %%ah\n"
26356 "1:"
26357 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26358 "c" (value),
26359 "b" (bx),
26360 "D" ((long)reg),
26361 - "S" (&pci_indirect));
26362 + "S" (&pci_indirect),
26363 + "r" (__PCIBIOS_DS));
26364 break;
26365 case 2:
26366 - __asm__("lcall *(%%esi); cld\n\t"
26367 + __asm__("movw %w6, %%ds\n\t"
26368 + "lcall *%%ss:(%%esi); cld\n\t"
26369 + "push %%ss\n\t"
26370 + "pop %%ds\n\t"
26371 "jc 1f\n\t"
26372 "xor %%ah, %%ah\n"
26373 "1:"
26374 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26375 "c" (value),
26376 "b" (bx),
26377 "D" ((long)reg),
26378 - "S" (&pci_indirect));
26379 + "S" (&pci_indirect),
26380 + "r" (__PCIBIOS_DS));
26381 break;
26382 case 4:
26383 - __asm__("lcall *(%%esi); cld\n\t"
26384 + __asm__("movw %w6, %%ds\n\t"
26385 + "lcall *%%ss:(%%esi); cld\n\t"
26386 + "push %%ss\n\t"
26387 + "pop %%ds\n\t"
26388 "jc 1f\n\t"
26389 "xor %%ah, %%ah\n"
26390 "1:"
26391 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26392 "c" (value),
26393 "b" (bx),
26394 "D" ((long)reg),
26395 - "S" (&pci_indirect));
26396 + "S" (&pci_indirect),
26397 + "r" (__PCIBIOS_DS));
26398 break;
26399 }
26400
26401 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26402
26403 DBG("PCI: Fetching IRQ routing table... ");
26404 __asm__("push %%es\n\t"
26405 + "movw %w8, %%ds\n\t"
26406 "push %%ds\n\t"
26407 "pop %%es\n\t"
26408 - "lcall *(%%esi); cld\n\t"
26409 + "lcall *%%ss:(%%esi); cld\n\t"
26410 "pop %%es\n\t"
26411 + "push %%ss\n\t"
26412 + "pop %%ds\n"
26413 "jc 1f\n\t"
26414 "xor %%ah, %%ah\n"
26415 "1:"
26416 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26417 "1" (0),
26418 "D" ((long) &opt),
26419 "S" (&pci_indirect),
26420 - "m" (opt)
26421 + "m" (opt),
26422 + "r" (__PCIBIOS_DS)
26423 : "memory");
26424 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26425 if (ret & 0xff00)
26426 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26427 {
26428 int ret;
26429
26430 - __asm__("lcall *(%%esi); cld\n\t"
26431 + __asm__("movw %w5, %%ds\n\t"
26432 + "lcall *%%ss:(%%esi); cld\n\t"
26433 + "push %%ss\n\t"
26434 + "pop %%ds\n"
26435 "jc 1f\n\t"
26436 "xor %%ah, %%ah\n"
26437 "1:"
26438 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26439 : "0" (PCIBIOS_SET_PCI_HW_INT),
26440 "b" ((dev->bus->number << 8) | dev->devfn),
26441 "c" ((irq << 8) | (pin + 10)),
26442 - "S" (&pci_indirect));
26443 + "S" (&pci_indirect),
26444 + "r" (__PCIBIOS_DS));
26445 return !(ret & 0xff00);
26446 }
26447 EXPORT_SYMBOL(pcibios_set_irq_routing);
26448 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26449 index 40e4469..1ab536e 100644
26450 --- a/arch/x86/platform/efi/efi_32.c
26451 +++ b/arch/x86/platform/efi/efi_32.c
26452 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26453 {
26454 struct desc_ptr gdt_descr;
26455
26456 +#ifdef CONFIG_PAX_KERNEXEC
26457 + struct desc_struct d;
26458 +#endif
26459 +
26460 local_irq_save(efi_rt_eflags);
26461
26462 load_cr3(initial_page_table);
26463 __flush_tlb_all();
26464
26465 +#ifdef CONFIG_PAX_KERNEXEC
26466 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26467 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26468 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26469 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26470 +#endif
26471 +
26472 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26473 gdt_descr.size = GDT_SIZE - 1;
26474 load_gdt(&gdt_descr);
26475 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26476 {
26477 struct desc_ptr gdt_descr;
26478
26479 +#ifdef CONFIG_PAX_KERNEXEC
26480 + struct desc_struct d;
26481 +
26482 + memset(&d, 0, sizeof d);
26483 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26484 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26485 +#endif
26486 +
26487 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26488 gdt_descr.size = GDT_SIZE - 1;
26489 load_gdt(&gdt_descr);
26490 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26491 index fbe66e6..c5c0dd2 100644
26492 --- a/arch/x86/platform/efi/efi_stub_32.S
26493 +++ b/arch/x86/platform/efi/efi_stub_32.S
26494 @@ -6,7 +6,9 @@
26495 */
26496
26497 #include <linux/linkage.h>
26498 +#include <linux/init.h>
26499 #include <asm/page_types.h>
26500 +#include <asm/segment.h>
26501
26502 /*
26503 * efi_call_phys(void *, ...) is a function with variable parameters.
26504 @@ -20,7 +22,7 @@
26505 * service functions will comply with gcc calling convention, too.
26506 */
26507
26508 -.text
26509 +__INIT
26510 ENTRY(efi_call_phys)
26511 /*
26512 * 0. The function can only be called in Linux kernel. So CS has been
26513 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26514 * The mapping of lower virtual memory has been created in prelog and
26515 * epilog.
26516 */
26517 - movl $1f, %edx
26518 - subl $__PAGE_OFFSET, %edx
26519 - jmp *%edx
26520 + movl $(__KERNEXEC_EFI_DS), %edx
26521 + mov %edx, %ds
26522 + mov %edx, %es
26523 + mov %edx, %ss
26524 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26525 1:
26526
26527 /*
26528 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26529 * parameter 2, ..., param n. To make things easy, we save the return
26530 * address of efi_call_phys in a global variable.
26531 */
26532 - popl %edx
26533 - movl %edx, saved_return_addr
26534 - /* get the function pointer into ECX*/
26535 - popl %ecx
26536 - movl %ecx, efi_rt_function_ptr
26537 - movl $2f, %edx
26538 - subl $__PAGE_OFFSET, %edx
26539 - pushl %edx
26540 + popl (saved_return_addr)
26541 + popl (efi_rt_function_ptr)
26542
26543 /*
26544 * 3. Clear PG bit in %CR0.
26545 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26546 /*
26547 * 5. Call the physical function.
26548 */
26549 - jmp *%ecx
26550 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26551
26552 -2:
26553 /*
26554 * 6. After EFI runtime service returns, control will return to
26555 * following instruction. We'd better readjust stack pointer first.
26556 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26557 movl %cr0, %edx
26558 orl $0x80000000, %edx
26559 movl %edx, %cr0
26560 - jmp 1f
26561 -1:
26562 +
26563 /*
26564 * 8. Now restore the virtual mode from flat mode by
26565 * adding EIP with PAGE_OFFSET.
26566 */
26567 - movl $1f, %edx
26568 - jmp *%edx
26569 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26570 1:
26571 + movl $(__KERNEL_DS), %edx
26572 + mov %edx, %ds
26573 + mov %edx, %es
26574 + mov %edx, %ss
26575
26576 /*
26577 * 9. Balance the stack. And because EAX contain the return value,
26578 * we'd better not clobber it.
26579 */
26580 - leal efi_rt_function_ptr, %edx
26581 - movl (%edx), %ecx
26582 - pushl %ecx
26583 + pushl (efi_rt_function_ptr)
26584
26585 /*
26586 - * 10. Push the saved return address onto the stack and return.
26587 + * 10. Return to the saved return address.
26588 */
26589 - leal saved_return_addr, %edx
26590 - movl (%edx), %ecx
26591 - pushl %ecx
26592 - ret
26593 + jmpl *(saved_return_addr)
26594 ENDPROC(efi_call_phys)
26595 .previous
26596
26597 -.data
26598 +__INITDATA
26599 saved_return_addr:
26600 .long 0
26601 efi_rt_function_ptr:
26602 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26603 index 4c07cca..2c8427d 100644
26604 --- a/arch/x86/platform/efi/efi_stub_64.S
26605 +++ b/arch/x86/platform/efi/efi_stub_64.S
26606 @@ -7,6 +7,7 @@
26607 */
26608
26609 #include <linux/linkage.h>
26610 +#include <asm/alternative-asm.h>
26611
26612 #define SAVE_XMM \
26613 mov %rsp, %rax; \
26614 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26615 call *%rdi
26616 addq $32, %rsp
26617 RESTORE_XMM
26618 + pax_force_retaddr 0, 1
26619 ret
26620 ENDPROC(efi_call0)
26621
26622 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26623 call *%rdi
26624 addq $32, %rsp
26625 RESTORE_XMM
26626 + pax_force_retaddr 0, 1
26627 ret
26628 ENDPROC(efi_call1)
26629
26630 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26631 call *%rdi
26632 addq $32, %rsp
26633 RESTORE_XMM
26634 + pax_force_retaddr 0, 1
26635 ret
26636 ENDPROC(efi_call2)
26637
26638 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26639 call *%rdi
26640 addq $32, %rsp
26641 RESTORE_XMM
26642 + pax_force_retaddr 0, 1
26643 ret
26644 ENDPROC(efi_call3)
26645
26646 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26647 call *%rdi
26648 addq $32, %rsp
26649 RESTORE_XMM
26650 + pax_force_retaddr 0, 1
26651 ret
26652 ENDPROC(efi_call4)
26653
26654 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26655 call *%rdi
26656 addq $48, %rsp
26657 RESTORE_XMM
26658 + pax_force_retaddr 0, 1
26659 ret
26660 ENDPROC(efi_call5)
26661
26662 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26663 call *%rdi
26664 addq $48, %rsp
26665 RESTORE_XMM
26666 + pax_force_retaddr 0, 1
26667 ret
26668 ENDPROC(efi_call6)
26669 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26670 index 475e2cd..1b8e708 100644
26671 --- a/arch/x86/platform/mrst/mrst.c
26672 +++ b/arch/x86/platform/mrst/mrst.c
26673 @@ -76,18 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26674 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26675 int sfi_mrtc_num;
26676
26677 -static void mrst_power_off(void)
26678 +static __noreturn void mrst_power_off(void)
26679 {
26680 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26681 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
26682 + BUG();
26683 }
26684
26685 -static void mrst_reboot(void)
26686 +static __noreturn void mrst_reboot(void)
26687 {
26688 if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
26689 intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
26690 else
26691 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26692 + BUG();
26693 }
26694
26695 /* parse all the mtimer info to a static mtimer array */
26696 diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
26697 index 3ae0e61..4202d86 100644
26698 --- a/arch/x86/platform/uv/tlb_uv.c
26699 +++ b/arch/x86/platform/uv/tlb_uv.c
26700 @@ -1424,6 +1424,8 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
26701 * 0: display meaning of the statistics
26702 */
26703 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
26704 + size_t count, loff_t *data) __size_overflow(3);
26705 +static ssize_t ptc_proc_write(struct file *file, const char __user *user,
26706 size_t count, loff_t *data)
26707 {
26708 int cpu;
26709 @@ -1539,6 +1541,8 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
26710 * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
26711 */
26712 static ssize_t tunables_write(struct file *file, const char __user *user,
26713 + size_t count, loff_t *data) __size_overflow(3);
26714 +static ssize_t tunables_write(struct file *file, const char __user *user,
26715 size_t count, loff_t *data)
26716 {
26717 int cpu;
26718 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26719 index f10c0af..3ec1f95 100644
26720 --- a/arch/x86/power/cpu.c
26721 +++ b/arch/x86/power/cpu.c
26722 @@ -131,7 +131,7 @@ static void do_fpu_end(void)
26723 static void fix_processor_context(void)
26724 {
26725 int cpu = smp_processor_id();
26726 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26727 + struct tss_struct *t = init_tss + cpu;
26728
26729 set_tss_desc(cpu, t); /*
26730 * This just modifies memory; should not be
26731 @@ -141,7 +141,9 @@ static void fix_processor_context(void)
26732 */
26733
26734 #ifdef CONFIG_X86_64
26735 + pax_open_kernel();
26736 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26737 + pax_close_kernel();
26738
26739 syscall_init(); /* This sets MSR_*STAR and related */
26740 #endif
26741 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26742 index 5d17950..2253fc9 100644
26743 --- a/arch/x86/vdso/Makefile
26744 +++ b/arch/x86/vdso/Makefile
26745 @@ -137,7 +137,7 @@ quiet_cmd_vdso = VDSO $@
26746 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26747 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26748
26749 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26750 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26751 GCOV_PROFILE := n
26752
26753 #
26754 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26755 index 468d591..8e80a0a 100644
26756 --- a/arch/x86/vdso/vdso32-setup.c
26757 +++ b/arch/x86/vdso/vdso32-setup.c
26758 @@ -25,6 +25,7 @@
26759 #include <asm/tlbflush.h>
26760 #include <asm/vdso.h>
26761 #include <asm/proto.h>
26762 +#include <asm/mman.h>
26763
26764 enum {
26765 VDSO_DISABLED = 0,
26766 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26767 void enable_sep_cpu(void)
26768 {
26769 int cpu = get_cpu();
26770 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26771 + struct tss_struct *tss = init_tss + cpu;
26772
26773 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26774 put_cpu();
26775 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26776 gate_vma.vm_start = FIXADDR_USER_START;
26777 gate_vma.vm_end = FIXADDR_USER_END;
26778 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26779 - gate_vma.vm_page_prot = __P101;
26780 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26781 /*
26782 * Make sure the vDSO gets into every core dump.
26783 * Dumping its contents makes post-mortem fully interpretable later
26784 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26785 if (compat)
26786 addr = VDSO_HIGH_BASE;
26787 else {
26788 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26789 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26790 if (IS_ERR_VALUE(addr)) {
26791 ret = addr;
26792 goto up_fail;
26793 }
26794 }
26795
26796 - current->mm->context.vdso = (void *)addr;
26797 + current->mm->context.vdso = addr;
26798
26799 if (compat_uses_vma || !compat) {
26800 /*
26801 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26802 }
26803
26804 current_thread_info()->sysenter_return =
26805 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26806 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26807
26808 up_fail:
26809 if (ret)
26810 - current->mm->context.vdso = NULL;
26811 + current->mm->context.vdso = 0;
26812
26813 up_write(&mm->mmap_sem);
26814
26815 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
26816
26817 const char *arch_vma_name(struct vm_area_struct *vma)
26818 {
26819 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26820 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26821 return "[vdso]";
26822 +
26823 +#ifdef CONFIG_PAX_SEGMEXEC
26824 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26825 + return "[vdso]";
26826 +#endif
26827 +
26828 return NULL;
26829 }
26830
26831 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
26832 * Check to see if the corresponding task was created in compat vdso
26833 * mode.
26834 */
26835 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26836 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26837 return &gate_vma;
26838 return NULL;
26839 }
26840 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26841 index 153407c..611cba9 100644
26842 --- a/arch/x86/vdso/vma.c
26843 +++ b/arch/x86/vdso/vma.c
26844 @@ -16,8 +16,6 @@
26845 #include <asm/vdso.h>
26846 #include <asm/page.h>
26847
26848 -unsigned int __read_mostly vdso_enabled = 1;
26849 -
26850 extern char vdso_start[], vdso_end[];
26851 extern unsigned short vdso_sync_cpuid;
26852
26853 @@ -96,7 +94,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26854 * unaligned here as a result of stack start randomization.
26855 */
26856 addr = PAGE_ALIGN(addr);
26857 - addr = align_addr(addr, NULL, ALIGN_VDSO);
26858
26859 return addr;
26860 }
26861 @@ -106,40 +103,35 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26862 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26863 {
26864 struct mm_struct *mm = current->mm;
26865 - unsigned long addr;
26866 + unsigned long addr = 0;
26867 int ret;
26868
26869 - if (!vdso_enabled)
26870 - return 0;
26871 -
26872 down_write(&mm->mmap_sem);
26873 +
26874 +#ifdef CONFIG_PAX_RANDMMAP
26875 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26876 +#endif
26877 +
26878 addr = vdso_addr(mm->start_stack, vdso_size);
26879 + addr = align_addr(addr, NULL, ALIGN_VDSO);
26880 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26881 if (IS_ERR_VALUE(addr)) {
26882 ret = addr;
26883 goto up_fail;
26884 }
26885
26886 - current->mm->context.vdso = (void *)addr;
26887 + mm->context.vdso = addr;
26888
26889 ret = install_special_mapping(mm, addr, vdso_size,
26890 VM_READ|VM_EXEC|
26891 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
26892 VM_ALWAYSDUMP,
26893 vdso_pages);
26894 - if (ret) {
26895 - current->mm->context.vdso = NULL;
26896 - goto up_fail;
26897 - }
26898 +
26899 + if (ret)
26900 + mm->context.vdso = 0;
26901
26902 up_fail:
26903 up_write(&mm->mmap_sem);
26904 return ret;
26905 }
26906 -
26907 -static __init int vdso_setup(char *s)
26908 -{
26909 - vdso_enabled = simple_strtoul(s, NULL, 0);
26910 - return 0;
26911 -}
26912 -__setup("vdso=", vdso_setup);
26913 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26914 index 4e517d4..68a48f5 100644
26915 --- a/arch/x86/xen/enlighten.c
26916 +++ b/arch/x86/xen/enlighten.c
26917 @@ -86,8 +86,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26918
26919 struct shared_info xen_dummy_shared_info;
26920
26921 -void *xen_initial_gdt;
26922 -
26923 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
26924 __read_mostly int xen_have_vector_callback;
26925 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
26926 @@ -1030,30 +1028,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
26927 #endif
26928 };
26929
26930 -static void xen_reboot(int reason)
26931 +static __noreturn void xen_reboot(int reason)
26932 {
26933 struct sched_shutdown r = { .reason = reason };
26934
26935 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
26936 - BUG();
26937 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
26938 + BUG();
26939 }
26940
26941 -static void xen_restart(char *msg)
26942 +static __noreturn void xen_restart(char *msg)
26943 {
26944 xen_reboot(SHUTDOWN_reboot);
26945 }
26946
26947 -static void xen_emergency_restart(void)
26948 +static __noreturn void xen_emergency_restart(void)
26949 {
26950 xen_reboot(SHUTDOWN_reboot);
26951 }
26952
26953 -static void xen_machine_halt(void)
26954 +static __noreturn void xen_machine_halt(void)
26955 {
26956 xen_reboot(SHUTDOWN_poweroff);
26957 }
26958
26959 -static void xen_machine_power_off(void)
26960 +static __noreturn void xen_machine_power_off(void)
26961 {
26962 if (pm_power_off)
26963 pm_power_off();
26964 @@ -1156,7 +1154,17 @@ asmlinkage void __init xen_start_kernel(void)
26965 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26966
26967 /* Work out if we support NX */
26968 - x86_configure_nx();
26969 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26970 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26971 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26972 + unsigned l, h;
26973 +
26974 + __supported_pte_mask |= _PAGE_NX;
26975 + rdmsr(MSR_EFER, l, h);
26976 + l |= EFER_NX;
26977 + wrmsr(MSR_EFER, l, h);
26978 + }
26979 +#endif
26980
26981 xen_setup_features();
26982
26983 @@ -1187,13 +1195,6 @@ asmlinkage void __init xen_start_kernel(void)
26984
26985 machine_ops = xen_machine_ops;
26986
26987 - /*
26988 - * The only reliable way to retain the initial address of the
26989 - * percpu gdt_page is to remember it here, so we can go and
26990 - * mark it RW later, when the initial percpu area is freed.
26991 - */
26992 - xen_initial_gdt = &per_cpu(gdt_page, 0);
26993 -
26994 xen_smp_init();
26995
26996 #ifdef CONFIG_ACPI_NUMA
26997 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
26998 index dc19347..1b07a2c 100644
26999 --- a/arch/x86/xen/mmu.c
27000 +++ b/arch/x86/xen/mmu.c
27001 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27002 convert_pfn_mfn(init_level4_pgt);
27003 convert_pfn_mfn(level3_ident_pgt);
27004 convert_pfn_mfn(level3_kernel_pgt);
27005 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27006 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27007 + convert_pfn_mfn(level3_vmemmap_pgt);
27008
27009 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27010 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27011 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27012 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27013 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27014 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27015 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27016 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27017 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27018 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27019 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27020 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27021 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27022
27023 @@ -1963,6 +1970,7 @@ static void __init xen_post_allocator_init(void)
27024 pv_mmu_ops.set_pud = xen_set_pud;
27025 #if PAGETABLE_LEVELS == 4
27026 pv_mmu_ops.set_pgd = xen_set_pgd;
27027 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27028 #endif
27029
27030 /* This will work as long as patching hasn't happened yet
27031 @@ -2044,6 +2052,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27032 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27033 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27034 .set_pgd = xen_set_pgd_hyper,
27035 + .set_pgd_batched = xen_set_pgd_hyper,
27036
27037 .alloc_pud = xen_alloc_pmd_init,
27038 .release_pud = xen_release_pmd_init,
27039 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27040 index f2ce60a..14e08dc 100644
27041 --- a/arch/x86/xen/smp.c
27042 +++ b/arch/x86/xen/smp.c
27043 @@ -209,11 +209,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27044 {
27045 BUG_ON(smp_processor_id() != 0);
27046 native_smp_prepare_boot_cpu();
27047 -
27048 - /* We've switched to the "real" per-cpu gdt, so make sure the
27049 - old memory can be recycled */
27050 - make_lowmem_page_readwrite(xen_initial_gdt);
27051 -
27052 xen_filter_cpu_maps();
27053 xen_setup_vcpu_info_placement();
27054 }
27055 @@ -290,12 +285,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27056 gdt = get_cpu_gdt_table(cpu);
27057
27058 ctxt->flags = VGCF_IN_KERNEL;
27059 - ctxt->user_regs.ds = __USER_DS;
27060 - ctxt->user_regs.es = __USER_DS;
27061 + ctxt->user_regs.ds = __KERNEL_DS;
27062 + ctxt->user_regs.es = __KERNEL_DS;
27063 ctxt->user_regs.ss = __KERNEL_DS;
27064 #ifdef CONFIG_X86_32
27065 ctxt->user_regs.fs = __KERNEL_PERCPU;
27066 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27067 + savesegment(gs, ctxt->user_regs.gs);
27068 #else
27069 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27070 #endif
27071 @@ -346,13 +341,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27072 int rc;
27073
27074 per_cpu(current_task, cpu) = idle;
27075 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27076 #ifdef CONFIG_X86_32
27077 irq_ctx_init(cpu);
27078 #else
27079 clear_tsk_thread_flag(idle, TIF_FORK);
27080 - per_cpu(kernel_stack, cpu) =
27081 - (unsigned long)task_stack_page(idle) -
27082 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27083 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27084 #endif
27085 xen_setup_runstate_info(cpu);
27086 xen_setup_timer(cpu);
27087 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27088 index b040b0e..8cc4fe0 100644
27089 --- a/arch/x86/xen/xen-asm_32.S
27090 +++ b/arch/x86/xen/xen-asm_32.S
27091 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27092 ESP_OFFSET=4 # bytes pushed onto stack
27093
27094 /*
27095 - * Store vcpu_info pointer for easy access. Do it this way to
27096 - * avoid having to reload %fs
27097 + * Store vcpu_info pointer for easy access.
27098 */
27099 #ifdef CONFIG_SMP
27100 - GET_THREAD_INFO(%eax)
27101 - movl TI_cpu(%eax), %eax
27102 - movl __per_cpu_offset(,%eax,4), %eax
27103 - mov xen_vcpu(%eax), %eax
27104 + push %fs
27105 + mov $(__KERNEL_PERCPU), %eax
27106 + mov %eax, %fs
27107 + mov PER_CPU_VAR(xen_vcpu), %eax
27108 + pop %fs
27109 #else
27110 movl xen_vcpu, %eax
27111 #endif
27112 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27113 index aaa7291..3f77960 100644
27114 --- a/arch/x86/xen/xen-head.S
27115 +++ b/arch/x86/xen/xen-head.S
27116 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27117 #ifdef CONFIG_X86_32
27118 mov %esi,xen_start_info
27119 mov $init_thread_union+THREAD_SIZE,%esp
27120 +#ifdef CONFIG_SMP
27121 + movl $cpu_gdt_table,%edi
27122 + movl $__per_cpu_load,%eax
27123 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27124 + rorl $16,%eax
27125 + movb %al,__KERNEL_PERCPU + 4(%edi)
27126 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27127 + movl $__per_cpu_end - 1,%eax
27128 + subl $__per_cpu_start,%eax
27129 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27130 +#endif
27131 #else
27132 mov %rsi,xen_start_info
27133 mov $init_thread_union+THREAD_SIZE,%rsp
27134 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27135 index b095739..8c17bcd 100644
27136 --- a/arch/x86/xen/xen-ops.h
27137 +++ b/arch/x86/xen/xen-ops.h
27138 @@ -10,8 +10,6 @@
27139 extern const char xen_hypervisor_callback[];
27140 extern const char xen_failsafe_callback[];
27141
27142 -extern void *xen_initial_gdt;
27143 -
27144 struct trap_info;
27145 void xen_copy_trap_info(struct trap_info *traps);
27146
27147 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27148 index 525bd3d..ef888b1 100644
27149 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27150 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27151 @@ -119,9 +119,9 @@
27152 ----------------------------------------------------------------------*/
27153
27154 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27155 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27156 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27157 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27158 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27159
27160 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27161 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27162 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27163 index 2f33760..835e50a 100644
27164 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27165 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27166 @@ -11,6 +11,7 @@
27167 #ifndef _XTENSA_CORE_H
27168 #define _XTENSA_CORE_H
27169
27170 +#include <linux/const.h>
27171
27172 /****************************************************************************
27173 Parameters Useful for Any Code, USER or PRIVILEGED
27174 @@ -112,9 +113,9 @@
27175 ----------------------------------------------------------------------*/
27176
27177 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27178 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27179 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27180 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27181 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27182
27183 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27184 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27185 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27186 index af00795..2bb8105 100644
27187 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27188 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27189 @@ -11,6 +11,7 @@
27190 #ifndef _XTENSA_CORE_CONFIGURATION_H
27191 #define _XTENSA_CORE_CONFIGURATION_H
27192
27193 +#include <linux/const.h>
27194
27195 /****************************************************************************
27196 Parameters Useful for Any Code, USER or PRIVILEGED
27197 @@ -118,9 +119,9 @@
27198 ----------------------------------------------------------------------*/
27199
27200 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27201 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27202 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27203 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27204 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27205
27206 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27207 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27208 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27209 index 58916af..9cb880b 100644
27210 --- a/block/blk-iopoll.c
27211 +++ b/block/blk-iopoll.c
27212 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27213 }
27214 EXPORT_SYMBOL(blk_iopoll_complete);
27215
27216 -static void blk_iopoll_softirq(struct softirq_action *h)
27217 +static void blk_iopoll_softirq(void)
27218 {
27219 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27220 int rearm = 0, budget = blk_iopoll_budget;
27221 diff --git a/block/blk-map.c b/block/blk-map.c
27222 index 623e1cd..ca1e109 100644
27223 --- a/block/blk-map.c
27224 +++ b/block/blk-map.c
27225 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27226 if (!len || !kbuf)
27227 return -EINVAL;
27228
27229 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27230 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27231 if (do_copy)
27232 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27233 else
27234 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27235 index 1366a89..e17f54b 100644
27236 --- a/block/blk-softirq.c
27237 +++ b/block/blk-softirq.c
27238 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27239 * Softirq action handler - move entries to local list and loop over them
27240 * while passing them to the queue registered handler.
27241 */
27242 -static void blk_done_softirq(struct softirq_action *h)
27243 +static void blk_done_softirq(void)
27244 {
27245 struct list_head *cpu_list, local_list;
27246
27247 diff --git a/block/bsg.c b/block/bsg.c
27248 index ff64ae3..593560c 100644
27249 --- a/block/bsg.c
27250 +++ b/block/bsg.c
27251 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27252 struct sg_io_v4 *hdr, struct bsg_device *bd,
27253 fmode_t has_write_perm)
27254 {
27255 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27256 + unsigned char *cmdptr;
27257 +
27258 if (hdr->request_len > BLK_MAX_CDB) {
27259 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27260 if (!rq->cmd)
27261 return -ENOMEM;
27262 - }
27263 + cmdptr = rq->cmd;
27264 + } else
27265 + cmdptr = tmpcmd;
27266
27267 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27268 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27269 hdr->request_len))
27270 return -EFAULT;
27271
27272 + if (cmdptr != rq->cmd)
27273 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27274 +
27275 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27276 if (blk_verify_command(rq->cmd, has_write_perm))
27277 return -EPERM;
27278 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27279 index 7c668c8..db3521c 100644
27280 --- a/block/compat_ioctl.c
27281 +++ b/block/compat_ioctl.c
27282 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27283 err |= __get_user(f->spec1, &uf->spec1);
27284 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27285 err |= __get_user(name, &uf->name);
27286 - f->name = compat_ptr(name);
27287 + f->name = (void __force_kernel *)compat_ptr(name);
27288 if (err) {
27289 err = -EFAULT;
27290 goto out;
27291 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27292 index 6296b40..417c00f 100644
27293 --- a/block/partitions/efi.c
27294 +++ b/block/partitions/efi.c
27295 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27296 if (!gpt)
27297 return NULL;
27298
27299 + if (!le32_to_cpu(gpt->num_partition_entries))
27300 + return NULL;
27301 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27302 + if (!pte)
27303 + return NULL;
27304 +
27305 count = le32_to_cpu(gpt->num_partition_entries) *
27306 le32_to_cpu(gpt->sizeof_partition_entry);
27307 - if (!count)
27308 - return NULL;
27309 - pte = kzalloc(count, GFP_KERNEL);
27310 - if (!pte)
27311 - return NULL;
27312 -
27313 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27314 (u8 *) pte,
27315 count) < count) {
27316 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27317 index 260fa80..e8f3caf 100644
27318 --- a/block/scsi_ioctl.c
27319 +++ b/block/scsi_ioctl.c
27320 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27321 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27322 struct sg_io_hdr *hdr, fmode_t mode)
27323 {
27324 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27325 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27326 + unsigned char *cmdptr;
27327 +
27328 + if (rq->cmd != rq->__cmd)
27329 + cmdptr = rq->cmd;
27330 + else
27331 + cmdptr = tmpcmd;
27332 +
27333 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27334 return -EFAULT;
27335 +
27336 + if (cmdptr != rq->cmd)
27337 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27338 +
27339 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27340 return -EPERM;
27341
27342 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27343 int err;
27344 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27345 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27346 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27347 + unsigned char *cmdptr;
27348
27349 if (!sic)
27350 return -EINVAL;
27351 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27352 */
27353 err = -EFAULT;
27354 rq->cmd_len = cmdlen;
27355 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27356 +
27357 + if (rq->cmd != rq->__cmd)
27358 + cmdptr = rq->cmd;
27359 + else
27360 + cmdptr = tmpcmd;
27361 +
27362 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27363 goto error;
27364
27365 + if (rq->cmd != cmdptr)
27366 + memcpy(rq->cmd, cmdptr, cmdlen);
27367 +
27368 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27369 goto error;
27370
27371 diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
27372 index a0f768c..1da9c73 100644
27373 --- a/crypto/ablkcipher.c
27374 +++ b/crypto/ablkcipher.c
27375 @@ -307,6 +307,8 @@ int ablkcipher_walk_phys(struct ablkcipher_request *req,
27376 EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);
27377
27378 static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27379 + unsigned int keylen) __size_overflow(3);
27380 +static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27381 unsigned int keylen)
27382 {
27383 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27384 @@ -329,6 +331,8 @@ static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
27385 }
27386
27387 static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27388 + unsigned int keylen) __size_overflow(3);
27389 +static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
27390 unsigned int keylen)
27391 {
27392 struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
27393 diff --git a/crypto/aead.c b/crypto/aead.c
27394 index 04add3dc..983032f 100644
27395 --- a/crypto/aead.c
27396 +++ b/crypto/aead.c
27397 @@ -27,6 +27,8 @@
27398 #include "internal.h"
27399
27400 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27401 + unsigned int keylen) __size_overflow(3);
27402 +static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27403 unsigned int keylen)
27404 {
27405 struct aead_alg *aead = crypto_aead_alg(tfm);
27406 @@ -48,6 +50,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
27407 return ret;
27408 }
27409
27410 +static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27411 static int setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
27412 {
27413 struct aead_alg *aead = crypto_aead_alg(tfm);
27414 diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
27415 index 1e61d1a..cf06b86 100644
27416 --- a/crypto/blkcipher.c
27417 +++ b/crypto/blkcipher.c
27418 @@ -359,6 +359,8 @@ int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
27419 EXPORT_SYMBOL_GPL(blkcipher_walk_virt_block);
27420
27421 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27422 + unsigned int keylen) __size_overflow(3);
27423 +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27424 unsigned int keylen)
27425 {
27426 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27427 @@ -380,6 +382,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27428 return ret;
27429 }
27430
27431 +static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27432 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27433 {
27434 struct blkcipher_alg *cipher = &tfm->__crt_alg->cra_blkcipher;
27435 diff --git a/crypto/cipher.c b/crypto/cipher.c
27436 index 39541e0..802d956 100644
27437 --- a/crypto/cipher.c
27438 +++ b/crypto/cipher.c
27439 @@ -21,6 +21,8 @@
27440 #include "internal.h"
27441
27442 static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27443 + unsigned int keylen) __size_overflow(3);
27444 +static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27445 unsigned int keylen)
27446 {
27447 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27448 @@ -43,6 +45,7 @@ static int setkey_unaligned(struct crypto_tfm *tfm, const u8 *key,
27449
27450 }
27451
27452 +static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) __size_overflow(3);
27453 static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
27454 {
27455 struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
27456 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27457 index 671d4d6..5f24030 100644
27458 --- a/crypto/cryptd.c
27459 +++ b/crypto/cryptd.c
27460 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27461
27462 struct cryptd_blkcipher_request_ctx {
27463 crypto_completion_t complete;
27464 -};
27465 +} __no_const;
27466
27467 struct cryptd_hash_ctx {
27468 struct crypto_shash *child;
27469 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27470
27471 struct cryptd_aead_request_ctx {
27472 crypto_completion_t complete;
27473 -};
27474 +} __no_const;
27475
27476 static void cryptd_queue_worker(struct work_struct *work);
27477
27478 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27479 index 5d41894..22021e4 100644
27480 --- a/drivers/acpi/apei/cper.c
27481 +++ b/drivers/acpi/apei/cper.c
27482 @@ -38,12 +38,12 @@
27483 */
27484 u64 cper_next_record_id(void)
27485 {
27486 - static atomic64_t seq;
27487 + static atomic64_unchecked_t seq;
27488
27489 - if (!atomic64_read(&seq))
27490 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27491 + if (!atomic64_read_unchecked(&seq))
27492 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27493
27494 - return atomic64_inc_return(&seq);
27495 + return atomic64_inc_return_unchecked(&seq);
27496 }
27497 EXPORT_SYMBOL_GPL(cper_next_record_id);
27498
27499 diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27500 index 86933ca..5cb1a69 100644
27501 --- a/drivers/acpi/battery.c
27502 +++ b/drivers/acpi/battery.c
27503 @@ -787,6 +787,9 @@ static int acpi_battery_print_alarm(struct seq_file *seq, int result)
27504
27505 static ssize_t acpi_battery_write_alarm(struct file *file,
27506 const char __user * buffer,
27507 + size_t count, loff_t * ppos) __size_overflow(3);
27508 +static ssize_t acpi_battery_write_alarm(struct file *file,
27509 + const char __user * buffer,
27510 size_t count, loff_t * ppos)
27511 {
27512 int result = 0;
27513 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27514 index b258cab..3fb7da7 100644
27515 --- a/drivers/acpi/ec_sys.c
27516 +++ b/drivers/acpi/ec_sys.c
27517 @@ -12,6 +12,7 @@
27518 #include <linux/acpi.h>
27519 #include <linux/debugfs.h>
27520 #include <linux/module.h>
27521 +#include <linux/uaccess.h>
27522 #include "internal.h"
27523
27524 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27525 @@ -40,7 +41,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27526 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27527 */
27528 unsigned int size = EC_SPACE_SIZE;
27529 - u8 *data = (u8 *) buf;
27530 + u8 data;
27531 loff_t init_off = *off;
27532 int err = 0;
27533
27534 @@ -53,9 +54,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27535 size = count;
27536
27537 while (size) {
27538 - err = ec_read(*off, &data[*off - init_off]);
27539 + err = ec_read(*off, &data);
27540 if (err)
27541 return err;
27542 + if (put_user(data, &buf[*off - init_off]))
27543 + return -EFAULT;
27544 *off += 1;
27545 size--;
27546 }
27547 @@ -71,7 +74,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27548
27549 unsigned int size = count;
27550 loff_t init_off = *off;
27551 - u8 *data = (u8 *) buf;
27552 int err = 0;
27553
27554 if (*off >= EC_SPACE_SIZE)
27555 @@ -82,7 +84,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27556 }
27557
27558 while (size) {
27559 - u8 byte_write = data[*off - init_off];
27560 + u8 byte_write;
27561 + if (get_user(byte_write, &buf[*off - init_off]))
27562 + return -EFAULT;
27563 err = ec_write(*off, byte_write);
27564 if (err)
27565 return err;
27566 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27567 index 251c7b62..000462d 100644
27568 --- a/drivers/acpi/proc.c
27569 +++ b/drivers/acpi/proc.c
27570 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27571 size_t count, loff_t * ppos)
27572 {
27573 struct list_head *node, *next;
27574 - char strbuf[5];
27575 - char str[5] = "";
27576 - unsigned int len = count;
27577 + char strbuf[5] = {0};
27578
27579 - if (len > 4)
27580 - len = 4;
27581 - if (len < 0)
27582 + if (count > 4)
27583 + count = 4;
27584 + if (copy_from_user(strbuf, buffer, count))
27585 return -EFAULT;
27586 -
27587 - if (copy_from_user(strbuf, buffer, len))
27588 - return -EFAULT;
27589 - strbuf[len] = '\0';
27590 - sscanf(strbuf, "%s", str);
27591 + strbuf[count] = '\0';
27592
27593 mutex_lock(&acpi_device_lock);
27594 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27595 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27596 if (!dev->wakeup.flags.valid)
27597 continue;
27598
27599 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27600 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27601 if (device_can_wakeup(&dev->dev)) {
27602 bool enable = !device_may_wakeup(&dev->dev);
27603 device_set_wakeup_enable(&dev->dev, enable);
27604 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27605 index 8ae05ce..7dbbed9 100644
27606 --- a/drivers/acpi/processor_driver.c
27607 +++ b/drivers/acpi/processor_driver.c
27608 @@ -555,7 +555,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27609 return 0;
27610 #endif
27611
27612 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27613 + BUG_ON(pr->id >= nr_cpu_ids);
27614
27615 /*
27616 * Buggy BIOS check
27617 diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
27618 index 6e36d0c..f319944 100644
27619 --- a/drivers/acpi/sbs.c
27620 +++ b/drivers/acpi/sbs.c
27621 @@ -655,6 +655,9 @@ static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
27622
27623 static ssize_t
27624 acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27625 + size_t count, loff_t * ppos) __size_overflow(3);
27626 +static ssize_t
27627 +acpi_battery_write_alarm(struct file *file, const char __user * buffer,
27628 size_t count, loff_t * ppos)
27629 {
27630 struct seq_file *seq = file->private_data;
27631 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27632 index c06e0ec..a2c06ba 100644
27633 --- a/drivers/ata/libata-core.c
27634 +++ b/drivers/ata/libata-core.c
27635 @@ -4736,7 +4736,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27636 struct ata_port *ap;
27637 unsigned int tag;
27638
27639 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27640 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27641 ap = qc->ap;
27642
27643 qc->flags = 0;
27644 @@ -4752,7 +4752,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27645 struct ata_port *ap;
27646 struct ata_link *link;
27647
27648 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27649 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27650 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27651 ap = qc->ap;
27652 link = qc->dev->link;
27653 @@ -5816,6 +5816,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27654 return;
27655
27656 spin_lock(&lock);
27657 + pax_open_kernel();
27658
27659 for (cur = ops->inherits; cur; cur = cur->inherits) {
27660 void **inherit = (void **)cur;
27661 @@ -5829,8 +5830,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27662 if (IS_ERR(*pp))
27663 *pp = NULL;
27664
27665 - ops->inherits = NULL;
27666 + *(struct ata_port_operations **)&ops->inherits = NULL;
27667
27668 + pax_close_kernel();
27669 spin_unlock(&lock);
27670 }
27671
27672 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27673 index 048589f..4002b98 100644
27674 --- a/drivers/ata/pata_arasan_cf.c
27675 +++ b/drivers/ata/pata_arasan_cf.c
27676 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27677 /* Handle platform specific quirks */
27678 if (pdata->quirk) {
27679 if (pdata->quirk & CF_BROKEN_PIO) {
27680 - ap->ops->set_piomode = NULL;
27681 + pax_open_kernel();
27682 + *(void **)&ap->ops->set_piomode = NULL;
27683 + pax_close_kernel();
27684 ap->pio_mask = 0;
27685 }
27686 if (pdata->quirk & CF_BROKEN_MWDMA)
27687 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27688 index f9b983a..887b9d8 100644
27689 --- a/drivers/atm/adummy.c
27690 +++ b/drivers/atm/adummy.c
27691 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27692 vcc->pop(vcc, skb);
27693 else
27694 dev_kfree_skb_any(skb);
27695 - atomic_inc(&vcc->stats->tx);
27696 + atomic_inc_unchecked(&vcc->stats->tx);
27697
27698 return 0;
27699 }
27700 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27701 index f8f41e0..1f987dd 100644
27702 --- a/drivers/atm/ambassador.c
27703 +++ b/drivers/atm/ambassador.c
27704 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27705 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27706
27707 // VC layer stats
27708 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27709 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27710
27711 // free the descriptor
27712 kfree (tx_descr);
27713 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27714 dump_skb ("<<<", vc, skb);
27715
27716 // VC layer stats
27717 - atomic_inc(&atm_vcc->stats->rx);
27718 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27719 __net_timestamp(skb);
27720 // end of our responsibility
27721 atm_vcc->push (atm_vcc, skb);
27722 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27723 } else {
27724 PRINTK (KERN_INFO, "dropped over-size frame");
27725 // should we count this?
27726 - atomic_inc(&atm_vcc->stats->rx_drop);
27727 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27728 }
27729
27730 } else {
27731 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27732 }
27733
27734 if (check_area (skb->data, skb->len)) {
27735 - atomic_inc(&atm_vcc->stats->tx_err);
27736 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27737 return -ENOMEM; // ?
27738 }
27739
27740 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27741 index b22d71c..d6e1049 100644
27742 --- a/drivers/atm/atmtcp.c
27743 +++ b/drivers/atm/atmtcp.c
27744 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27745 if (vcc->pop) vcc->pop(vcc,skb);
27746 else dev_kfree_skb(skb);
27747 if (dev_data) return 0;
27748 - atomic_inc(&vcc->stats->tx_err);
27749 + atomic_inc_unchecked(&vcc->stats->tx_err);
27750 return -ENOLINK;
27751 }
27752 size = skb->len+sizeof(struct atmtcp_hdr);
27753 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27754 if (!new_skb) {
27755 if (vcc->pop) vcc->pop(vcc,skb);
27756 else dev_kfree_skb(skb);
27757 - atomic_inc(&vcc->stats->tx_err);
27758 + atomic_inc_unchecked(&vcc->stats->tx_err);
27759 return -ENOBUFS;
27760 }
27761 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27762 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27763 if (vcc->pop) vcc->pop(vcc,skb);
27764 else dev_kfree_skb(skb);
27765 out_vcc->push(out_vcc,new_skb);
27766 - atomic_inc(&vcc->stats->tx);
27767 - atomic_inc(&out_vcc->stats->rx);
27768 + atomic_inc_unchecked(&vcc->stats->tx);
27769 + atomic_inc_unchecked(&out_vcc->stats->rx);
27770 return 0;
27771 }
27772
27773 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27774 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27775 read_unlock(&vcc_sklist_lock);
27776 if (!out_vcc) {
27777 - atomic_inc(&vcc->stats->tx_err);
27778 + atomic_inc_unchecked(&vcc->stats->tx_err);
27779 goto done;
27780 }
27781 skb_pull(skb,sizeof(struct atmtcp_hdr));
27782 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27783 __net_timestamp(new_skb);
27784 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27785 out_vcc->push(out_vcc,new_skb);
27786 - atomic_inc(&vcc->stats->tx);
27787 - atomic_inc(&out_vcc->stats->rx);
27788 + atomic_inc_unchecked(&vcc->stats->tx);
27789 + atomic_inc_unchecked(&out_vcc->stats->rx);
27790 done:
27791 if (vcc->pop) vcc->pop(vcc,skb);
27792 else dev_kfree_skb(skb);
27793 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27794 index 956e9ac..133516d 100644
27795 --- a/drivers/atm/eni.c
27796 +++ b/drivers/atm/eni.c
27797 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27798 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27799 vcc->dev->number);
27800 length = 0;
27801 - atomic_inc(&vcc->stats->rx_err);
27802 + atomic_inc_unchecked(&vcc->stats->rx_err);
27803 }
27804 else {
27805 length = ATM_CELL_SIZE-1; /* no HEC */
27806 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27807 size);
27808 }
27809 eff = length = 0;
27810 - atomic_inc(&vcc->stats->rx_err);
27811 + atomic_inc_unchecked(&vcc->stats->rx_err);
27812 }
27813 else {
27814 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27815 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27816 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27817 vcc->dev->number,vcc->vci,length,size << 2,descr);
27818 length = eff = 0;
27819 - atomic_inc(&vcc->stats->rx_err);
27820 + atomic_inc_unchecked(&vcc->stats->rx_err);
27821 }
27822 }
27823 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27824 @@ -771,7 +771,7 @@ rx_dequeued++;
27825 vcc->push(vcc,skb);
27826 pushed++;
27827 }
27828 - atomic_inc(&vcc->stats->rx);
27829 + atomic_inc_unchecked(&vcc->stats->rx);
27830 }
27831 wake_up(&eni_dev->rx_wait);
27832 }
27833 @@ -1229,7 +1229,7 @@ static void dequeue_tx(struct atm_dev *dev)
27834 PCI_DMA_TODEVICE);
27835 if (vcc->pop) vcc->pop(vcc,skb);
27836 else dev_kfree_skb_irq(skb);
27837 - atomic_inc(&vcc->stats->tx);
27838 + atomic_inc_unchecked(&vcc->stats->tx);
27839 wake_up(&eni_dev->tx_wait);
27840 dma_complete++;
27841 }
27842 @@ -1569,7 +1569,7 @@ tx_complete++;
27843 /*--------------------------------- entries ---------------------------------*/
27844
27845
27846 -static const char *media_name[] __devinitdata = {
27847 +static const char *media_name[] __devinitconst = {
27848 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27849 "UTP", "05?", "06?", "07?", /* 4- 7 */
27850 "TAXI","09?", "10?", "11?", /* 8-11 */
27851 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27852 index 5072f8a..fa52520d 100644
27853 --- a/drivers/atm/firestream.c
27854 +++ b/drivers/atm/firestream.c
27855 @@ -750,7 +750,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27856 }
27857 }
27858
27859 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27860 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27861
27862 fs_dprintk (FS_DEBUG_TXMEM, "i");
27863 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27864 @@ -817,7 +817,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27865 #endif
27866 skb_put (skb, qe->p1 & 0xffff);
27867 ATM_SKB(skb)->vcc = atm_vcc;
27868 - atomic_inc(&atm_vcc->stats->rx);
27869 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27870 __net_timestamp(skb);
27871 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27872 atm_vcc->push (atm_vcc, skb);
27873 @@ -838,12 +838,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27874 kfree (pe);
27875 }
27876 if (atm_vcc)
27877 - atomic_inc(&atm_vcc->stats->rx_drop);
27878 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27879 break;
27880 case 0x1f: /* Reassembly abort: no buffers. */
27881 /* Silently increment error counter. */
27882 if (atm_vcc)
27883 - atomic_inc(&atm_vcc->stats->rx_drop);
27884 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27885 break;
27886 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27887 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27888 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27889 index 361f5ae..7fc552d 100644
27890 --- a/drivers/atm/fore200e.c
27891 +++ b/drivers/atm/fore200e.c
27892 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27893 #endif
27894 /* check error condition */
27895 if (*entry->status & STATUS_ERROR)
27896 - atomic_inc(&vcc->stats->tx_err);
27897 + atomic_inc_unchecked(&vcc->stats->tx_err);
27898 else
27899 - atomic_inc(&vcc->stats->tx);
27900 + atomic_inc_unchecked(&vcc->stats->tx);
27901 }
27902 }
27903
27904 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27905 if (skb == NULL) {
27906 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
27907
27908 - atomic_inc(&vcc->stats->rx_drop);
27909 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27910 return -ENOMEM;
27911 }
27912
27913 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
27914
27915 dev_kfree_skb_any(skb);
27916
27917 - atomic_inc(&vcc->stats->rx_drop);
27918 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27919 return -ENOMEM;
27920 }
27921
27922 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27923
27924 vcc->push(vcc, skb);
27925 - atomic_inc(&vcc->stats->rx);
27926 + atomic_inc_unchecked(&vcc->stats->rx);
27927
27928 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
27929
27930 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
27931 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
27932 fore200e->atm_dev->number,
27933 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
27934 - atomic_inc(&vcc->stats->rx_err);
27935 + atomic_inc_unchecked(&vcc->stats->rx_err);
27936 }
27937 }
27938
27939 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
27940 goto retry_here;
27941 }
27942
27943 - atomic_inc(&vcc->stats->tx_err);
27944 + atomic_inc_unchecked(&vcc->stats->tx_err);
27945
27946 fore200e->tx_sat++;
27947 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
27948 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
27949 index b182c2f..1c6fa8a 100644
27950 --- a/drivers/atm/he.c
27951 +++ b/drivers/atm/he.c
27952 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27953
27954 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
27955 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
27956 - atomic_inc(&vcc->stats->rx_drop);
27957 + atomic_inc_unchecked(&vcc->stats->rx_drop);
27958 goto return_host_buffers;
27959 }
27960
27961 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27962 RBRQ_LEN_ERR(he_dev->rbrq_head)
27963 ? "LEN_ERR" : "",
27964 vcc->vpi, vcc->vci);
27965 - atomic_inc(&vcc->stats->rx_err);
27966 + atomic_inc_unchecked(&vcc->stats->rx_err);
27967 goto return_host_buffers;
27968 }
27969
27970 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
27971 vcc->push(vcc, skb);
27972 spin_lock(&he_dev->global_lock);
27973
27974 - atomic_inc(&vcc->stats->rx);
27975 + atomic_inc_unchecked(&vcc->stats->rx);
27976
27977 return_host_buffers:
27978 ++pdus_assembled;
27979 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
27980 tpd->vcc->pop(tpd->vcc, tpd->skb);
27981 else
27982 dev_kfree_skb_any(tpd->skb);
27983 - atomic_inc(&tpd->vcc->stats->tx_err);
27984 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
27985 }
27986 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
27987 return;
27988 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27989 vcc->pop(vcc, skb);
27990 else
27991 dev_kfree_skb_any(skb);
27992 - atomic_inc(&vcc->stats->tx_err);
27993 + atomic_inc_unchecked(&vcc->stats->tx_err);
27994 return -EINVAL;
27995 }
27996
27997 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
27998 vcc->pop(vcc, skb);
27999 else
28000 dev_kfree_skb_any(skb);
28001 - atomic_inc(&vcc->stats->tx_err);
28002 + atomic_inc_unchecked(&vcc->stats->tx_err);
28003 return -EINVAL;
28004 }
28005 #endif
28006 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28007 vcc->pop(vcc, skb);
28008 else
28009 dev_kfree_skb_any(skb);
28010 - atomic_inc(&vcc->stats->tx_err);
28011 + atomic_inc_unchecked(&vcc->stats->tx_err);
28012 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28013 return -ENOMEM;
28014 }
28015 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28016 vcc->pop(vcc, skb);
28017 else
28018 dev_kfree_skb_any(skb);
28019 - atomic_inc(&vcc->stats->tx_err);
28020 + atomic_inc_unchecked(&vcc->stats->tx_err);
28021 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28022 return -ENOMEM;
28023 }
28024 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28025 __enqueue_tpd(he_dev, tpd, cid);
28026 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28027
28028 - atomic_inc(&vcc->stats->tx);
28029 + atomic_inc_unchecked(&vcc->stats->tx);
28030
28031 return 0;
28032 }
28033 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28034 index b812103..e391a49 100644
28035 --- a/drivers/atm/horizon.c
28036 +++ b/drivers/atm/horizon.c
28037 @@ -1035,7 +1035,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28038 {
28039 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28040 // VC layer stats
28041 - atomic_inc(&vcc->stats->rx);
28042 + atomic_inc_unchecked(&vcc->stats->rx);
28043 __net_timestamp(skb);
28044 // end of our responsibility
28045 vcc->push (vcc, skb);
28046 @@ -1187,7 +1187,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28047 dev->tx_iovec = NULL;
28048
28049 // VC layer stats
28050 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28051 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28052
28053 // free the skb
28054 hrz_kfree_skb (skb);
28055 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28056 index 1c05212..c28e200 100644
28057 --- a/drivers/atm/idt77252.c
28058 +++ b/drivers/atm/idt77252.c
28059 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28060 else
28061 dev_kfree_skb(skb);
28062
28063 - atomic_inc(&vcc->stats->tx);
28064 + atomic_inc_unchecked(&vcc->stats->tx);
28065 }
28066
28067 atomic_dec(&scq->used);
28068 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28069 if ((sb = dev_alloc_skb(64)) == NULL) {
28070 printk("%s: Can't allocate buffers for aal0.\n",
28071 card->name);
28072 - atomic_add(i, &vcc->stats->rx_drop);
28073 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28074 break;
28075 }
28076 if (!atm_charge(vcc, sb->truesize)) {
28077 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28078 card->name);
28079 - atomic_add(i - 1, &vcc->stats->rx_drop);
28080 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28081 dev_kfree_skb(sb);
28082 break;
28083 }
28084 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28085 ATM_SKB(sb)->vcc = vcc;
28086 __net_timestamp(sb);
28087 vcc->push(vcc, sb);
28088 - atomic_inc(&vcc->stats->rx);
28089 + atomic_inc_unchecked(&vcc->stats->rx);
28090
28091 cell += ATM_CELL_PAYLOAD;
28092 }
28093 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28094 "(CDC: %08x)\n",
28095 card->name, len, rpp->len, readl(SAR_REG_CDC));
28096 recycle_rx_pool_skb(card, rpp);
28097 - atomic_inc(&vcc->stats->rx_err);
28098 + atomic_inc_unchecked(&vcc->stats->rx_err);
28099 return;
28100 }
28101 if (stat & SAR_RSQE_CRC) {
28102 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28103 recycle_rx_pool_skb(card, rpp);
28104 - atomic_inc(&vcc->stats->rx_err);
28105 + atomic_inc_unchecked(&vcc->stats->rx_err);
28106 return;
28107 }
28108 if (skb_queue_len(&rpp->queue) > 1) {
28109 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28110 RXPRINTK("%s: Can't alloc RX skb.\n",
28111 card->name);
28112 recycle_rx_pool_skb(card, rpp);
28113 - atomic_inc(&vcc->stats->rx_err);
28114 + atomic_inc_unchecked(&vcc->stats->rx_err);
28115 return;
28116 }
28117 if (!atm_charge(vcc, skb->truesize)) {
28118 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28119 __net_timestamp(skb);
28120
28121 vcc->push(vcc, skb);
28122 - atomic_inc(&vcc->stats->rx);
28123 + atomic_inc_unchecked(&vcc->stats->rx);
28124
28125 return;
28126 }
28127 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28128 __net_timestamp(skb);
28129
28130 vcc->push(vcc, skb);
28131 - atomic_inc(&vcc->stats->rx);
28132 + atomic_inc_unchecked(&vcc->stats->rx);
28133
28134 if (skb->truesize > SAR_FB_SIZE_3)
28135 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28136 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28137 if (vcc->qos.aal != ATM_AAL0) {
28138 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28139 card->name, vpi, vci);
28140 - atomic_inc(&vcc->stats->rx_drop);
28141 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28142 goto drop;
28143 }
28144
28145 if ((sb = dev_alloc_skb(64)) == NULL) {
28146 printk("%s: Can't allocate buffers for AAL0.\n",
28147 card->name);
28148 - atomic_inc(&vcc->stats->rx_err);
28149 + atomic_inc_unchecked(&vcc->stats->rx_err);
28150 goto drop;
28151 }
28152
28153 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28154 ATM_SKB(sb)->vcc = vcc;
28155 __net_timestamp(sb);
28156 vcc->push(vcc, sb);
28157 - atomic_inc(&vcc->stats->rx);
28158 + atomic_inc_unchecked(&vcc->stats->rx);
28159
28160 drop:
28161 skb_pull(queue, 64);
28162 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28163
28164 if (vc == NULL) {
28165 printk("%s: NULL connection in send().\n", card->name);
28166 - atomic_inc(&vcc->stats->tx_err);
28167 + atomic_inc_unchecked(&vcc->stats->tx_err);
28168 dev_kfree_skb(skb);
28169 return -EINVAL;
28170 }
28171 if (!test_bit(VCF_TX, &vc->flags)) {
28172 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28173 - atomic_inc(&vcc->stats->tx_err);
28174 + atomic_inc_unchecked(&vcc->stats->tx_err);
28175 dev_kfree_skb(skb);
28176 return -EINVAL;
28177 }
28178 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28179 break;
28180 default:
28181 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28182 - atomic_inc(&vcc->stats->tx_err);
28183 + atomic_inc_unchecked(&vcc->stats->tx_err);
28184 dev_kfree_skb(skb);
28185 return -EINVAL;
28186 }
28187
28188 if (skb_shinfo(skb)->nr_frags != 0) {
28189 printk("%s: No scatter-gather yet.\n", card->name);
28190 - atomic_inc(&vcc->stats->tx_err);
28191 + atomic_inc_unchecked(&vcc->stats->tx_err);
28192 dev_kfree_skb(skb);
28193 return -EINVAL;
28194 }
28195 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28196
28197 err = queue_skb(card, vc, skb, oam);
28198 if (err) {
28199 - atomic_inc(&vcc->stats->tx_err);
28200 + atomic_inc_unchecked(&vcc->stats->tx_err);
28201 dev_kfree_skb(skb);
28202 return err;
28203 }
28204 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28205 skb = dev_alloc_skb(64);
28206 if (!skb) {
28207 printk("%s: Out of memory in send_oam().\n", card->name);
28208 - atomic_inc(&vcc->stats->tx_err);
28209 + atomic_inc_unchecked(&vcc->stats->tx_err);
28210 return -ENOMEM;
28211 }
28212 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28213 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28214 index 9e373ba..cf93727 100644
28215 --- a/drivers/atm/iphase.c
28216 +++ b/drivers/atm/iphase.c
28217 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
28218 status = (u_short) (buf_desc_ptr->desc_mode);
28219 if (status & (RX_CER | RX_PTE | RX_OFL))
28220 {
28221 - atomic_inc(&vcc->stats->rx_err);
28222 + atomic_inc_unchecked(&vcc->stats->rx_err);
28223 IF_ERR(printk("IA: bad packet, dropping it");)
28224 if (status & RX_CER) {
28225 IF_ERR(printk(" cause: packet CRC error\n");)
28226 @@ -1169,7 +1169,7 @@ static int rx_pkt(struct atm_dev *dev)
28227 len = dma_addr - buf_addr;
28228 if (len > iadev->rx_buf_sz) {
28229 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28230 - atomic_inc(&vcc->stats->rx_err);
28231 + atomic_inc_unchecked(&vcc->stats->rx_err);
28232 goto out_free_desc;
28233 }
28234
28235 @@ -1319,7 +1319,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28236 ia_vcc = INPH_IA_VCC(vcc);
28237 if (ia_vcc == NULL)
28238 {
28239 - atomic_inc(&vcc->stats->rx_err);
28240 + atomic_inc_unchecked(&vcc->stats->rx_err);
28241 atm_return(vcc, skb->truesize);
28242 dev_kfree_skb_any(skb);
28243 goto INCR_DLE;
28244 @@ -1331,7 +1331,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28245 if ((length > iadev->rx_buf_sz) || (length >
28246 (skb->len - sizeof(struct cpcs_trailer))))
28247 {
28248 - atomic_inc(&vcc->stats->rx_err);
28249 + atomic_inc_unchecked(&vcc->stats->rx_err);
28250 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28251 length, skb->len);)
28252 atm_return(vcc, skb->truesize);
28253 @@ -1347,7 +1347,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28254
28255 IF_RX(printk("rx_dle_intr: skb push");)
28256 vcc->push(vcc,skb);
28257 - atomic_inc(&vcc->stats->rx);
28258 + atomic_inc_unchecked(&vcc->stats->rx);
28259 iadev->rx_pkt_cnt++;
28260 }
28261 INCR_DLE:
28262 @@ -2827,15 +2827,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28263 {
28264 struct k_sonet_stats *stats;
28265 stats = &PRIV(_ia_dev[board])->sonet_stats;
28266 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28267 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28268 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28269 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28270 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28271 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28272 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28273 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28274 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28275 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28276 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28277 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28278 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28279 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28280 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28281 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28282 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28283 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28284 }
28285 ia_cmds.status = 0;
28286 break;
28287 @@ -2940,7 +2940,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28288 if ((desc == 0) || (desc > iadev->num_tx_desc))
28289 {
28290 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28291 - atomic_inc(&vcc->stats->tx);
28292 + atomic_inc_unchecked(&vcc->stats->tx);
28293 if (vcc->pop)
28294 vcc->pop(vcc, skb);
28295 else
28296 @@ -3045,14 +3045,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28297 ATM_DESC(skb) = vcc->vci;
28298 skb_queue_tail(&iadev->tx_dma_q, skb);
28299
28300 - atomic_inc(&vcc->stats->tx);
28301 + atomic_inc_unchecked(&vcc->stats->tx);
28302 iadev->tx_pkt_cnt++;
28303 /* Increment transaction counter */
28304 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28305
28306 #if 0
28307 /* add flow control logic */
28308 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28309 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28310 if (iavcc->vc_desc_cnt > 10) {
28311 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28312 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28313 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28314 index f556969..0da15eb 100644
28315 --- a/drivers/atm/lanai.c
28316 +++ b/drivers/atm/lanai.c
28317 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28318 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28319 lanai_endtx(lanai, lvcc);
28320 lanai_free_skb(lvcc->tx.atmvcc, skb);
28321 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28322 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28323 }
28324
28325 /* Try to fill the buffer - don't call unless there is backlog */
28326 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28327 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28328 __net_timestamp(skb);
28329 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28330 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28331 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28332 out:
28333 lvcc->rx.buf.ptr = end;
28334 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28335 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28336 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28337 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28338 lanai->stats.service_rxnotaal5++;
28339 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28340 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28341 return 0;
28342 }
28343 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28344 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28345 int bytes;
28346 read_unlock(&vcc_sklist_lock);
28347 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28348 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28349 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28350 lvcc->stats.x.aal5.service_trash++;
28351 bytes = (SERVICE_GET_END(s) * 16) -
28352 (((unsigned long) lvcc->rx.buf.ptr) -
28353 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28354 }
28355 if (s & SERVICE_STREAM) {
28356 read_unlock(&vcc_sklist_lock);
28357 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28358 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28359 lvcc->stats.x.aal5.service_stream++;
28360 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28361 "PDU on VCI %d!\n", lanai->number, vci);
28362 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28363 return 0;
28364 }
28365 DPRINTK("got rx crc error on vci %d\n", vci);
28366 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28367 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28368 lvcc->stats.x.aal5.service_rxcrc++;
28369 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28370 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28371 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28372 index 1c70c45..300718d 100644
28373 --- a/drivers/atm/nicstar.c
28374 +++ b/drivers/atm/nicstar.c
28375 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28376 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28377 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28378 card->index);
28379 - atomic_inc(&vcc->stats->tx_err);
28380 + atomic_inc_unchecked(&vcc->stats->tx_err);
28381 dev_kfree_skb_any(skb);
28382 return -EINVAL;
28383 }
28384 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28385 if (!vc->tx) {
28386 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28387 card->index);
28388 - atomic_inc(&vcc->stats->tx_err);
28389 + atomic_inc_unchecked(&vcc->stats->tx_err);
28390 dev_kfree_skb_any(skb);
28391 return -EINVAL;
28392 }
28393 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28394 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28395 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28396 card->index);
28397 - atomic_inc(&vcc->stats->tx_err);
28398 + atomic_inc_unchecked(&vcc->stats->tx_err);
28399 dev_kfree_skb_any(skb);
28400 return -EINVAL;
28401 }
28402
28403 if (skb_shinfo(skb)->nr_frags != 0) {
28404 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28405 - atomic_inc(&vcc->stats->tx_err);
28406 + atomic_inc_unchecked(&vcc->stats->tx_err);
28407 dev_kfree_skb_any(skb);
28408 return -EINVAL;
28409 }
28410 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28411 }
28412
28413 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28414 - atomic_inc(&vcc->stats->tx_err);
28415 + atomic_inc_unchecked(&vcc->stats->tx_err);
28416 dev_kfree_skb_any(skb);
28417 return -EIO;
28418 }
28419 - atomic_inc(&vcc->stats->tx);
28420 + atomic_inc_unchecked(&vcc->stats->tx);
28421
28422 return 0;
28423 }
28424 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28425 printk
28426 ("nicstar%d: Can't allocate buffers for aal0.\n",
28427 card->index);
28428 - atomic_add(i, &vcc->stats->rx_drop);
28429 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28430 break;
28431 }
28432 if (!atm_charge(vcc, sb->truesize)) {
28433 RXPRINTK
28434 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28435 card->index);
28436 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28437 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28438 dev_kfree_skb_any(sb);
28439 break;
28440 }
28441 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28442 ATM_SKB(sb)->vcc = vcc;
28443 __net_timestamp(sb);
28444 vcc->push(vcc, sb);
28445 - atomic_inc(&vcc->stats->rx);
28446 + atomic_inc_unchecked(&vcc->stats->rx);
28447 cell += ATM_CELL_PAYLOAD;
28448 }
28449
28450 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28451 if (iovb == NULL) {
28452 printk("nicstar%d: Out of iovec buffers.\n",
28453 card->index);
28454 - atomic_inc(&vcc->stats->rx_drop);
28455 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28456 recycle_rx_buf(card, skb);
28457 return;
28458 }
28459 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28460 small or large buffer itself. */
28461 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28462 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28463 - atomic_inc(&vcc->stats->rx_err);
28464 + atomic_inc_unchecked(&vcc->stats->rx_err);
28465 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28466 NS_MAX_IOVECS);
28467 NS_PRV_IOVCNT(iovb) = 0;
28468 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28469 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28470 card->index);
28471 which_list(card, skb);
28472 - atomic_inc(&vcc->stats->rx_err);
28473 + atomic_inc_unchecked(&vcc->stats->rx_err);
28474 recycle_rx_buf(card, skb);
28475 vc->rx_iov = NULL;
28476 recycle_iov_buf(card, iovb);
28477 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28478 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28479 card->index);
28480 which_list(card, skb);
28481 - atomic_inc(&vcc->stats->rx_err);
28482 + atomic_inc_unchecked(&vcc->stats->rx_err);
28483 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28484 NS_PRV_IOVCNT(iovb));
28485 vc->rx_iov = NULL;
28486 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28487 printk(" - PDU size mismatch.\n");
28488 else
28489 printk(".\n");
28490 - atomic_inc(&vcc->stats->rx_err);
28491 + atomic_inc_unchecked(&vcc->stats->rx_err);
28492 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28493 NS_PRV_IOVCNT(iovb));
28494 vc->rx_iov = NULL;
28495 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28496 /* skb points to a small buffer */
28497 if (!atm_charge(vcc, skb->truesize)) {
28498 push_rxbufs(card, skb);
28499 - atomic_inc(&vcc->stats->rx_drop);
28500 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28501 } else {
28502 skb_put(skb, len);
28503 dequeue_sm_buf(card, skb);
28504 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28505 ATM_SKB(skb)->vcc = vcc;
28506 __net_timestamp(skb);
28507 vcc->push(vcc, skb);
28508 - atomic_inc(&vcc->stats->rx);
28509 + atomic_inc_unchecked(&vcc->stats->rx);
28510 }
28511 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28512 struct sk_buff *sb;
28513 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28514 if (len <= NS_SMBUFSIZE) {
28515 if (!atm_charge(vcc, sb->truesize)) {
28516 push_rxbufs(card, sb);
28517 - atomic_inc(&vcc->stats->rx_drop);
28518 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28519 } else {
28520 skb_put(sb, len);
28521 dequeue_sm_buf(card, sb);
28522 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28523 ATM_SKB(sb)->vcc = vcc;
28524 __net_timestamp(sb);
28525 vcc->push(vcc, sb);
28526 - atomic_inc(&vcc->stats->rx);
28527 + atomic_inc_unchecked(&vcc->stats->rx);
28528 }
28529
28530 push_rxbufs(card, skb);
28531 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28532
28533 if (!atm_charge(vcc, skb->truesize)) {
28534 push_rxbufs(card, skb);
28535 - atomic_inc(&vcc->stats->rx_drop);
28536 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28537 } else {
28538 dequeue_lg_buf(card, skb);
28539 #ifdef NS_USE_DESTRUCTORS
28540 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28541 ATM_SKB(skb)->vcc = vcc;
28542 __net_timestamp(skb);
28543 vcc->push(vcc, skb);
28544 - atomic_inc(&vcc->stats->rx);
28545 + atomic_inc_unchecked(&vcc->stats->rx);
28546 }
28547
28548 push_rxbufs(card, sb);
28549 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28550 printk
28551 ("nicstar%d: Out of huge buffers.\n",
28552 card->index);
28553 - atomic_inc(&vcc->stats->rx_drop);
28554 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28555 recycle_iovec_rx_bufs(card,
28556 (struct iovec *)
28557 iovb->data,
28558 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28559 card->hbpool.count++;
28560 } else
28561 dev_kfree_skb_any(hb);
28562 - atomic_inc(&vcc->stats->rx_drop);
28563 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28564 } else {
28565 /* Copy the small buffer to the huge buffer */
28566 sb = (struct sk_buff *)iov->iov_base;
28567 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28568 #endif /* NS_USE_DESTRUCTORS */
28569 __net_timestamp(hb);
28570 vcc->push(vcc, hb);
28571 - atomic_inc(&vcc->stats->rx);
28572 + atomic_inc_unchecked(&vcc->stats->rx);
28573 }
28574 }
28575
28576 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28577 index e8cd652..bbbd1fc 100644
28578 --- a/drivers/atm/solos-pci.c
28579 +++ b/drivers/atm/solos-pci.c
28580 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28581 }
28582 atm_charge(vcc, skb->truesize);
28583 vcc->push(vcc, skb);
28584 - atomic_inc(&vcc->stats->rx);
28585 + atomic_inc_unchecked(&vcc->stats->rx);
28586 break;
28587
28588 case PKT_STATUS:
28589 @@ -1008,7 +1008,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28590 vcc = SKB_CB(oldskb)->vcc;
28591
28592 if (vcc) {
28593 - atomic_inc(&vcc->stats->tx);
28594 + atomic_inc_unchecked(&vcc->stats->tx);
28595 solos_pop(vcc, oldskb);
28596 } else
28597 dev_kfree_skb_irq(oldskb);
28598 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28599 index 90f1ccc..04c4a1e 100644
28600 --- a/drivers/atm/suni.c
28601 +++ b/drivers/atm/suni.c
28602 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28603
28604
28605 #define ADD_LIMITED(s,v) \
28606 - atomic_add((v),&stats->s); \
28607 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28608 + atomic_add_unchecked((v),&stats->s); \
28609 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28610
28611
28612 static void suni_hz(unsigned long from_timer)
28613 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28614 index 5120a96..e2572bd 100644
28615 --- a/drivers/atm/uPD98402.c
28616 +++ b/drivers/atm/uPD98402.c
28617 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28618 struct sonet_stats tmp;
28619 int error = 0;
28620
28621 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28622 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28623 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28624 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28625 if (zero && !error) {
28626 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28627
28628
28629 #define ADD_LIMITED(s,v) \
28630 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28631 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28632 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28633 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28634 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28635 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28636
28637
28638 static void stat_event(struct atm_dev *dev)
28639 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28640 if (reason & uPD98402_INT_PFM) stat_event(dev);
28641 if (reason & uPD98402_INT_PCO) {
28642 (void) GET(PCOCR); /* clear interrupt cause */
28643 - atomic_add(GET(HECCT),
28644 + atomic_add_unchecked(GET(HECCT),
28645 &PRIV(dev)->sonet_stats.uncorr_hcs);
28646 }
28647 if ((reason & uPD98402_INT_RFO) &&
28648 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28649 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28650 uPD98402_INT_LOS),PIMR); /* enable them */
28651 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28652 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28653 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28654 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28655 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28656 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28657 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28658 return 0;
28659 }
28660
28661 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28662 index d889f56..17eb71e 100644
28663 --- a/drivers/atm/zatm.c
28664 +++ b/drivers/atm/zatm.c
28665 @@ -460,7 +460,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28666 }
28667 if (!size) {
28668 dev_kfree_skb_irq(skb);
28669 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28670 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28671 continue;
28672 }
28673 if (!atm_charge(vcc,skb->truesize)) {
28674 @@ -470,7 +470,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28675 skb->len = size;
28676 ATM_SKB(skb)->vcc = vcc;
28677 vcc->push(vcc,skb);
28678 - atomic_inc(&vcc->stats->rx);
28679 + atomic_inc_unchecked(&vcc->stats->rx);
28680 }
28681 zout(pos & 0xffff,MTA(mbx));
28682 #if 0 /* probably a stupid idea */
28683 @@ -734,7 +734,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28684 skb_queue_head(&zatm_vcc->backlog,skb);
28685 break;
28686 }
28687 - atomic_inc(&vcc->stats->tx);
28688 + atomic_inc_unchecked(&vcc->stats->tx);
28689 wake_up(&zatm_vcc->tx_wait);
28690 }
28691
28692 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28693 index 8493536..31adee0 100644
28694 --- a/drivers/base/devtmpfs.c
28695 +++ b/drivers/base/devtmpfs.c
28696 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28697 if (!thread)
28698 return 0;
28699
28700 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28701 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28702 if (err)
28703 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28704 else
28705 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28706 index caf995f..6f76697 100644
28707 --- a/drivers/base/power/wakeup.c
28708 +++ b/drivers/base/power/wakeup.c
28709 @@ -30,14 +30,14 @@ bool events_check_enabled;
28710 * They need to be modified together atomically, so it's better to use one
28711 * atomic variable to hold them both.
28712 */
28713 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28714 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28715
28716 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28717 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28718
28719 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28720 {
28721 - unsigned int comb = atomic_read(&combined_event_count);
28722 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28723
28724 *cnt = (comb >> IN_PROGRESS_BITS);
28725 *inpr = comb & MAX_IN_PROGRESS;
28726 @@ -353,7 +353,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28727 ws->last_time = ktime_get();
28728
28729 /* Increment the counter of events in progress. */
28730 - atomic_inc(&combined_event_count);
28731 + atomic_inc_unchecked(&combined_event_count);
28732 }
28733
28734 /**
28735 @@ -443,7 +443,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28736 * Increment the counter of registered wakeup events and decrement the
28737 * couter of wakeup events in progress simultaneously.
28738 */
28739 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28740 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28741 }
28742
28743 /**
28744 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28745 index b0f553b..77b928b 100644
28746 --- a/drivers/block/cciss.c
28747 +++ b/drivers/block/cciss.c
28748 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28749 int err;
28750 u32 cp;
28751
28752 + memset(&arg64, 0, sizeof(arg64));
28753 +
28754 err = 0;
28755 err |=
28756 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28757 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28758 while (!list_empty(&h->reqQ)) {
28759 c = list_entry(h->reqQ.next, CommandList_struct, list);
28760 /* can't do anything if fifo is full */
28761 - if ((h->access.fifo_full(h))) {
28762 + if ((h->access->fifo_full(h))) {
28763 dev_warn(&h->pdev->dev, "fifo full\n");
28764 break;
28765 }
28766 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28767 h->Qdepth--;
28768
28769 /* Tell the controller execute command */
28770 - h->access.submit_command(h, c);
28771 + h->access->submit_command(h, c);
28772
28773 /* Put job onto the completed Q */
28774 addQ(&h->cmpQ, c);
28775 @@ -3443,17 +3445,17 @@ startio:
28776
28777 static inline unsigned long get_next_completion(ctlr_info_t *h)
28778 {
28779 - return h->access.command_completed(h);
28780 + return h->access->command_completed(h);
28781 }
28782
28783 static inline int interrupt_pending(ctlr_info_t *h)
28784 {
28785 - return h->access.intr_pending(h);
28786 + return h->access->intr_pending(h);
28787 }
28788
28789 static inline long interrupt_not_for_us(ctlr_info_t *h)
28790 {
28791 - return ((h->access.intr_pending(h) == 0) ||
28792 + return ((h->access->intr_pending(h) == 0) ||
28793 (h->interrupts_enabled == 0));
28794 }
28795
28796 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28797 u32 a;
28798
28799 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28800 - return h->access.command_completed(h);
28801 + return h->access->command_completed(h);
28802
28803 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28804 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28805 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28806 trans_support & CFGTBL_Trans_use_short_tags);
28807
28808 /* Change the access methods to the performant access methods */
28809 - h->access = SA5_performant_access;
28810 + h->access = &SA5_performant_access;
28811 h->transMethod = CFGTBL_Trans_Performant;
28812
28813 return;
28814 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28815 if (prod_index < 0)
28816 return -ENODEV;
28817 h->product_name = products[prod_index].product_name;
28818 - h->access = *(products[prod_index].access);
28819 + h->access = products[prod_index].access;
28820
28821 if (cciss_board_disabled(h)) {
28822 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28823 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28824 }
28825
28826 /* make sure the board interrupts are off */
28827 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28828 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28829 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28830 if (rc)
28831 goto clean2;
28832 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28833 * fake ones to scoop up any residual completions.
28834 */
28835 spin_lock_irqsave(&h->lock, flags);
28836 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28837 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28838 spin_unlock_irqrestore(&h->lock, flags);
28839 free_irq(h->intr[h->intr_mode], h);
28840 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28841 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28842 dev_info(&h->pdev->dev, "Board READY.\n");
28843 dev_info(&h->pdev->dev,
28844 "Waiting for stale completions to drain.\n");
28845 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28846 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28847 msleep(10000);
28848 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28849 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28850
28851 rc = controller_reset_failed(h->cfgtable);
28852 if (rc)
28853 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28854 cciss_scsi_setup(h);
28855
28856 /* Turn the interrupts on so we can service requests */
28857 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28858 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28859
28860 /* Get the firmware version */
28861 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28862 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28863 kfree(flush_buf);
28864 if (return_code != IO_OK)
28865 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28866 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28867 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28868 free_irq(h->intr[h->intr_mode], h);
28869 }
28870
28871 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28872 index 7fda30e..eb5dfe0 100644
28873 --- a/drivers/block/cciss.h
28874 +++ b/drivers/block/cciss.h
28875 @@ -101,7 +101,7 @@ struct ctlr_info
28876 /* information about each logical volume */
28877 drive_info_struct *drv[CISS_MAX_LUN];
28878
28879 - struct access_method access;
28880 + struct access_method *access;
28881
28882 /* queue and queue Info */
28883 struct list_head reqQ;
28884 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28885 index 9125bbe..eede5c8 100644
28886 --- a/drivers/block/cpqarray.c
28887 +++ b/drivers/block/cpqarray.c
28888 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28889 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
28890 goto Enomem4;
28891 }
28892 - hba[i]->access.set_intr_mask(hba[i], 0);
28893 + hba[i]->access->set_intr_mask(hba[i], 0);
28894 if (request_irq(hba[i]->intr, do_ida_intr,
28895 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
28896 {
28897 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
28898 add_timer(&hba[i]->timer);
28899
28900 /* Enable IRQ now that spinlock and rate limit timer are set up */
28901 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28902 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
28903
28904 for(j=0; j<NWD; j++) {
28905 struct gendisk *disk = ida_gendisk[i][j];
28906 @@ -694,7 +694,7 @@ DBGINFO(
28907 for(i=0; i<NR_PRODUCTS; i++) {
28908 if (board_id == products[i].board_id) {
28909 c->product_name = products[i].product_name;
28910 - c->access = *(products[i].access);
28911 + c->access = products[i].access;
28912 break;
28913 }
28914 }
28915 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
28916 hba[ctlr]->intr = intr;
28917 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
28918 hba[ctlr]->product_name = products[j].product_name;
28919 - hba[ctlr]->access = *(products[j].access);
28920 + hba[ctlr]->access = products[j].access;
28921 hba[ctlr]->ctlr = ctlr;
28922 hba[ctlr]->board_id = board_id;
28923 hba[ctlr]->pci_dev = NULL; /* not PCI */
28924 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
28925
28926 while((c = h->reqQ) != NULL) {
28927 /* Can't do anything if we're busy */
28928 - if (h->access.fifo_full(h) == 0)
28929 + if (h->access->fifo_full(h) == 0)
28930 return;
28931
28932 /* Get the first entry from the request Q */
28933 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
28934 h->Qdepth--;
28935
28936 /* Tell the controller to do our bidding */
28937 - h->access.submit_command(h, c);
28938 + h->access->submit_command(h, c);
28939
28940 /* Get onto the completion Q */
28941 addQ(&h->cmpQ, c);
28942 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28943 unsigned long flags;
28944 __u32 a,a1;
28945
28946 - istat = h->access.intr_pending(h);
28947 + istat = h->access->intr_pending(h);
28948 /* Is this interrupt for us? */
28949 if (istat == 0)
28950 return IRQ_NONE;
28951 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
28952 */
28953 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
28954 if (istat & FIFO_NOT_EMPTY) {
28955 - while((a = h->access.command_completed(h))) {
28956 + while((a = h->access->command_completed(h))) {
28957 a1 = a; a &= ~3;
28958 if ((c = h->cmpQ) == NULL)
28959 {
28960 @@ -1449,11 +1449,11 @@ static int sendcmd(
28961 /*
28962 * Disable interrupt
28963 */
28964 - info_p->access.set_intr_mask(info_p, 0);
28965 + info_p->access->set_intr_mask(info_p, 0);
28966 /* Make sure there is room in the command FIFO */
28967 /* Actually it should be completely empty at this time. */
28968 for (i = 200000; i > 0; i--) {
28969 - temp = info_p->access.fifo_full(info_p);
28970 + temp = info_p->access->fifo_full(info_p);
28971 if (temp != 0) {
28972 break;
28973 }
28974 @@ -1466,7 +1466,7 @@ DBG(
28975 /*
28976 * Send the cmd
28977 */
28978 - info_p->access.submit_command(info_p, c);
28979 + info_p->access->submit_command(info_p, c);
28980 complete = pollcomplete(ctlr);
28981
28982 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
28983 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
28984 * we check the new geometry. Then turn interrupts back on when
28985 * we're done.
28986 */
28987 - host->access.set_intr_mask(host, 0);
28988 + host->access->set_intr_mask(host, 0);
28989 getgeometry(ctlr);
28990 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
28991 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
28992
28993 for(i=0; i<NWD; i++) {
28994 struct gendisk *disk = ida_gendisk[ctlr][i];
28995 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
28996 /* Wait (up to 2 seconds) for a command to complete */
28997
28998 for (i = 200000; i > 0; i--) {
28999 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
29000 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
29001 if (done == 0) {
29002 udelay(10); /* a short fixed delay */
29003 } else
29004 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29005 index be73e9d..7fbf140 100644
29006 --- a/drivers/block/cpqarray.h
29007 +++ b/drivers/block/cpqarray.h
29008 @@ -99,7 +99,7 @@ struct ctlr_info {
29009 drv_info_t drv[NWD];
29010 struct proc_dir_entry *proc;
29011
29012 - struct access_method access;
29013 + struct access_method *access;
29014
29015 cmdlist_t *reqQ;
29016 cmdlist_t *cmpQ;
29017 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29018 index 8d68056..e67050f 100644
29019 --- a/drivers/block/drbd/drbd_int.h
29020 +++ b/drivers/block/drbd/drbd_int.h
29021 @@ -736,7 +736,7 @@ struct drbd_request;
29022 struct drbd_epoch {
29023 struct list_head list;
29024 unsigned int barrier_nr;
29025 - atomic_t epoch_size; /* increased on every request added. */
29026 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29027 atomic_t active; /* increased on every req. added, and dec on every finished. */
29028 unsigned long flags;
29029 };
29030 @@ -1108,7 +1108,7 @@ struct drbd_conf {
29031 void *int_dig_in;
29032 void *int_dig_vv;
29033 wait_queue_head_t seq_wait;
29034 - atomic_t packet_seq;
29035 + atomic_unchecked_t packet_seq;
29036 unsigned int peer_seq;
29037 spinlock_t peer_seq_lock;
29038 unsigned int minor;
29039 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29040
29041 static inline void drbd_tcp_cork(struct socket *sock)
29042 {
29043 - int __user val = 1;
29044 + int val = 1;
29045 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29046 - (char __user *)&val, sizeof(val));
29047 + (char __force_user *)&val, sizeof(val));
29048 }
29049
29050 static inline void drbd_tcp_uncork(struct socket *sock)
29051 {
29052 - int __user val = 0;
29053 + int val = 0;
29054 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29055 - (char __user *)&val, sizeof(val));
29056 + (char __force_user *)&val, sizeof(val));
29057 }
29058
29059 static inline void drbd_tcp_nodelay(struct socket *sock)
29060 {
29061 - int __user val = 1;
29062 + int val = 1;
29063 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29064 - (char __user *)&val, sizeof(val));
29065 + (char __force_user *)&val, sizeof(val));
29066 }
29067
29068 static inline void drbd_tcp_quickack(struct socket *sock)
29069 {
29070 - int __user val = 2;
29071 + int val = 2;
29072 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29073 - (char __user *)&val, sizeof(val));
29074 + (char __force_user *)&val, sizeof(val));
29075 }
29076
29077 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29078 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29079 index 211fc44..c5116f1 100644
29080 --- a/drivers/block/drbd/drbd_main.c
29081 +++ b/drivers/block/drbd/drbd_main.c
29082 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29083 p.sector = sector;
29084 p.block_id = block_id;
29085 p.blksize = blksize;
29086 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29087 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29088
29089 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29090 return false;
29091 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29092 p.sector = cpu_to_be64(req->sector);
29093 p.block_id = (unsigned long)req;
29094 p.seq_num = cpu_to_be32(req->seq_num =
29095 - atomic_add_return(1, &mdev->packet_seq));
29096 + atomic_add_return_unchecked(1, &mdev->packet_seq));
29097
29098 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29099
29100 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29101 atomic_set(&mdev->unacked_cnt, 0);
29102 atomic_set(&mdev->local_cnt, 0);
29103 atomic_set(&mdev->net_cnt, 0);
29104 - atomic_set(&mdev->packet_seq, 0);
29105 + atomic_set_unchecked(&mdev->packet_seq, 0);
29106 atomic_set(&mdev->pp_in_use, 0);
29107 atomic_set(&mdev->pp_in_use_by_net, 0);
29108 atomic_set(&mdev->rs_sect_in, 0);
29109 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29110 mdev->receiver.t_state);
29111
29112 /* no need to lock it, I'm the only thread alive */
29113 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29114 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29115 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29116 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29117 mdev->al_writ_cnt =
29118 mdev->bm_writ_cnt =
29119 mdev->read_cnt =
29120 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29121 index af2a250..219c74b 100644
29122 --- a/drivers/block/drbd/drbd_nl.c
29123 +++ b/drivers/block/drbd/drbd_nl.c
29124 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29125 module_put(THIS_MODULE);
29126 }
29127
29128 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29129 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29130
29131 static unsigned short *
29132 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29133 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29134 cn_reply->id.idx = CN_IDX_DRBD;
29135 cn_reply->id.val = CN_VAL_DRBD;
29136
29137 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29138 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29139 cn_reply->ack = 0; /* not used here. */
29140 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29141 (int)((char *)tl - (char *)reply->tag_list);
29142 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29143 cn_reply->id.idx = CN_IDX_DRBD;
29144 cn_reply->id.val = CN_VAL_DRBD;
29145
29146 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29147 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29148 cn_reply->ack = 0; /* not used here. */
29149 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29150 (int)((char *)tl - (char *)reply->tag_list);
29151 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29152 cn_reply->id.idx = CN_IDX_DRBD;
29153 cn_reply->id.val = CN_VAL_DRBD;
29154
29155 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29156 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29157 cn_reply->ack = 0; // not used here.
29158 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29159 (int)((char*)tl - (char*)reply->tag_list);
29160 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29161 cn_reply->id.idx = CN_IDX_DRBD;
29162 cn_reply->id.val = CN_VAL_DRBD;
29163
29164 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29165 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29166 cn_reply->ack = 0; /* not used here. */
29167 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29168 (int)((char *)tl - (char *)reply->tag_list);
29169 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29170 index 43beaca..4a5b1dd 100644
29171 --- a/drivers/block/drbd/drbd_receiver.c
29172 +++ b/drivers/block/drbd/drbd_receiver.c
29173 @@ -894,7 +894,7 @@ retry:
29174 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29175 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29176
29177 - atomic_set(&mdev->packet_seq, 0);
29178 + atomic_set_unchecked(&mdev->packet_seq, 0);
29179 mdev->peer_seq = 0;
29180
29181 drbd_thread_start(&mdev->asender);
29182 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29183 do {
29184 next_epoch = NULL;
29185
29186 - epoch_size = atomic_read(&epoch->epoch_size);
29187 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29188
29189 switch (ev & ~EV_CLEANUP) {
29190 case EV_PUT:
29191 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29192 rv = FE_DESTROYED;
29193 } else {
29194 epoch->flags = 0;
29195 - atomic_set(&epoch->epoch_size, 0);
29196 + atomic_set_unchecked(&epoch->epoch_size, 0);
29197 /* atomic_set(&epoch->active, 0); is already zero */
29198 if (rv == FE_STILL_LIVE)
29199 rv = FE_RECYCLED;
29200 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29201 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29202 drbd_flush(mdev);
29203
29204 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29205 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29206 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29207 if (epoch)
29208 break;
29209 }
29210
29211 epoch = mdev->current_epoch;
29212 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29213 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29214
29215 D_ASSERT(atomic_read(&epoch->active) == 0);
29216 D_ASSERT(epoch->flags == 0);
29217 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29218 }
29219
29220 epoch->flags = 0;
29221 - atomic_set(&epoch->epoch_size, 0);
29222 + atomic_set_unchecked(&epoch->epoch_size, 0);
29223 atomic_set(&epoch->active, 0);
29224
29225 spin_lock(&mdev->epoch_lock);
29226 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29227 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29228 list_add(&epoch->list, &mdev->current_epoch->list);
29229 mdev->current_epoch = epoch;
29230 mdev->epochs++;
29231 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29232 spin_unlock(&mdev->peer_seq_lock);
29233
29234 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29235 - atomic_inc(&mdev->current_epoch->epoch_size);
29236 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29237 return drbd_drain_block(mdev, data_size);
29238 }
29239
29240 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29241
29242 spin_lock(&mdev->epoch_lock);
29243 e->epoch = mdev->current_epoch;
29244 - atomic_inc(&e->epoch->epoch_size);
29245 + atomic_inc_unchecked(&e->epoch->epoch_size);
29246 atomic_inc(&e->epoch->active);
29247 spin_unlock(&mdev->epoch_lock);
29248
29249 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29250 D_ASSERT(list_empty(&mdev->done_ee));
29251
29252 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29253 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29254 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29255 D_ASSERT(list_empty(&mdev->current_epoch->list));
29256 }
29257
29258 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29259 index cd50435..ba1ffb5 100644
29260 --- a/drivers/block/loop.c
29261 +++ b/drivers/block/loop.c
29262 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29263 mm_segment_t old_fs = get_fs();
29264
29265 set_fs(get_ds());
29266 - bw = file->f_op->write(file, buf, len, &pos);
29267 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29268 set_fs(old_fs);
29269 if (likely(bw == len))
29270 return 0;
29271 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29272 index 4364303..9adf4ee 100644
29273 --- a/drivers/char/Kconfig
29274 +++ b/drivers/char/Kconfig
29275 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29276
29277 config DEVKMEM
29278 bool "/dev/kmem virtual device support"
29279 - default y
29280 + default n
29281 + depends on !GRKERNSEC_KMEM
29282 help
29283 Say Y here if you want to support the /dev/kmem device. The
29284 /dev/kmem device is rarely used, but can be used for certain
29285 @@ -596,6 +597,7 @@ config DEVPORT
29286 bool
29287 depends on !M68K
29288 depends on ISA || PCI
29289 + depends on !GRKERNSEC_KMEM
29290 default y
29291
29292 source "drivers/s390/char/Kconfig"
29293 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29294 index 2e04433..22afc64 100644
29295 --- a/drivers/char/agp/frontend.c
29296 +++ b/drivers/char/agp/frontend.c
29297 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29298 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29299 return -EFAULT;
29300
29301 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29302 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29303 return -EFAULT;
29304
29305 client = agp_find_client_by_pid(reserve.pid);
29306 diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
29307 index 095ab90..afad0a4 100644
29308 --- a/drivers/char/briq_panel.c
29309 +++ b/drivers/char/briq_panel.c
29310 @@ -9,6 +9,7 @@
29311 #include <linux/types.h>
29312 #include <linux/errno.h>
29313 #include <linux/tty.h>
29314 +#include <linux/mutex.h>
29315 #include <linux/timer.h>
29316 #include <linux/kernel.h>
29317 #include <linux/wait.h>
29318 @@ -34,6 +35,7 @@ static int vfd_is_open;
29319 static unsigned char vfd[40];
29320 static int vfd_cursor;
29321 static unsigned char ledpb, led;
29322 +static DEFINE_MUTEX(vfd_mutex);
29323
29324 static void update_vfd(void)
29325 {
29326 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29327 if (!vfd_is_open)
29328 return -EBUSY;
29329
29330 + mutex_lock(&vfd_mutex);
29331 for (;;) {
29332 char c;
29333 if (!indx)
29334 break;
29335 - if (get_user(c, buf))
29336 + if (get_user(c, buf)) {
29337 + mutex_unlock(&vfd_mutex);
29338 return -EFAULT;
29339 + }
29340 if (esc) {
29341 set_led(c);
29342 esc = 0;
29343 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
29344 buf++;
29345 }
29346 update_vfd();
29347 + mutex_unlock(&vfd_mutex);
29348
29349 return len;
29350 }
29351 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29352 index f773a9d..65cd683 100644
29353 --- a/drivers/char/genrtc.c
29354 +++ b/drivers/char/genrtc.c
29355 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
29356 switch (cmd) {
29357
29358 case RTC_PLL_GET:
29359 + memset(&pll, 0, sizeof(pll));
29360 if (get_rtc_pll(&pll))
29361 return -EINVAL;
29362 else
29363 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29364 index 0833896..cccce52 100644
29365 --- a/drivers/char/hpet.c
29366 +++ b/drivers/char/hpet.c
29367 @@ -572,7 +572,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29368 }
29369
29370 static int
29371 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29372 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29373 struct hpet_info *info)
29374 {
29375 struct hpet_timer __iomem *timer;
29376 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29377 index 58c0e63..46c16bf 100644
29378 --- a/drivers/char/ipmi/ipmi_msghandler.c
29379 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29380 @@ -415,7 +415,7 @@ struct ipmi_smi {
29381 struct proc_dir_entry *proc_dir;
29382 char proc_dir_name[10];
29383
29384 - atomic_t stats[IPMI_NUM_STATS];
29385 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29386
29387 /*
29388 * run_to_completion duplicate of smb_info, smi_info
29389 @@ -448,9 +448,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29390
29391
29392 #define ipmi_inc_stat(intf, stat) \
29393 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29394 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29395 #define ipmi_get_stat(intf, stat) \
29396 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29397 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29398
29399 static int is_lan_addr(struct ipmi_addr *addr)
29400 {
29401 @@ -2868,7 +2868,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29402 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29403 init_waitqueue_head(&intf->waitq);
29404 for (i = 0; i < IPMI_NUM_STATS; i++)
29405 - atomic_set(&intf->stats[i], 0);
29406 + atomic_set_unchecked(&intf->stats[i], 0);
29407
29408 intf->proc_dir = NULL;
29409
29410 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29411 index 50fcf9c..91b5528 100644
29412 --- a/drivers/char/ipmi/ipmi_si_intf.c
29413 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29414 @@ -277,7 +277,7 @@ struct smi_info {
29415 unsigned char slave_addr;
29416
29417 /* Counters and things for the proc filesystem. */
29418 - atomic_t stats[SI_NUM_STATS];
29419 + atomic_unchecked_t stats[SI_NUM_STATS];
29420
29421 struct task_struct *thread;
29422
29423 @@ -286,9 +286,9 @@ struct smi_info {
29424 };
29425
29426 #define smi_inc_stat(smi, stat) \
29427 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29428 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29429 #define smi_get_stat(smi, stat) \
29430 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29431 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29432
29433 #define SI_MAX_PARMS 4
29434
29435 @@ -3230,7 +3230,7 @@ static int try_smi_init(struct smi_info *new_smi)
29436 atomic_set(&new_smi->req_events, 0);
29437 new_smi->run_to_completion = 0;
29438 for (i = 0; i < SI_NUM_STATS; i++)
29439 - atomic_set(&new_smi->stats[i], 0);
29440 + atomic_set_unchecked(&new_smi->stats[i], 0);
29441
29442 new_smi->interrupt_disabled = 1;
29443 atomic_set(&new_smi->stop_operation, 0);
29444 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29445 index 1aeaaba..e018570 100644
29446 --- a/drivers/char/mbcs.c
29447 +++ b/drivers/char/mbcs.c
29448 @@ -800,7 +800,7 @@ static int mbcs_remove(struct cx_dev *dev)
29449 return 0;
29450 }
29451
29452 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29453 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29454 {
29455 .part_num = MBCS_PART_NUM,
29456 .mfg_num = MBCS_MFG_NUM,
29457 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29458 index d6e9d08..4493e89 100644
29459 --- a/drivers/char/mem.c
29460 +++ b/drivers/char/mem.c
29461 @@ -18,6 +18,7 @@
29462 #include <linux/raw.h>
29463 #include <linux/tty.h>
29464 #include <linux/capability.h>
29465 +#include <linux/security.h>
29466 #include <linux/ptrace.h>
29467 #include <linux/device.h>
29468 #include <linux/highmem.h>
29469 @@ -35,6 +36,10 @@
29470 # include <linux/efi.h>
29471 #endif
29472
29473 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29474 +extern const struct file_operations grsec_fops;
29475 +#endif
29476 +
29477 static inline unsigned long size_inside_page(unsigned long start,
29478 unsigned long size)
29479 {
29480 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29481
29482 while (cursor < to) {
29483 if (!devmem_is_allowed(pfn)) {
29484 +#ifdef CONFIG_GRKERNSEC_KMEM
29485 + gr_handle_mem_readwrite(from, to);
29486 +#else
29487 printk(KERN_INFO
29488 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29489 current->comm, from, to);
29490 +#endif
29491 return 0;
29492 }
29493 cursor += PAGE_SIZE;
29494 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29495 }
29496 return 1;
29497 }
29498 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29499 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29500 +{
29501 + return 0;
29502 +}
29503 #else
29504 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29505 {
29506 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29507
29508 while (count > 0) {
29509 unsigned long remaining;
29510 + char *temp;
29511
29512 sz = size_inside_page(p, count);
29513
29514 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29515 if (!ptr)
29516 return -EFAULT;
29517
29518 - remaining = copy_to_user(buf, ptr, sz);
29519 +#ifdef CONFIG_PAX_USERCOPY
29520 + temp = kmalloc(sz, GFP_KERNEL);
29521 + if (!temp) {
29522 + unxlate_dev_mem_ptr(p, ptr);
29523 + return -ENOMEM;
29524 + }
29525 + memcpy(temp, ptr, sz);
29526 +#else
29527 + temp = ptr;
29528 +#endif
29529 +
29530 + remaining = copy_to_user(buf, temp, sz);
29531 +
29532 +#ifdef CONFIG_PAX_USERCOPY
29533 + kfree(temp);
29534 +#endif
29535 +
29536 unxlate_dev_mem_ptr(p, ptr);
29537 if (remaining)
29538 return -EFAULT;
29539 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29540 size_t count, loff_t *ppos)
29541 {
29542 unsigned long p = *ppos;
29543 - ssize_t low_count, read, sz;
29544 + ssize_t low_count, read, sz, err = 0;
29545 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29546 - int err = 0;
29547
29548 read = 0;
29549 if (p < (unsigned long) high_memory) {
29550 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29551 }
29552 #endif
29553 while (low_count > 0) {
29554 + char *temp;
29555 +
29556 sz = size_inside_page(p, low_count);
29557
29558 /*
29559 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29560 */
29561 kbuf = xlate_dev_kmem_ptr((char *)p);
29562
29563 - if (copy_to_user(buf, kbuf, sz))
29564 +#ifdef CONFIG_PAX_USERCOPY
29565 + temp = kmalloc(sz, GFP_KERNEL);
29566 + if (!temp)
29567 + return -ENOMEM;
29568 + memcpy(temp, kbuf, sz);
29569 +#else
29570 + temp = kbuf;
29571 +#endif
29572 +
29573 + err = copy_to_user(buf, temp, sz);
29574 +
29575 +#ifdef CONFIG_PAX_USERCOPY
29576 + kfree(temp);
29577 +#endif
29578 +
29579 + if (err)
29580 return -EFAULT;
29581 buf += sz;
29582 p += sz;
29583 @@ -867,6 +914,9 @@ static const struct memdev {
29584 #ifdef CONFIG_CRASH_DUMP
29585 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29586 #endif
29587 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29588 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29589 +#endif
29590 };
29591
29592 static int memory_open(struct inode *inode, struct file *filp)
29593 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29594 index da3cfee..a5a6606 100644
29595 --- a/drivers/char/nvram.c
29596 +++ b/drivers/char/nvram.c
29597 @@ -248,7 +248,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29598
29599 spin_unlock_irq(&rtc_lock);
29600
29601 - if (copy_to_user(buf, contents, tmp - contents))
29602 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29603 return -EFAULT;
29604
29605 *ppos = i;
29606 diff --git a/drivers/char/random.c b/drivers/char/random.c
29607 index 54ca8b2..4a092ed 100644
29608 --- a/drivers/char/random.c
29609 +++ b/drivers/char/random.c
29610 @@ -261,8 +261,13 @@
29611 /*
29612 * Configuration information
29613 */
29614 +#ifdef CONFIG_GRKERNSEC_RANDNET
29615 +#define INPUT_POOL_WORDS 512
29616 +#define OUTPUT_POOL_WORDS 128
29617 +#else
29618 #define INPUT_POOL_WORDS 128
29619 #define OUTPUT_POOL_WORDS 32
29620 +#endif
29621 #define SEC_XFER_SIZE 512
29622 #define EXTRACT_SIZE 10
29623
29624 @@ -300,10 +305,17 @@ static struct poolinfo {
29625 int poolwords;
29626 int tap1, tap2, tap3, tap4, tap5;
29627 } poolinfo_table[] = {
29628 +#ifdef CONFIG_GRKERNSEC_RANDNET
29629 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29630 + { 512, 411, 308, 208, 104, 1 },
29631 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29632 + { 128, 103, 76, 51, 25, 1 },
29633 +#else
29634 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29635 { 128, 103, 76, 51, 25, 1 },
29636 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29637 { 32, 26, 20, 14, 7, 1 },
29638 +#endif
29639 #if 0
29640 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29641 { 2048, 1638, 1231, 819, 411, 1 },
29642 @@ -913,7 +925,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29643
29644 extract_buf(r, tmp);
29645 i = min_t(int, nbytes, EXTRACT_SIZE);
29646 - if (copy_to_user(buf, tmp, i)) {
29647 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29648 ret = -EFAULT;
29649 break;
29650 }
29651 @@ -1238,7 +1250,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29652 #include <linux/sysctl.h>
29653
29654 static int min_read_thresh = 8, min_write_thresh;
29655 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29656 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29657 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29658 static char sysctl_bootid[16];
29659
29660 @@ -1260,10 +1272,15 @@ static int proc_do_uuid(ctl_table *table, int write,
29661 uuid = table->data;
29662 if (!uuid) {
29663 uuid = tmp_uuid;
29664 - uuid[8] = 0;
29665 - }
29666 - if (uuid[8] == 0)
29667 generate_random_uuid(uuid);
29668 + } else {
29669 + static DEFINE_SPINLOCK(bootid_spinlock);
29670 +
29671 + spin_lock(&bootid_spinlock);
29672 + if (!uuid[8])
29673 + generate_random_uuid(uuid);
29674 + spin_unlock(&bootid_spinlock);
29675 + }
29676
29677 sprintf(buf, "%pU", uuid);
29678
29679 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29680 index 1ee8ce7..b778bef 100644
29681 --- a/drivers/char/sonypi.c
29682 +++ b/drivers/char/sonypi.c
29683 @@ -55,6 +55,7 @@
29684 #include <asm/uaccess.h>
29685 #include <asm/io.h>
29686 #include <asm/system.h>
29687 +#include <asm/local.h>
29688
29689 #include <linux/sonypi.h>
29690
29691 @@ -491,7 +492,7 @@ static struct sonypi_device {
29692 spinlock_t fifo_lock;
29693 wait_queue_head_t fifo_proc_list;
29694 struct fasync_struct *fifo_async;
29695 - int open_count;
29696 + local_t open_count;
29697 int model;
29698 struct input_dev *input_jog_dev;
29699 struct input_dev *input_key_dev;
29700 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29701 static int sonypi_misc_release(struct inode *inode, struct file *file)
29702 {
29703 mutex_lock(&sonypi_device.lock);
29704 - sonypi_device.open_count--;
29705 + local_dec(&sonypi_device.open_count);
29706 mutex_unlock(&sonypi_device.lock);
29707 return 0;
29708 }
29709 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29710 {
29711 mutex_lock(&sonypi_device.lock);
29712 /* Flush input queue on first open */
29713 - if (!sonypi_device.open_count)
29714 + if (!local_read(&sonypi_device.open_count))
29715 kfifo_reset(&sonypi_device.fifo);
29716 - sonypi_device.open_count++;
29717 + local_inc(&sonypi_device.open_count);
29718 mutex_unlock(&sonypi_device.lock);
29719
29720 return 0;
29721 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29722 index ad7c732..5aa8054 100644
29723 --- a/drivers/char/tpm/tpm.c
29724 +++ b/drivers/char/tpm/tpm.c
29725 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29726 chip->vendor.req_complete_val)
29727 goto out_recv;
29728
29729 - if ((status == chip->vendor.req_canceled)) {
29730 + if (status == chip->vendor.req_canceled) {
29731 dev_err(chip->dev, "Operation Canceled\n");
29732 rc = -ECANCELED;
29733 goto out;
29734 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29735 index 0636520..169c1d0 100644
29736 --- a/drivers/char/tpm/tpm_bios.c
29737 +++ b/drivers/char/tpm/tpm_bios.c
29738 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29739 event = addr;
29740
29741 if ((event->event_type == 0 && event->event_size == 0) ||
29742 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29743 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29744 return NULL;
29745
29746 return addr;
29747 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29748 return NULL;
29749
29750 if ((event->event_type == 0 && event->event_size == 0) ||
29751 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29752 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29753 return NULL;
29754
29755 (*pos)++;
29756 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29757 int i;
29758
29759 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29760 - seq_putc(m, data[i]);
29761 + if (!seq_putc(m, data[i]))
29762 + return -EFAULT;
29763
29764 return 0;
29765 }
29766 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29767 log->bios_event_log_end = log->bios_event_log + len;
29768
29769 virt = acpi_os_map_memory(start, len);
29770 + if (!virt) {
29771 + kfree(log->bios_event_log);
29772 + log->bios_event_log = NULL;
29773 + return -EFAULT;
29774 + }
29775
29776 - memcpy(log->bios_event_log, virt, len);
29777 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29778
29779 acpi_os_unmap_memory(virt, len);
29780 return 0;
29781 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29782 index b58b561..c9088c8 100644
29783 --- a/drivers/char/virtio_console.c
29784 +++ b/drivers/char/virtio_console.c
29785 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29786 if (to_user) {
29787 ssize_t ret;
29788
29789 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29790 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29791 if (ret)
29792 return -EFAULT;
29793 } else {
29794 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29795 if (!port_has_data(port) && !port->host_connected)
29796 return 0;
29797
29798 - return fill_readbuf(port, ubuf, count, true);
29799 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29800 }
29801
29802 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29803 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
29804 index c9eee6d..f9d5280 100644
29805 --- a/drivers/edac/amd64_edac.c
29806 +++ b/drivers/edac/amd64_edac.c
29807 @@ -2685,7 +2685,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
29808 * PCI core identifies what devices are on a system during boot, and then
29809 * inquiry this table to see if this driver is for a given device found.
29810 */
29811 -static const struct pci_device_id amd64_pci_table[] __devinitdata = {
29812 +static const struct pci_device_id amd64_pci_table[] __devinitconst = {
29813 {
29814 .vendor = PCI_VENDOR_ID_AMD,
29815 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
29816 diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
29817 index e47e73b..348e0bd 100644
29818 --- a/drivers/edac/amd76x_edac.c
29819 +++ b/drivers/edac/amd76x_edac.c
29820 @@ -321,7 +321,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
29821 edac_mc_free(mci);
29822 }
29823
29824 -static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
29825 +static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
29826 {
29827 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29828 AMD762},
29829 diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
29830 index 1af531a..3a8ff27 100644
29831 --- a/drivers/edac/e752x_edac.c
29832 +++ b/drivers/edac/e752x_edac.c
29833 @@ -1380,7 +1380,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
29834 edac_mc_free(mci);
29835 }
29836
29837 -static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
29838 +static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
29839 {
29840 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29841 E7520},
29842 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
29843 index 6ffb6d2..383d8d7 100644
29844 --- a/drivers/edac/e7xxx_edac.c
29845 +++ b/drivers/edac/e7xxx_edac.c
29846 @@ -525,7 +525,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
29847 edac_mc_free(mci);
29848 }
29849
29850 -static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
29851 +static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
29852 {
29853 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29854 E7205},
29855 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29856 index 97f5064..202b6e6 100644
29857 --- a/drivers/edac/edac_pci_sysfs.c
29858 +++ b/drivers/edac/edac_pci_sysfs.c
29859 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29860 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29861 static int edac_pci_poll_msec = 1000; /* one second workq period */
29862
29863 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29864 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29865 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29866 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29867
29868 static struct kobject *edac_pci_top_main_kobj;
29869 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29870 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29871 edac_printk(KERN_CRIT, EDAC_PCI,
29872 "Signaled System Error on %s\n",
29873 pci_name(dev));
29874 - atomic_inc(&pci_nonparity_count);
29875 + atomic_inc_unchecked(&pci_nonparity_count);
29876 }
29877
29878 if (status & (PCI_STATUS_PARITY)) {
29879 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29880 "Master Data Parity Error on %s\n",
29881 pci_name(dev));
29882
29883 - atomic_inc(&pci_parity_count);
29884 + atomic_inc_unchecked(&pci_parity_count);
29885 }
29886
29887 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29888 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29889 "Detected Parity Error on %s\n",
29890 pci_name(dev));
29891
29892 - atomic_inc(&pci_parity_count);
29893 + atomic_inc_unchecked(&pci_parity_count);
29894 }
29895 }
29896
29897 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29898 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29899 "Signaled System Error on %s\n",
29900 pci_name(dev));
29901 - atomic_inc(&pci_nonparity_count);
29902 + atomic_inc_unchecked(&pci_nonparity_count);
29903 }
29904
29905 if (status & (PCI_STATUS_PARITY)) {
29906 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29907 "Master Data Parity Error on "
29908 "%s\n", pci_name(dev));
29909
29910 - atomic_inc(&pci_parity_count);
29911 + atomic_inc_unchecked(&pci_parity_count);
29912 }
29913
29914 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29915 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29916 "Detected Parity Error on %s\n",
29917 pci_name(dev));
29918
29919 - atomic_inc(&pci_parity_count);
29920 + atomic_inc_unchecked(&pci_parity_count);
29921 }
29922 }
29923 }
29924 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29925 if (!check_pci_errors)
29926 return;
29927
29928 - before_count = atomic_read(&pci_parity_count);
29929 + before_count = atomic_read_unchecked(&pci_parity_count);
29930
29931 /* scan all PCI devices looking for a Parity Error on devices and
29932 * bridges.
29933 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29934 /* Only if operator has selected panic on PCI Error */
29935 if (edac_pci_get_panic_on_pe()) {
29936 /* If the count is different 'after' from 'before' */
29937 - if (before_count != atomic_read(&pci_parity_count))
29938 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29939 panic("EDAC: PCI Parity Error");
29940 }
29941 }
29942 diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
29943 index c0510b3..6e2a954 100644
29944 --- a/drivers/edac/i3000_edac.c
29945 +++ b/drivers/edac/i3000_edac.c
29946 @@ -470,7 +470,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
29947 edac_mc_free(mci);
29948 }
29949
29950 -static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
29951 +static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
29952 {
29953 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29954 I3000},
29955 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
29956 index 73f55e200..5faaf59 100644
29957 --- a/drivers/edac/i3200_edac.c
29958 +++ b/drivers/edac/i3200_edac.c
29959 @@ -445,7 +445,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
29960 edac_mc_free(mci);
29961 }
29962
29963 -static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
29964 +static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
29965 {
29966 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
29967 I3200},
29968 diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
29969 index 4dc3ac2..67d05a6 100644
29970 --- a/drivers/edac/i5000_edac.c
29971 +++ b/drivers/edac/i5000_edac.c
29972 @@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
29973 *
29974 * The "E500P" device is the first device supported.
29975 */
29976 -static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
29977 +static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
29978 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
29979 .driver_data = I5000P},
29980
29981 diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
29982 index bcbdeec..9886d16 100644
29983 --- a/drivers/edac/i5100_edac.c
29984 +++ b/drivers/edac/i5100_edac.c
29985 @@ -1051,7 +1051,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
29986 edac_mc_free(mci);
29987 }
29988
29989 -static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
29990 +static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
29991 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
29992 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
29993 { 0, }
29994 diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
29995 index 74d6ec34..baff517 100644
29996 --- a/drivers/edac/i5400_edac.c
29997 +++ b/drivers/edac/i5400_edac.c
29998 @@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
29999 *
30000 * The "E500P" device is the first device supported.
30001 */
30002 -static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
30003 +static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
30004 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
30005 {0,} /* 0 terminated list. */
30006 };
30007 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
30008 index 6104dba..e7ea8e1 100644
30009 --- a/drivers/edac/i7300_edac.c
30010 +++ b/drivers/edac/i7300_edac.c
30011 @@ -1192,7 +1192,7 @@ static void __devexit i7300_remove_one(struct pci_dev *pdev)
30012 *
30013 * Has only 8086:360c PCI ID
30014 */
30015 -static const struct pci_device_id i7300_pci_tbl[] __devinitdata = {
30016 +static const struct pci_device_id i7300_pci_tbl[] __devinitconst = {
30017 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
30018 {0,} /* 0 terminated list. */
30019 };
30020 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
30021 index 8568d9b..42b2fa8 100644
30022 --- a/drivers/edac/i7core_edac.c
30023 +++ b/drivers/edac/i7core_edac.c
30024 @@ -391,7 +391,7 @@ static const struct pci_id_table pci_dev_table[] = {
30025 /*
30026 * pci_device_id table for which devices we are looking for
30027 */
30028 -static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
30029 +static const struct pci_device_id i7core_pci_tbl[] __devinitconst = {
30030 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
30031 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_LINK0)},
30032 {0,} /* 0 terminated list. */
30033 diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
30034 index 4329d39..f3022ef 100644
30035 --- a/drivers/edac/i82443bxgx_edac.c
30036 +++ b/drivers/edac/i82443bxgx_edac.c
30037 @@ -380,7 +380,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
30038
30039 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
30040
30041 -static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
30042 +static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
30043 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
30044 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
30045 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
30046 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
30047 index 931a057..fd28340 100644
30048 --- a/drivers/edac/i82860_edac.c
30049 +++ b/drivers/edac/i82860_edac.c
30050 @@ -270,7 +270,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
30051 edac_mc_free(mci);
30052 }
30053
30054 -static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
30055 +static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
30056 {
30057 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30058 I82860},
30059 diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
30060 index 33864c6..01edc61 100644
30061 --- a/drivers/edac/i82875p_edac.c
30062 +++ b/drivers/edac/i82875p_edac.c
30063 @@ -511,7 +511,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
30064 edac_mc_free(mci);
30065 }
30066
30067 -static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
30068 +static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
30069 {
30070 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30071 I82875P},
30072 diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
30073 index 4184e01..dcb2cd3 100644
30074 --- a/drivers/edac/i82975x_edac.c
30075 +++ b/drivers/edac/i82975x_edac.c
30076 @@ -612,7 +612,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
30077 edac_mc_free(mci);
30078 }
30079
30080 -static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
30081 +static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
30082 {
30083 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30084 I82975X
30085 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
30086 index 0106747..0b40417 100644
30087 --- a/drivers/edac/mce_amd.h
30088 +++ b/drivers/edac/mce_amd.h
30089 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
30090 bool (*dc_mce)(u16, u8);
30091 bool (*ic_mce)(u16, u8);
30092 bool (*nb_mce)(u16, u8);
30093 -};
30094 +} __no_const;
30095
30096 void amd_report_gart_errors(bool);
30097 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
30098 diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
30099 index e294e1b..a41b05b 100644
30100 --- a/drivers/edac/r82600_edac.c
30101 +++ b/drivers/edac/r82600_edac.c
30102 @@ -373,7 +373,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
30103 edac_mc_free(mci);
30104 }
30105
30106 -static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
30107 +static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
30108 {
30109 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
30110 },
30111 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
30112 index 1dc118d..8c68af9 100644
30113 --- a/drivers/edac/sb_edac.c
30114 +++ b/drivers/edac/sb_edac.c
30115 @@ -367,7 +367,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
30116 /*
30117 * pci_device_id table for which devices we are looking for
30118 */
30119 -static const struct pci_device_id sbridge_pci_tbl[] __devinitdata = {
30120 +static const struct pci_device_id sbridge_pci_tbl[] __devinitconst = {
30121 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
30122 {0,} /* 0 terminated list. */
30123 };
30124 diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
30125 index b6f47de..c5acf3a 100644
30126 --- a/drivers/edac/x38_edac.c
30127 +++ b/drivers/edac/x38_edac.c
30128 @@ -440,7 +440,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
30129 edac_mc_free(mci);
30130 }
30131
30132 -static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
30133 +static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
30134 {
30135 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
30136 X38},
30137 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
30138 index 85661b0..c784559a 100644
30139 --- a/drivers/firewire/core-card.c
30140 +++ b/drivers/firewire/core-card.c
30141 @@ -657,7 +657,7 @@ void fw_card_release(struct kref *kref)
30142
30143 void fw_core_remove_card(struct fw_card *card)
30144 {
30145 - struct fw_card_driver dummy_driver = dummy_driver_template;
30146 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
30147
30148 card->driver->update_phy_reg(card, 4,
30149 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
30150 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
30151 index 4799393..37bd3ab 100644
30152 --- a/drivers/firewire/core-cdev.c
30153 +++ b/drivers/firewire/core-cdev.c
30154 @@ -1331,8 +1331,7 @@ static int init_iso_resource(struct client *client,
30155 int ret;
30156
30157 if ((request->channels == 0 && request->bandwidth == 0) ||
30158 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30159 - request->bandwidth < 0)
30160 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30161 return -EINVAL;
30162
30163 r = kmalloc(sizeof(*r), GFP_KERNEL);
30164 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
30165 index 855ab3f..11f4bbd 100644
30166 --- a/drivers/firewire/core-transaction.c
30167 +++ b/drivers/firewire/core-transaction.c
30168 @@ -37,6 +37,7 @@
30169 #include <linux/timer.h>
30170 #include <linux/types.h>
30171 #include <linux/workqueue.h>
30172 +#include <linux/sched.h>
30173
30174 #include <asm/byteorder.h>
30175
30176 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30177 index b45be57..5fad18b 100644
30178 --- a/drivers/firewire/core.h
30179 +++ b/drivers/firewire/core.h
30180 @@ -101,6 +101,7 @@ struct fw_card_driver {
30181
30182 int (*stop_iso)(struct fw_iso_context *ctx);
30183 };
30184 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30185
30186 void fw_card_initialize(struct fw_card *card,
30187 const struct fw_card_driver *driver, struct device *device);
30188 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30189 index 153980b..4b4d046 100644
30190 --- a/drivers/firmware/dmi_scan.c
30191 +++ b/drivers/firmware/dmi_scan.c
30192 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
30193 }
30194 }
30195 else {
30196 - /*
30197 - * no iounmap() for that ioremap(); it would be a no-op, but
30198 - * it's so early in setup that sucker gets confused into doing
30199 - * what it shouldn't if we actually call it.
30200 - */
30201 p = dmi_ioremap(0xF0000, 0x10000);
30202 if (p == NULL)
30203 goto error;
30204 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30205 if (buf == NULL)
30206 return -1;
30207
30208 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30209 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30210
30211 iounmap(buf);
30212 return 0;
30213 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30214 index 82d5c20..44a7177 100644
30215 --- a/drivers/gpio/gpio-vr41xx.c
30216 +++ b/drivers/gpio/gpio-vr41xx.c
30217 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30218 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30219 maskl, pendl, maskh, pendh);
30220
30221 - atomic_inc(&irq_err_count);
30222 + atomic_inc_unchecked(&irq_err_count);
30223
30224 return -EINVAL;
30225 }
30226 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30227 index 84a4a80..ce0306e 100644
30228 --- a/drivers/gpu/drm/drm_crtc_helper.c
30229 +++ b/drivers/gpu/drm/drm_crtc_helper.c
30230 @@ -280,7 +280,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30231 struct drm_crtc *tmp;
30232 int crtc_mask = 1;
30233
30234 - WARN(!crtc, "checking null crtc?\n");
30235 + BUG_ON(!crtc);
30236
30237 dev = crtc->dev;
30238
30239 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30240 index ebf7d3f..d64c436 100644
30241 --- a/drivers/gpu/drm/drm_drv.c
30242 +++ b/drivers/gpu/drm/drm_drv.c
30243 @@ -312,7 +312,7 @@ module_exit(drm_core_exit);
30244 /**
30245 * Copy and IOCTL return string to user space
30246 */
30247 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30248 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30249 {
30250 int len;
30251
30252 @@ -391,7 +391,7 @@ long drm_ioctl(struct file *filp,
30253
30254 dev = file_priv->minor->dev;
30255 atomic_inc(&dev->ioctl_count);
30256 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30257 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30258 ++file_priv->ioctl_count;
30259
30260 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30261 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30262 index 6263b01..7987f55 100644
30263 --- a/drivers/gpu/drm/drm_fops.c
30264 +++ b/drivers/gpu/drm/drm_fops.c
30265 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30266 }
30267
30268 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30269 - atomic_set(&dev->counts[i], 0);
30270 + atomic_set_unchecked(&dev->counts[i], 0);
30271
30272 dev->sigdata.lock = NULL;
30273
30274 @@ -135,8 +135,8 @@ int drm_open(struct inode *inode, struct file *filp)
30275
30276 retcode = drm_open_helper(inode, filp, dev);
30277 if (!retcode) {
30278 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30279 - if (!dev->open_count++)
30280 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30281 + if (local_inc_return(&dev->open_count) == 1)
30282 retcode = drm_setup(dev);
30283 }
30284 if (!retcode) {
30285 @@ -473,7 +473,7 @@ int drm_release(struct inode *inode, struct file *filp)
30286
30287 mutex_lock(&drm_global_mutex);
30288
30289 - DRM_DEBUG("open_count = %d\n", dev->open_count);
30290 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30291
30292 if (dev->driver->preclose)
30293 dev->driver->preclose(dev, file_priv);
30294 @@ -482,10 +482,10 @@ int drm_release(struct inode *inode, struct file *filp)
30295 * Begin inline drm_release
30296 */
30297
30298 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30299 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30300 task_pid_nr(current),
30301 (long)old_encode_dev(file_priv->minor->device),
30302 - dev->open_count);
30303 + local_read(&dev->open_count));
30304
30305 /* Release any auth tokens that might point to this file_priv,
30306 (do that under the drm_global_mutex) */
30307 @@ -571,8 +571,8 @@ int drm_release(struct inode *inode, struct file *filp)
30308 * End inline drm_release
30309 */
30310
30311 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30312 - if (!--dev->open_count) {
30313 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30314 + if (local_dec_and_test(&dev->open_count)) {
30315 if (atomic_read(&dev->ioctl_count)) {
30316 DRM_ERROR("Device busy: %d\n",
30317 atomic_read(&dev->ioctl_count));
30318 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30319 index c87dc96..326055d 100644
30320 --- a/drivers/gpu/drm/drm_global.c
30321 +++ b/drivers/gpu/drm/drm_global.c
30322 @@ -36,7 +36,7 @@
30323 struct drm_global_item {
30324 struct mutex mutex;
30325 void *object;
30326 - int refcount;
30327 + atomic_t refcount;
30328 };
30329
30330 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30331 @@ -49,7 +49,7 @@ void drm_global_init(void)
30332 struct drm_global_item *item = &glob[i];
30333 mutex_init(&item->mutex);
30334 item->object = NULL;
30335 - item->refcount = 0;
30336 + atomic_set(&item->refcount, 0);
30337 }
30338 }
30339
30340 @@ -59,7 +59,7 @@ void drm_global_release(void)
30341 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30342 struct drm_global_item *item = &glob[i];
30343 BUG_ON(item->object != NULL);
30344 - BUG_ON(item->refcount != 0);
30345 + BUG_ON(atomic_read(&item->refcount) != 0);
30346 }
30347 }
30348
30349 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30350 void *object;
30351
30352 mutex_lock(&item->mutex);
30353 - if (item->refcount == 0) {
30354 + if (atomic_read(&item->refcount) == 0) {
30355 item->object = kzalloc(ref->size, GFP_KERNEL);
30356 if (unlikely(item->object == NULL)) {
30357 ret = -ENOMEM;
30358 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30359 goto out_err;
30360
30361 }
30362 - ++item->refcount;
30363 + atomic_inc(&item->refcount);
30364 ref->object = item->object;
30365 object = item->object;
30366 mutex_unlock(&item->mutex);
30367 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30368 struct drm_global_item *item = &glob[ref->global_type];
30369
30370 mutex_lock(&item->mutex);
30371 - BUG_ON(item->refcount == 0);
30372 + BUG_ON(atomic_read(&item->refcount) == 0);
30373 BUG_ON(ref->object != item->object);
30374 - if (--item->refcount == 0) {
30375 + if (atomic_dec_and_test(&item->refcount)) {
30376 ref->release(ref);
30377 item->object = NULL;
30378 }
30379 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30380 index ab1162d..42587b2 100644
30381 --- a/drivers/gpu/drm/drm_info.c
30382 +++ b/drivers/gpu/drm/drm_info.c
30383 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30384 struct drm_local_map *map;
30385 struct drm_map_list *r_list;
30386
30387 - /* Hardcoded from _DRM_FRAME_BUFFER,
30388 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30389 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30390 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30391 + static const char * const types[] = {
30392 + [_DRM_FRAME_BUFFER] = "FB",
30393 + [_DRM_REGISTERS] = "REG",
30394 + [_DRM_SHM] = "SHM",
30395 + [_DRM_AGP] = "AGP",
30396 + [_DRM_SCATTER_GATHER] = "SG",
30397 + [_DRM_CONSISTENT] = "PCI",
30398 + [_DRM_GEM] = "GEM" };
30399 const char *type;
30400 int i;
30401
30402 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30403 map = r_list->map;
30404 if (!map)
30405 continue;
30406 - if (map->type < 0 || map->type > 5)
30407 + if (map->type >= ARRAY_SIZE(types))
30408 type = "??";
30409 else
30410 type = types[map->type];
30411 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30412 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30413 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30414 vma->vm_flags & VM_IO ? 'i' : '-',
30415 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30416 + 0);
30417 +#else
30418 vma->vm_pgoff);
30419 +#endif
30420
30421 #if defined(__i386__)
30422 pgprot = pgprot_val(vma->vm_page_prot);
30423 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30424 index 637fcc3..e890b33 100644
30425 --- a/drivers/gpu/drm/drm_ioc32.c
30426 +++ b/drivers/gpu/drm/drm_ioc32.c
30427 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30428 request = compat_alloc_user_space(nbytes);
30429 if (!access_ok(VERIFY_WRITE, request, nbytes))
30430 return -EFAULT;
30431 - list = (struct drm_buf_desc *) (request + 1);
30432 + list = (struct drm_buf_desc __user *) (request + 1);
30433
30434 if (__put_user(count, &request->count)
30435 || __put_user(list, &request->list))
30436 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30437 request = compat_alloc_user_space(nbytes);
30438 if (!access_ok(VERIFY_WRITE, request, nbytes))
30439 return -EFAULT;
30440 - list = (struct drm_buf_pub *) (request + 1);
30441 + list = (struct drm_buf_pub __user *) (request + 1);
30442
30443 if (__put_user(count, &request->count)
30444 || __put_user(list, &request->list))
30445 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30446 index 956fd38..e52167a 100644
30447 --- a/drivers/gpu/drm/drm_ioctl.c
30448 +++ b/drivers/gpu/drm/drm_ioctl.c
30449 @@ -251,7 +251,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30450 stats->data[i].value =
30451 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30452 else
30453 - stats->data[i].value = atomic_read(&dev->counts[i]);
30454 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30455 stats->data[i].type = dev->types[i];
30456 }
30457
30458 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30459 index c79c713..2048588 100644
30460 --- a/drivers/gpu/drm/drm_lock.c
30461 +++ b/drivers/gpu/drm/drm_lock.c
30462 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30463 if (drm_lock_take(&master->lock, lock->context)) {
30464 master->lock.file_priv = file_priv;
30465 master->lock.lock_time = jiffies;
30466 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30467 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30468 break; /* Got lock */
30469 }
30470
30471 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30472 return -EINVAL;
30473 }
30474
30475 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30476 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30477
30478 if (drm_lock_free(&master->lock, lock->context)) {
30479 /* FIXME: Should really bail out here. */
30480 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30481 index 7f4b4e1..bf4def2 100644
30482 --- a/drivers/gpu/drm/i810/i810_dma.c
30483 +++ b/drivers/gpu/drm/i810/i810_dma.c
30484 @@ -948,8 +948,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30485 dma->buflist[vertex->idx],
30486 vertex->discard, vertex->used);
30487
30488 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30489 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30490 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30491 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30492 sarea_priv->last_enqueue = dev_priv->counter - 1;
30493 sarea_priv->last_dispatch = (int)hw_status[5];
30494
30495 @@ -1109,8 +1109,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30496 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30497 mc->last_render);
30498
30499 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30500 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30501 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30502 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30503 sarea_priv->last_enqueue = dev_priv->counter - 1;
30504 sarea_priv->last_dispatch = (int)hw_status[5];
30505
30506 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30507 index c9339f4..f5e1b9d 100644
30508 --- a/drivers/gpu/drm/i810/i810_drv.h
30509 +++ b/drivers/gpu/drm/i810/i810_drv.h
30510 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30511 int page_flipping;
30512
30513 wait_queue_head_t irq_queue;
30514 - atomic_t irq_received;
30515 - atomic_t irq_emitted;
30516 + atomic_unchecked_t irq_received;
30517 + atomic_unchecked_t irq_emitted;
30518
30519 int front_offset;
30520 } drm_i810_private_t;
30521 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30522 index deaa657..e0fd296 100644
30523 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30524 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30525 @@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30526 I915_READ(GTIMR));
30527 }
30528 seq_printf(m, "Interrupts received: %d\n",
30529 - atomic_read(&dev_priv->irq_received));
30530 + atomic_read_unchecked(&dev_priv->irq_received));
30531 for (i = 0; i < I915_NUM_RINGS; i++) {
30532 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30533 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30534 @@ -1321,7 +1321,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30535 return ret;
30536
30537 if (opregion->header)
30538 - seq_write(m, opregion->header, OPREGION_SIZE);
30539 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30540
30541 mutex_unlock(&dev->struct_mutex);
30542
30543 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30544 index ddfe3d9..f6e6b21 100644
30545 --- a/drivers/gpu/drm/i915/i915_dma.c
30546 +++ b/drivers/gpu/drm/i915/i915_dma.c
30547 @@ -1175,7 +1175,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30548 bool can_switch;
30549
30550 spin_lock(&dev->count_lock);
30551 - can_switch = (dev->open_count == 0);
30552 + can_switch = (local_read(&dev->open_count) == 0);
30553 spin_unlock(&dev->count_lock);
30554 return can_switch;
30555 }
30556 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30557 index 9689ca3..294f9c1 100644
30558 --- a/drivers/gpu/drm/i915/i915_drv.h
30559 +++ b/drivers/gpu/drm/i915/i915_drv.h
30560 @@ -231,7 +231,7 @@ struct drm_i915_display_funcs {
30561 /* render clock increase/decrease */
30562 /* display clock increase/decrease */
30563 /* pll clock increase/decrease */
30564 -};
30565 +} __no_const;
30566
30567 struct intel_device_info {
30568 u8 gen;
30569 @@ -320,7 +320,7 @@ typedef struct drm_i915_private {
30570 int current_page;
30571 int page_flipping;
30572
30573 - atomic_t irq_received;
30574 + atomic_unchecked_t irq_received;
30575
30576 /* protects the irq masks */
30577 spinlock_t irq_lock;
30578 @@ -896,7 +896,7 @@ struct drm_i915_gem_object {
30579 * will be page flipped away on the next vblank. When it
30580 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30581 */
30582 - atomic_t pending_flip;
30583 + atomic_unchecked_t pending_flip;
30584 };
30585
30586 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30587 @@ -1276,7 +1276,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30588 extern void intel_teardown_gmbus(struct drm_device *dev);
30589 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30590 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30591 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30592 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30593 {
30594 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30595 }
30596 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30597 index e159e33..cdcc663 100644
30598 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30599 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30600 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30601 i915_gem_clflush_object(obj);
30602
30603 if (obj->base.pending_write_domain)
30604 - cd->flips |= atomic_read(&obj->pending_flip);
30605 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30606
30607 /* The actual obj->write_domain will be updated with
30608 * pending_write_domain after we emit the accumulated flush for all
30609 @@ -882,9 +882,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30610
30611 static int
30612 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30613 - int count)
30614 + unsigned int count)
30615 {
30616 - int i;
30617 + unsigned int i;
30618
30619 for (i = 0; i < count; i++) {
30620 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30621 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30622 index 5bd4361..0241a42 100644
30623 --- a/drivers/gpu/drm/i915/i915_irq.c
30624 +++ b/drivers/gpu/drm/i915/i915_irq.c
30625 @@ -475,7 +475,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30626 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30627 struct drm_i915_master_private *master_priv;
30628
30629 - atomic_inc(&dev_priv->irq_received);
30630 + atomic_inc_unchecked(&dev_priv->irq_received);
30631
30632 /* disable master interrupt before clearing iir */
30633 de_ier = I915_READ(DEIER);
30634 @@ -566,7 +566,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30635 struct drm_i915_master_private *master_priv;
30636 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30637
30638 - atomic_inc(&dev_priv->irq_received);
30639 + atomic_inc_unchecked(&dev_priv->irq_received);
30640
30641 if (IS_GEN6(dev))
30642 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30643 @@ -1231,7 +1231,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30644 int ret = IRQ_NONE, pipe;
30645 bool blc_event = false;
30646
30647 - atomic_inc(&dev_priv->irq_received);
30648 + atomic_inc_unchecked(&dev_priv->irq_received);
30649
30650 iir = I915_READ(IIR);
30651
30652 @@ -1743,7 +1743,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30653 {
30654 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30655
30656 - atomic_set(&dev_priv->irq_received, 0);
30657 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30658
30659 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30660 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30661 @@ -1932,7 +1932,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30662 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30663 int pipe;
30664
30665 - atomic_set(&dev_priv->irq_received, 0);
30666 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30667
30668 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30669 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30670 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30671 index 2163818..cede019 100644
30672 --- a/drivers/gpu/drm/i915/intel_display.c
30673 +++ b/drivers/gpu/drm/i915/intel_display.c
30674 @@ -2238,7 +2238,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
30675
30676 wait_event(dev_priv->pending_flip_queue,
30677 atomic_read(&dev_priv->mm.wedged) ||
30678 - atomic_read(&obj->pending_flip) == 0);
30679 + atomic_read_unchecked(&obj->pending_flip) == 0);
30680
30681 /* Big Hammer, we also need to ensure that any pending
30682 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30683 @@ -2859,7 +2859,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30684 obj = to_intel_framebuffer(crtc->fb)->obj;
30685 dev_priv = crtc->dev->dev_private;
30686 wait_event(dev_priv->pending_flip_queue,
30687 - atomic_read(&obj->pending_flip) == 0);
30688 + atomic_read_unchecked(&obj->pending_flip) == 0);
30689 }
30690
30691 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30692 @@ -7171,7 +7171,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30693
30694 atomic_clear_mask(1 << intel_crtc->plane,
30695 &obj->pending_flip.counter);
30696 - if (atomic_read(&obj->pending_flip) == 0)
30697 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30698 wake_up(&dev_priv->pending_flip_queue);
30699
30700 schedule_work(&work->work);
30701 @@ -7354,7 +7354,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
30702 OUT_RING(fb->pitches[0] | obj->tiling_mode);
30703 OUT_RING(obj->gtt_offset);
30704
30705 - pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
30706 + /* Contrary to the suggestions in the documentation,
30707 + * "Enable Panel Fitter" does not seem to be required when page
30708 + * flipping with a non-native mode, and worse causes a normal
30709 + * modeset to fail.
30710 + * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
30711 + */
30712 + pf = 0;
30713 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
30714 OUT_RING(pf | pipesrc);
30715 ADVANCE_LP_RING();
30716 @@ -7461,7 +7467,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30717 /* Block clients from rendering to the new back buffer until
30718 * the flip occurs and the object is no longer visible.
30719 */
30720 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30721 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30722
30723 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30724 if (ret)
30725 @@ -7475,7 +7481,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30726 return 0;
30727
30728 cleanup_pending:
30729 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30730 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30731 drm_gem_object_unreference(&work->old_fb_obj->base);
30732 drm_gem_object_unreference(&obj->base);
30733 mutex_unlock(&dev->struct_mutex);
30734 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30735 index 54558a0..2d97005 100644
30736 --- a/drivers/gpu/drm/mga/mga_drv.h
30737 +++ b/drivers/gpu/drm/mga/mga_drv.h
30738 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30739 u32 clear_cmd;
30740 u32 maccess;
30741
30742 - atomic_t vbl_received; /**< Number of vblanks received. */
30743 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30744 wait_queue_head_t fence_queue;
30745 - atomic_t last_fence_retired;
30746 + atomic_unchecked_t last_fence_retired;
30747 u32 next_fence_to_post;
30748
30749 unsigned int fb_cpp;
30750 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30751 index 2581202..f230a8d9 100644
30752 --- a/drivers/gpu/drm/mga/mga_irq.c
30753 +++ b/drivers/gpu/drm/mga/mga_irq.c
30754 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30755 if (crtc != 0)
30756 return 0;
30757
30758 - return atomic_read(&dev_priv->vbl_received);
30759 + return atomic_read_unchecked(&dev_priv->vbl_received);
30760 }
30761
30762
30763 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30764 /* VBLANK interrupt */
30765 if (status & MGA_VLINEPEN) {
30766 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30767 - atomic_inc(&dev_priv->vbl_received);
30768 + atomic_inc_unchecked(&dev_priv->vbl_received);
30769 drm_handle_vblank(dev, 0);
30770 handled = 1;
30771 }
30772 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30773 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30774 MGA_WRITE(MGA_PRIMEND, prim_end);
30775
30776 - atomic_inc(&dev_priv->last_fence_retired);
30777 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30778 DRM_WAKEUP(&dev_priv->fence_queue);
30779 handled = 1;
30780 }
30781 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30782 * using fences.
30783 */
30784 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30785 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30786 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30787 - *sequence) <= (1 << 23)));
30788
30789 *sequence = cur_fence;
30790 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30791 index e5cbead..6c354a3 100644
30792 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30793 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30794 @@ -199,7 +199,7 @@ struct methods {
30795 const char desc[8];
30796 void (*loadbios)(struct drm_device *, uint8_t *);
30797 const bool rw;
30798 -};
30799 +} __do_const;
30800
30801 static struct methods shadow_methods[] = {
30802 { "PRAMIN", load_vbios_pramin, true },
30803 @@ -5290,7 +5290,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30804 struct bit_table {
30805 const char id;
30806 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30807 -};
30808 +} __no_const;
30809
30810 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30811
30812 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30813 index b827098..c31a797 100644
30814 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30815 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30816 @@ -242,7 +242,7 @@ struct nouveau_channel {
30817 struct list_head pending;
30818 uint32_t sequence;
30819 uint32_t sequence_ack;
30820 - atomic_t last_sequence_irq;
30821 + atomic_unchecked_t last_sequence_irq;
30822 struct nouveau_vma vma;
30823 } fence;
30824
30825 @@ -323,7 +323,7 @@ struct nouveau_exec_engine {
30826 u32 handle, u16 class);
30827 void (*set_tile_region)(struct drm_device *dev, int i);
30828 void (*tlb_flush)(struct drm_device *, int engine);
30829 -};
30830 +} __no_const;
30831
30832 struct nouveau_instmem_engine {
30833 void *priv;
30834 @@ -345,13 +345,13 @@ struct nouveau_instmem_engine {
30835 struct nouveau_mc_engine {
30836 int (*init)(struct drm_device *dev);
30837 void (*takedown)(struct drm_device *dev);
30838 -};
30839 +} __no_const;
30840
30841 struct nouveau_timer_engine {
30842 int (*init)(struct drm_device *dev);
30843 void (*takedown)(struct drm_device *dev);
30844 uint64_t (*read)(struct drm_device *dev);
30845 -};
30846 +} __no_const;
30847
30848 struct nouveau_fb_engine {
30849 int num_tiles;
30850 @@ -566,7 +566,7 @@ struct nouveau_vram_engine {
30851 void (*put)(struct drm_device *, struct nouveau_mem **);
30852
30853 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30854 -};
30855 +} __no_const;
30856
30857 struct nouveau_engine {
30858 struct nouveau_instmem_engine instmem;
30859 @@ -714,7 +714,7 @@ struct drm_nouveau_private {
30860 struct drm_global_reference mem_global_ref;
30861 struct ttm_bo_global_ref bo_global_ref;
30862 struct ttm_bo_device bdev;
30863 - atomic_t validate_sequence;
30864 + atomic_unchecked_t validate_sequence;
30865 } ttm;
30866
30867 struct {
30868 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30869 index 2f6daae..c9d7b9e 100644
30870 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30871 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30872 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30873 if (USE_REFCNT(dev))
30874 sequence = nvchan_rd32(chan, 0x48);
30875 else
30876 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30877 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30878
30879 if (chan->fence.sequence_ack == sequence)
30880 goto out;
30881 @@ -539,7 +539,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30882 return ret;
30883 }
30884
30885 - atomic_set(&chan->fence.last_sequence_irq, 0);
30886 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30887 return 0;
30888 }
30889
30890 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30891 index 7ce3fde..cb3ea04 100644
30892 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30893 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30894 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30895 int trycnt = 0;
30896 int ret, i;
30897
30898 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30899 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30900 retry:
30901 if (++trycnt > 100000) {
30902 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30903 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30904 index f80c5e0..936baa7 100644
30905 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30906 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30907 @@ -543,7 +543,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30908 bool can_switch;
30909
30910 spin_lock(&dev->count_lock);
30911 - can_switch = (dev->open_count == 0);
30912 + can_switch = (local_read(&dev->open_count) == 0);
30913 spin_unlock(&dev->count_lock);
30914 return can_switch;
30915 }
30916 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30917 index dbdea8e..cd6eeeb 100644
30918 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30919 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30920 @@ -554,7 +554,7 @@ static int
30921 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30922 u32 class, u32 mthd, u32 data)
30923 {
30924 - atomic_set(&chan->fence.last_sequence_irq, data);
30925 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30926 return 0;
30927 }
30928
30929 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30930 index bcac90b..53bfc76 100644
30931 --- a/drivers/gpu/drm/r128/r128_cce.c
30932 +++ b/drivers/gpu/drm/r128/r128_cce.c
30933 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30934
30935 /* GH: Simple idle check.
30936 */
30937 - atomic_set(&dev_priv->idle_count, 0);
30938 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30939
30940 /* We don't support anything other than bus-mastering ring mode,
30941 * but the ring can be in either AGP or PCI space for the ring
30942 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30943 index 930c71b..499aded 100644
30944 --- a/drivers/gpu/drm/r128/r128_drv.h
30945 +++ b/drivers/gpu/drm/r128/r128_drv.h
30946 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30947 int is_pci;
30948 unsigned long cce_buffers_offset;
30949
30950 - atomic_t idle_count;
30951 + atomic_unchecked_t idle_count;
30952
30953 int page_flipping;
30954 int current_page;
30955 u32 crtc_offset;
30956 u32 crtc_offset_cntl;
30957
30958 - atomic_t vbl_received;
30959 + atomic_unchecked_t vbl_received;
30960
30961 u32 color_fmt;
30962 unsigned int front_offset;
30963 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30964 index 429d5a0..7e899ed 100644
30965 --- a/drivers/gpu/drm/r128/r128_irq.c
30966 +++ b/drivers/gpu/drm/r128/r128_irq.c
30967 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30968 if (crtc != 0)
30969 return 0;
30970
30971 - return atomic_read(&dev_priv->vbl_received);
30972 + return atomic_read_unchecked(&dev_priv->vbl_received);
30973 }
30974
30975 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30976 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30977 /* VBLANK interrupt */
30978 if (status & R128_CRTC_VBLANK_INT) {
30979 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30980 - atomic_inc(&dev_priv->vbl_received);
30981 + atomic_inc_unchecked(&dev_priv->vbl_received);
30982 drm_handle_vblank(dev, 0);
30983 return IRQ_HANDLED;
30984 }
30985 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30986 index a9e33ce..09edd4b 100644
30987 --- a/drivers/gpu/drm/r128/r128_state.c
30988 +++ b/drivers/gpu/drm/r128/r128_state.c
30989 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30990
30991 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30992 {
30993 - if (atomic_read(&dev_priv->idle_count) == 0)
30994 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30995 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30996 else
30997 - atomic_set(&dev_priv->idle_count, 0);
30998 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30999 }
31000
31001 #endif
31002 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
31003 index 5a82b6b..9e69c73 100644
31004 --- a/drivers/gpu/drm/radeon/mkregtable.c
31005 +++ b/drivers/gpu/drm/radeon/mkregtable.c
31006 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
31007 regex_t mask_rex;
31008 regmatch_t match[4];
31009 char buf[1024];
31010 - size_t end;
31011 + long end;
31012 int len;
31013 int done = 0;
31014 int r;
31015 unsigned o;
31016 struct offset *offset;
31017 char last_reg_s[10];
31018 - int last_reg;
31019 + unsigned long last_reg;
31020
31021 if (regcomp
31022 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
31023 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
31024 index 1668ec1..30ebdab 100644
31025 --- a/drivers/gpu/drm/radeon/radeon.h
31026 +++ b/drivers/gpu/drm/radeon/radeon.h
31027 @@ -250,7 +250,7 @@ struct radeon_fence_driver {
31028 uint32_t scratch_reg;
31029 uint64_t gpu_addr;
31030 volatile uint32_t *cpu_addr;
31031 - atomic_t seq;
31032 + atomic_unchecked_t seq;
31033 uint32_t last_seq;
31034 unsigned long last_jiffies;
31035 unsigned long last_timeout;
31036 @@ -752,7 +752,7 @@ struct r600_blit_cp_primitives {
31037 int x2, int y2);
31038 void (*draw_auto)(struct radeon_device *rdev);
31039 void (*set_default_state)(struct radeon_device *rdev);
31040 -};
31041 +} __no_const;
31042
31043 struct r600_blit {
31044 struct mutex mutex;
31045 @@ -1201,7 +1201,7 @@ struct radeon_asic {
31046 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
31047 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
31048 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
31049 -};
31050 +} __no_const;
31051
31052 /*
31053 * Asic structures
31054 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
31055 index 49f7cb7..2fcb48f 100644
31056 --- a/drivers/gpu/drm/radeon/radeon_device.c
31057 +++ b/drivers/gpu/drm/radeon/radeon_device.c
31058 @@ -687,7 +687,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
31059 bool can_switch;
31060
31061 spin_lock(&dev->count_lock);
31062 - can_switch = (dev->open_count == 0);
31063 + can_switch = (local_read(&dev->open_count) == 0);
31064 spin_unlock(&dev->count_lock);
31065 return can_switch;
31066 }
31067 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
31068 index a1b59ca..86f2d44 100644
31069 --- a/drivers/gpu/drm/radeon/radeon_drv.h
31070 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
31071 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
31072
31073 /* SW interrupt */
31074 wait_queue_head_t swi_queue;
31075 - atomic_t swi_emitted;
31076 + atomic_unchecked_t swi_emitted;
31077 int vblank_crtc;
31078 uint32_t irq_enable_reg;
31079 uint32_t r500_disp_irq_reg;
31080 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
31081 index 4bd36a3..e66fe9c 100644
31082 --- a/drivers/gpu/drm/radeon/radeon_fence.c
31083 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
31084 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
31085 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
31086 return 0;
31087 }
31088 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
31089 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
31090 if (!rdev->ring[fence->ring].ready)
31091 /* FIXME: cp is not running assume everythings is done right
31092 * away
31093 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
31094 }
31095 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
31096 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
31097 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
31098 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
31099 rdev->fence_drv[ring].initialized = true;
31100 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
31101 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
31102 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
31103 rdev->fence_drv[ring].scratch_reg = -1;
31104 rdev->fence_drv[ring].cpu_addr = NULL;
31105 rdev->fence_drv[ring].gpu_addr = 0;
31106 - atomic_set(&rdev->fence_drv[ring].seq, 0);
31107 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
31108 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
31109 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
31110 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
31111 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
31112 index 48b7cea..342236f 100644
31113 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
31114 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
31115 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
31116 request = compat_alloc_user_space(sizeof(*request));
31117 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
31118 || __put_user(req32.param, &request->param)
31119 - || __put_user((void __user *)(unsigned long)req32.value,
31120 + || __put_user((unsigned long)req32.value,
31121 &request->value))
31122 return -EFAULT;
31123
31124 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
31125 index 00da384..32f972d 100644
31126 --- a/drivers/gpu/drm/radeon/radeon_irq.c
31127 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
31128 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
31129 unsigned int ret;
31130 RING_LOCALS;
31131
31132 - atomic_inc(&dev_priv->swi_emitted);
31133 - ret = atomic_read(&dev_priv->swi_emitted);
31134 + atomic_inc_unchecked(&dev_priv->swi_emitted);
31135 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31136
31137 BEGIN_RING(4);
31138 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
31139 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
31140 drm_radeon_private_t *dev_priv =
31141 (drm_radeon_private_t *) dev->dev_private;
31142
31143 - atomic_set(&dev_priv->swi_emitted, 0);
31144 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31145 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31146
31147 dev->max_vblank_count = 0x001fffff;
31148 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31149 index e8422ae..d22d4a8 100644
31150 --- a/drivers/gpu/drm/radeon/radeon_state.c
31151 +++ b/drivers/gpu/drm/radeon/radeon_state.c
31152 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31153 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31154 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31155
31156 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31157 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31158 sarea_priv->nbox * sizeof(depth_boxes[0])))
31159 return -EFAULT;
31160
31161 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31162 {
31163 drm_radeon_private_t *dev_priv = dev->dev_private;
31164 drm_radeon_getparam_t *param = data;
31165 - int value;
31166 + int value = 0;
31167
31168 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31169
31170 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31171 index c421e77..e6bf2e8 100644
31172 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
31173 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31174 @@ -842,8 +842,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31175 }
31176 if (unlikely(ttm_vm_ops == NULL)) {
31177 ttm_vm_ops = vma->vm_ops;
31178 - radeon_ttm_vm_ops = *ttm_vm_ops;
31179 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31180 + pax_open_kernel();
31181 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31182 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31183 + pax_close_kernel();
31184 }
31185 vma->vm_ops = &radeon_ttm_vm_ops;
31186 return 0;
31187 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31188 index f68dff2..8df955c 100644
31189 --- a/drivers/gpu/drm/radeon/rs690.c
31190 +++ b/drivers/gpu/drm/radeon/rs690.c
31191 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31192 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31193 rdev->pm.sideport_bandwidth.full)
31194 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31195 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31196 + read_delay_latency.full = dfixed_const(800 * 1000);
31197 read_delay_latency.full = dfixed_div(read_delay_latency,
31198 rdev->pm.igp_sideport_mclk);
31199 + a.full = dfixed_const(370);
31200 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31201 } else {
31202 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31203 rdev->pm.k8_bandwidth.full)
31204 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31205 index 499debd..66fce72 100644
31206 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31207 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31208 @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void)
31209 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31210 struct shrink_control *sc)
31211 {
31212 - static atomic_t start_pool = ATOMIC_INIT(0);
31213 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31214 unsigned i;
31215 - unsigned pool_offset = atomic_add_return(1, &start_pool);
31216 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31217 struct ttm_page_pool *pool;
31218 int shrink_pages = sc->nr_to_scan;
31219
31220 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31221 index 88edacc..1e5412b 100644
31222 --- a/drivers/gpu/drm/via/via_drv.h
31223 +++ b/drivers/gpu/drm/via/via_drv.h
31224 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31225 typedef uint32_t maskarray_t[5];
31226
31227 typedef struct drm_via_irq {
31228 - atomic_t irq_received;
31229 + atomic_unchecked_t irq_received;
31230 uint32_t pending_mask;
31231 uint32_t enable_mask;
31232 wait_queue_head_t irq_queue;
31233 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
31234 struct timeval last_vblank;
31235 int last_vblank_valid;
31236 unsigned usec_per_vblank;
31237 - atomic_t vbl_received;
31238 + atomic_unchecked_t vbl_received;
31239 drm_via_state_t hc_state;
31240 char pci_buf[VIA_PCI_BUF_SIZE];
31241 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31242 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31243 index d391f48..10c8ca3 100644
31244 --- a/drivers/gpu/drm/via/via_irq.c
31245 +++ b/drivers/gpu/drm/via/via_irq.c
31246 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31247 if (crtc != 0)
31248 return 0;
31249
31250 - return atomic_read(&dev_priv->vbl_received);
31251 + return atomic_read_unchecked(&dev_priv->vbl_received);
31252 }
31253
31254 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31255 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31256
31257 status = VIA_READ(VIA_REG_INTERRUPT);
31258 if (status & VIA_IRQ_VBLANK_PENDING) {
31259 - atomic_inc(&dev_priv->vbl_received);
31260 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31261 + atomic_inc_unchecked(&dev_priv->vbl_received);
31262 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31263 do_gettimeofday(&cur_vblank);
31264 if (dev_priv->last_vblank_valid) {
31265 dev_priv->usec_per_vblank =
31266 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31267 dev_priv->last_vblank = cur_vblank;
31268 dev_priv->last_vblank_valid = 1;
31269 }
31270 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31271 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31272 DRM_DEBUG("US per vblank is: %u\n",
31273 dev_priv->usec_per_vblank);
31274 }
31275 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31276
31277 for (i = 0; i < dev_priv->num_irqs; ++i) {
31278 if (status & cur_irq->pending_mask) {
31279 - atomic_inc(&cur_irq->irq_received);
31280 + atomic_inc_unchecked(&cur_irq->irq_received);
31281 DRM_WAKEUP(&cur_irq->irq_queue);
31282 handled = 1;
31283 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31284 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31285 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31286 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31287 masks[irq][4]));
31288 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31289 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31290 } else {
31291 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31292 (((cur_irq_sequence =
31293 - atomic_read(&cur_irq->irq_received)) -
31294 + atomic_read_unchecked(&cur_irq->irq_received)) -
31295 *sequence) <= (1 << 23)));
31296 }
31297 *sequence = cur_irq_sequence;
31298 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31299 }
31300
31301 for (i = 0; i < dev_priv->num_irqs; ++i) {
31302 - atomic_set(&cur_irq->irq_received, 0);
31303 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31304 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31305 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31306 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31307 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31308 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31309 case VIA_IRQ_RELATIVE:
31310 irqwait->request.sequence +=
31311 - atomic_read(&cur_irq->irq_received);
31312 + atomic_read_unchecked(&cur_irq->irq_received);
31313 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31314 case VIA_IRQ_ABSOLUTE:
31315 break;
31316 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31317 index dc27970..f18b008 100644
31318 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31319 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31320 @@ -260,7 +260,7 @@ struct vmw_private {
31321 * Fencing and IRQs.
31322 */
31323
31324 - atomic_t marker_seq;
31325 + atomic_unchecked_t marker_seq;
31326 wait_queue_head_t fence_queue;
31327 wait_queue_head_t fifo_queue;
31328 int fence_queue_waiters; /* Protected by hw_mutex */
31329 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31330 index a0c2f12..68ae6cb 100644
31331 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31332 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31333 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31334 (unsigned int) min,
31335 (unsigned int) fifo->capabilities);
31336
31337 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31338 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31339 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31340 vmw_marker_queue_init(&fifo->marker_queue);
31341 return vmw_fifo_send_fence(dev_priv, &dummy);
31342 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31343 if (reserveable)
31344 iowrite32(bytes, fifo_mem +
31345 SVGA_FIFO_RESERVED);
31346 - return fifo_mem + (next_cmd >> 2);
31347 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31348 } else {
31349 need_bounce = true;
31350 }
31351 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31352
31353 fm = vmw_fifo_reserve(dev_priv, bytes);
31354 if (unlikely(fm == NULL)) {
31355 - *seqno = atomic_read(&dev_priv->marker_seq);
31356 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31357 ret = -ENOMEM;
31358 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31359 false, 3*HZ);
31360 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31361 }
31362
31363 do {
31364 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31365 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31366 } while (*seqno == 0);
31367
31368 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31369 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31370 index cabc95f..14b3d77 100644
31371 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31372 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31373 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31374 * emitted. Then the fence is stale and signaled.
31375 */
31376
31377 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31378 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31379 > VMW_FENCE_WRAP);
31380
31381 return ret;
31382 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31383
31384 if (fifo_idle)
31385 down_read(&fifo_state->rwsem);
31386 - signal_seq = atomic_read(&dev_priv->marker_seq);
31387 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31388 ret = 0;
31389
31390 for (;;) {
31391 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31392 index 8a8725c..afed796 100644
31393 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31394 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31395 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31396 while (!vmw_lag_lt(queue, us)) {
31397 spin_lock(&queue->lock);
31398 if (list_empty(&queue->head))
31399 - seqno = atomic_read(&dev_priv->marker_seq);
31400 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31401 else {
31402 marker = list_first_entry(&queue->head,
31403 struct vmw_marker, head);
31404 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31405 index 75dbe34..f9204a8 100644
31406 --- a/drivers/hid/hid-core.c
31407 +++ b/drivers/hid/hid-core.c
31408 @@ -2021,7 +2021,7 @@ static bool hid_ignore(struct hid_device *hdev)
31409
31410 int hid_add_device(struct hid_device *hdev)
31411 {
31412 - static atomic_t id = ATOMIC_INIT(0);
31413 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31414 int ret;
31415
31416 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31417 @@ -2036,7 +2036,7 @@ int hid_add_device(struct hid_device *hdev)
31418 /* XXX hack, any other cleaner solution after the driver core
31419 * is converted to allow more than 20 bytes as the device name? */
31420 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31421 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31422 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31423
31424 hid_debug_register(hdev, dev_name(&hdev->dev));
31425 ret = device_add(&hdev->dev);
31426 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31427 index b1ec0e2..c295a61 100644
31428 --- a/drivers/hid/usbhid/hiddev.c
31429 +++ b/drivers/hid/usbhid/hiddev.c
31430 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31431 break;
31432
31433 case HIDIOCAPPLICATION:
31434 - if (arg < 0 || arg >= hid->maxapplication)
31435 + if (arg >= hid->maxapplication)
31436 break;
31437
31438 for (i = 0; i < hid->maxcollection; i++)
31439 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31440 index 4065374..10ed7dc 100644
31441 --- a/drivers/hv/channel.c
31442 +++ b/drivers/hv/channel.c
31443 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31444 int ret = 0;
31445 int t;
31446
31447 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31448 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31449 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31450 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31451
31452 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31453 if (ret)
31454 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31455 index 12aa97f..c0679f7 100644
31456 --- a/drivers/hv/hv.c
31457 +++ b/drivers/hv/hv.c
31458 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31459 u64 output_address = (output) ? virt_to_phys(output) : 0;
31460 u32 output_address_hi = output_address >> 32;
31461 u32 output_address_lo = output_address & 0xFFFFFFFF;
31462 - void *hypercall_page = hv_context.hypercall_page;
31463 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31464
31465 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31466 "=a"(hv_status_lo) : "d" (control_hi),
31467 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31468 index 6d7d286..92b0873 100644
31469 --- a/drivers/hv/hyperv_vmbus.h
31470 +++ b/drivers/hv/hyperv_vmbus.h
31471 @@ -556,7 +556,7 @@ enum vmbus_connect_state {
31472 struct vmbus_connection {
31473 enum vmbus_connect_state conn_state;
31474
31475 - atomic_t next_gpadl_handle;
31476 + atomic_unchecked_t next_gpadl_handle;
31477
31478 /*
31479 * Represents channel interrupts. Each bit position represents a
31480 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31481 index a220e57..428f54d 100644
31482 --- a/drivers/hv/vmbus_drv.c
31483 +++ b/drivers/hv/vmbus_drv.c
31484 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31485 {
31486 int ret = 0;
31487
31488 - static atomic_t device_num = ATOMIC_INIT(0);
31489 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31490
31491 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31492 - atomic_inc_return(&device_num));
31493 + atomic_inc_return_unchecked(&device_num));
31494
31495 child_device_obj->device.bus = &hv_bus;
31496 child_device_obj->device.parent = &hv_acpi_dev->dev;
31497 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31498 index 554f046..f8b4729 100644
31499 --- a/drivers/hwmon/acpi_power_meter.c
31500 +++ b/drivers/hwmon/acpi_power_meter.c
31501 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31502 return res;
31503
31504 temp /= 1000;
31505 - if (temp < 0)
31506 - return -EINVAL;
31507
31508 mutex_lock(&resource->lock);
31509 resource->trip[attr->index - 7] = temp;
31510 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31511 index 91fdd1f..b66a686 100644
31512 --- a/drivers/hwmon/sht15.c
31513 +++ b/drivers/hwmon/sht15.c
31514 @@ -166,7 +166,7 @@ struct sht15_data {
31515 int supply_uV;
31516 bool supply_uV_valid;
31517 struct work_struct update_supply_work;
31518 - atomic_t interrupt_handled;
31519 + atomic_unchecked_t interrupt_handled;
31520 };
31521
31522 /**
31523 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31524 return ret;
31525
31526 gpio_direction_input(data->pdata->gpio_data);
31527 - atomic_set(&data->interrupt_handled, 0);
31528 + atomic_set_unchecked(&data->interrupt_handled, 0);
31529
31530 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31531 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31532 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31533 /* Only relevant if the interrupt hasn't occurred. */
31534 - if (!atomic_read(&data->interrupt_handled))
31535 + if (!atomic_read_unchecked(&data->interrupt_handled))
31536 schedule_work(&data->read_work);
31537 }
31538 ret = wait_event_timeout(data->wait_queue,
31539 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31540
31541 /* First disable the interrupt */
31542 disable_irq_nosync(irq);
31543 - atomic_inc(&data->interrupt_handled);
31544 + atomic_inc_unchecked(&data->interrupt_handled);
31545 /* Then schedule a reading work struct */
31546 if (data->state != SHT15_READING_NOTHING)
31547 schedule_work(&data->read_work);
31548 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31549 * If not, then start the interrupt again - care here as could
31550 * have gone low in meantime so verify it hasn't!
31551 */
31552 - atomic_set(&data->interrupt_handled, 0);
31553 + atomic_set_unchecked(&data->interrupt_handled, 0);
31554 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31555 /* If still not occurred or another handler has been scheduled */
31556 if (gpio_get_value(data->pdata->gpio_data)
31557 - || atomic_read(&data->interrupt_handled))
31558 + || atomic_read_unchecked(&data->interrupt_handled))
31559 return;
31560 }
31561
31562 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31563 index 378fcb5..5e91fa8 100644
31564 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31565 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31566 @@ -43,7 +43,7 @@
31567 extern struct i2c_adapter amd756_smbus;
31568
31569 static struct i2c_adapter *s4882_adapter;
31570 -static struct i2c_algorithm *s4882_algo;
31571 +static i2c_algorithm_no_const *s4882_algo;
31572
31573 /* Wrapper access functions for multiplexed SMBus */
31574 static DEFINE_MUTEX(amd756_lock);
31575 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31576 index 29015eb..af2d8e9 100644
31577 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31578 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31579 @@ -41,7 +41,7 @@
31580 extern struct i2c_adapter *nforce2_smbus;
31581
31582 static struct i2c_adapter *s4985_adapter;
31583 -static struct i2c_algorithm *s4985_algo;
31584 +static i2c_algorithm_no_const *s4985_algo;
31585
31586 /* Wrapper access functions for multiplexed SMBus */
31587 static DEFINE_MUTEX(nforce2_lock);
31588 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31589 index d7a4833..7fae376 100644
31590 --- a/drivers/i2c/i2c-mux.c
31591 +++ b/drivers/i2c/i2c-mux.c
31592 @@ -28,7 +28,7 @@
31593 /* multiplexer per channel data */
31594 struct i2c_mux_priv {
31595 struct i2c_adapter adap;
31596 - struct i2c_algorithm algo;
31597 + i2c_algorithm_no_const algo;
31598
31599 struct i2c_adapter *parent;
31600 void *mux_dev; /* the mux chip/device */
31601 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31602 index 57d00ca..0145194 100644
31603 --- a/drivers/ide/aec62xx.c
31604 +++ b/drivers/ide/aec62xx.c
31605 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31606 .cable_detect = atp86x_cable_detect,
31607 };
31608
31609 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31610 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31611 { /* 0: AEC6210 */
31612 .name = DRV_NAME,
31613 .init_chipset = init_chipset_aec62xx,
31614 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31615 index 2c8016a..911a27c 100644
31616 --- a/drivers/ide/alim15x3.c
31617 +++ b/drivers/ide/alim15x3.c
31618 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31619 .dma_sff_read_status = ide_dma_sff_read_status,
31620 };
31621
31622 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31623 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31624 .name = DRV_NAME,
31625 .init_chipset = init_chipset_ali15x3,
31626 .init_hwif = init_hwif_ali15x3,
31627 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31628 index 3747b25..56fc995 100644
31629 --- a/drivers/ide/amd74xx.c
31630 +++ b/drivers/ide/amd74xx.c
31631 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31632 .udma_mask = udma, \
31633 }
31634
31635 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31636 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31637 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31638 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31639 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31640 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31641 index 15f0ead..cb43480 100644
31642 --- a/drivers/ide/atiixp.c
31643 +++ b/drivers/ide/atiixp.c
31644 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31645 .cable_detect = atiixp_cable_detect,
31646 };
31647
31648 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31649 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31650 { /* 0: IXP200/300/400/700 */
31651 .name = DRV_NAME,
31652 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31653 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31654 index 5f80312..d1fc438 100644
31655 --- a/drivers/ide/cmd64x.c
31656 +++ b/drivers/ide/cmd64x.c
31657 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31658 .dma_sff_read_status = ide_dma_sff_read_status,
31659 };
31660
31661 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31662 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31663 { /* 0: CMD643 */
31664 .name = DRV_NAME,
31665 .init_chipset = init_chipset_cmd64x,
31666 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31667 index 2c1e5f7..1444762 100644
31668 --- a/drivers/ide/cs5520.c
31669 +++ b/drivers/ide/cs5520.c
31670 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31671 .set_dma_mode = cs5520_set_dma_mode,
31672 };
31673
31674 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31675 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31676 .name = DRV_NAME,
31677 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31678 .port_ops = &cs5520_port_ops,
31679 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31680 index 4dc4eb9..49b40ad 100644
31681 --- a/drivers/ide/cs5530.c
31682 +++ b/drivers/ide/cs5530.c
31683 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31684 .udma_filter = cs5530_udma_filter,
31685 };
31686
31687 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31688 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31689 .name = DRV_NAME,
31690 .init_chipset = init_chipset_cs5530,
31691 .init_hwif = init_hwif_cs5530,
31692 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31693 index 5059faf..18d4c85 100644
31694 --- a/drivers/ide/cs5535.c
31695 +++ b/drivers/ide/cs5535.c
31696 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31697 .cable_detect = cs5535_cable_detect,
31698 };
31699
31700 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31701 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31702 .name = DRV_NAME,
31703 .port_ops = &cs5535_port_ops,
31704 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31705 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31706 index 847553f..3ffb49d 100644
31707 --- a/drivers/ide/cy82c693.c
31708 +++ b/drivers/ide/cy82c693.c
31709 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31710 .set_dma_mode = cy82c693_set_dma_mode,
31711 };
31712
31713 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31714 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31715 .name = DRV_NAME,
31716 .init_iops = init_iops_cy82c693,
31717 .port_ops = &cy82c693_port_ops,
31718 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31719 index 58c51cd..4aec3b8 100644
31720 --- a/drivers/ide/hpt366.c
31721 +++ b/drivers/ide/hpt366.c
31722 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31723 }
31724 };
31725
31726 -static const struct hpt_info hpt36x __devinitdata = {
31727 +static const struct hpt_info hpt36x __devinitconst = {
31728 .chip_name = "HPT36x",
31729 .chip_type = HPT36x,
31730 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31731 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31732 .timings = &hpt36x_timings
31733 };
31734
31735 -static const struct hpt_info hpt370 __devinitdata = {
31736 +static const struct hpt_info hpt370 __devinitconst = {
31737 .chip_name = "HPT370",
31738 .chip_type = HPT370,
31739 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31740 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31741 .timings = &hpt37x_timings
31742 };
31743
31744 -static const struct hpt_info hpt370a __devinitdata = {
31745 +static const struct hpt_info hpt370a __devinitconst = {
31746 .chip_name = "HPT370A",
31747 .chip_type = HPT370A,
31748 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31749 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31750 .timings = &hpt37x_timings
31751 };
31752
31753 -static const struct hpt_info hpt374 __devinitdata = {
31754 +static const struct hpt_info hpt374 __devinitconst = {
31755 .chip_name = "HPT374",
31756 .chip_type = HPT374,
31757 .udma_mask = ATA_UDMA5,
31758 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31759 .timings = &hpt37x_timings
31760 };
31761
31762 -static const struct hpt_info hpt372 __devinitdata = {
31763 +static const struct hpt_info hpt372 __devinitconst = {
31764 .chip_name = "HPT372",
31765 .chip_type = HPT372,
31766 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31767 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31768 .timings = &hpt37x_timings
31769 };
31770
31771 -static const struct hpt_info hpt372a __devinitdata = {
31772 +static const struct hpt_info hpt372a __devinitconst = {
31773 .chip_name = "HPT372A",
31774 .chip_type = HPT372A,
31775 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31776 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31777 .timings = &hpt37x_timings
31778 };
31779
31780 -static const struct hpt_info hpt302 __devinitdata = {
31781 +static const struct hpt_info hpt302 __devinitconst = {
31782 .chip_name = "HPT302",
31783 .chip_type = HPT302,
31784 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31785 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31786 .timings = &hpt37x_timings
31787 };
31788
31789 -static const struct hpt_info hpt371 __devinitdata = {
31790 +static const struct hpt_info hpt371 __devinitconst = {
31791 .chip_name = "HPT371",
31792 .chip_type = HPT371,
31793 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31794 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31795 .timings = &hpt37x_timings
31796 };
31797
31798 -static const struct hpt_info hpt372n __devinitdata = {
31799 +static const struct hpt_info hpt372n __devinitconst = {
31800 .chip_name = "HPT372N",
31801 .chip_type = HPT372N,
31802 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31803 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31804 .timings = &hpt37x_timings
31805 };
31806
31807 -static const struct hpt_info hpt302n __devinitdata = {
31808 +static const struct hpt_info hpt302n __devinitconst = {
31809 .chip_name = "HPT302N",
31810 .chip_type = HPT302N,
31811 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31812 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31813 .timings = &hpt37x_timings
31814 };
31815
31816 -static const struct hpt_info hpt371n __devinitdata = {
31817 +static const struct hpt_info hpt371n __devinitconst = {
31818 .chip_name = "HPT371N",
31819 .chip_type = HPT371N,
31820 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31821 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31822 .dma_sff_read_status = ide_dma_sff_read_status,
31823 };
31824
31825 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31826 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31827 { /* 0: HPT36x */
31828 .name = DRV_NAME,
31829 .init_chipset = init_chipset_hpt366,
31830 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31831 index 8126824..55a2798 100644
31832 --- a/drivers/ide/ide-cd.c
31833 +++ b/drivers/ide/ide-cd.c
31834 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31835 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31836 if ((unsigned long)buf & alignment
31837 || blk_rq_bytes(rq) & q->dma_pad_mask
31838 - || object_is_on_stack(buf))
31839 + || object_starts_on_stack(buf))
31840 drive->dma = 0;
31841 }
31842 }
31843 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31844 index 7f56b73..dab5b67 100644
31845 --- a/drivers/ide/ide-pci-generic.c
31846 +++ b/drivers/ide/ide-pci-generic.c
31847 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31848 .udma_mask = ATA_UDMA6, \
31849 }
31850
31851 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31852 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31853 /* 0: Unknown */
31854 DECLARE_GENERIC_PCI_DEV(0),
31855
31856 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31857 index 560e66d..d5dd180 100644
31858 --- a/drivers/ide/it8172.c
31859 +++ b/drivers/ide/it8172.c
31860 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31861 .set_dma_mode = it8172_set_dma_mode,
31862 };
31863
31864 -static const struct ide_port_info it8172_port_info __devinitdata = {
31865 +static const struct ide_port_info it8172_port_info __devinitconst = {
31866 .name = DRV_NAME,
31867 .port_ops = &it8172_port_ops,
31868 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31869 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31870 index 46816ba..1847aeb 100644
31871 --- a/drivers/ide/it8213.c
31872 +++ b/drivers/ide/it8213.c
31873 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31874 .cable_detect = it8213_cable_detect,
31875 };
31876
31877 -static const struct ide_port_info it8213_chipset __devinitdata = {
31878 +static const struct ide_port_info it8213_chipset __devinitconst = {
31879 .name = DRV_NAME,
31880 .enablebits = { {0x41, 0x80, 0x80} },
31881 .port_ops = &it8213_port_ops,
31882 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31883 index 2e3169f..c5611db 100644
31884 --- a/drivers/ide/it821x.c
31885 +++ b/drivers/ide/it821x.c
31886 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31887 .cable_detect = it821x_cable_detect,
31888 };
31889
31890 -static const struct ide_port_info it821x_chipset __devinitdata = {
31891 +static const struct ide_port_info it821x_chipset __devinitconst = {
31892 .name = DRV_NAME,
31893 .init_chipset = init_chipset_it821x,
31894 .init_hwif = init_hwif_it821x,
31895 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31896 index 74c2c4a..efddd7d 100644
31897 --- a/drivers/ide/jmicron.c
31898 +++ b/drivers/ide/jmicron.c
31899 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31900 .cable_detect = jmicron_cable_detect,
31901 };
31902
31903 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31904 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31905 .name = DRV_NAME,
31906 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31907 .port_ops = &jmicron_port_ops,
31908 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31909 index 95327a2..73f78d8 100644
31910 --- a/drivers/ide/ns87415.c
31911 +++ b/drivers/ide/ns87415.c
31912 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31913 .dma_sff_read_status = superio_dma_sff_read_status,
31914 };
31915
31916 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31917 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31918 .name = DRV_NAME,
31919 .init_hwif = init_hwif_ns87415,
31920 .tp_ops = &ns87415_tp_ops,
31921 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31922 index 1a53a4c..39edc66 100644
31923 --- a/drivers/ide/opti621.c
31924 +++ b/drivers/ide/opti621.c
31925 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31926 .set_pio_mode = opti621_set_pio_mode,
31927 };
31928
31929 -static const struct ide_port_info opti621_chipset __devinitdata = {
31930 +static const struct ide_port_info opti621_chipset __devinitconst = {
31931 .name = DRV_NAME,
31932 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31933 .port_ops = &opti621_port_ops,
31934 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31935 index 9546fe2..2e5ceb6 100644
31936 --- a/drivers/ide/pdc202xx_new.c
31937 +++ b/drivers/ide/pdc202xx_new.c
31938 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31939 .udma_mask = udma, \
31940 }
31941
31942 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31943 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31944 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31945 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31946 };
31947 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31948 index 3a35ec6..5634510 100644
31949 --- a/drivers/ide/pdc202xx_old.c
31950 +++ b/drivers/ide/pdc202xx_old.c
31951 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31952 .max_sectors = sectors, \
31953 }
31954
31955 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31956 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31957 { /* 0: PDC20246 */
31958 .name = DRV_NAME,
31959 .init_chipset = init_chipset_pdc202xx,
31960 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31961 index 1892e81..fe0fd60 100644
31962 --- a/drivers/ide/piix.c
31963 +++ b/drivers/ide/piix.c
31964 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31965 .udma_mask = udma, \
31966 }
31967
31968 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31969 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31970 /* 0: MPIIX */
31971 { /*
31972 * MPIIX actually has only a single IDE channel mapped to
31973 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31974 index a6414a8..c04173e 100644
31975 --- a/drivers/ide/rz1000.c
31976 +++ b/drivers/ide/rz1000.c
31977 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31978 }
31979 }
31980
31981 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31982 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31983 .name = DRV_NAME,
31984 .host_flags = IDE_HFLAG_NO_DMA,
31985 };
31986 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31987 index 356b9b5..d4758eb 100644
31988 --- a/drivers/ide/sc1200.c
31989 +++ b/drivers/ide/sc1200.c
31990 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31991 .dma_sff_read_status = ide_dma_sff_read_status,
31992 };
31993
31994 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31995 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31996 .name = DRV_NAME,
31997 .port_ops = &sc1200_port_ops,
31998 .dma_ops = &sc1200_dma_ops,
31999 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
32000 index b7f5b0c..9701038 100644
32001 --- a/drivers/ide/scc_pata.c
32002 +++ b/drivers/ide/scc_pata.c
32003 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
32004 .dma_sff_read_status = scc_dma_sff_read_status,
32005 };
32006
32007 -static const struct ide_port_info scc_chipset __devinitdata = {
32008 +static const struct ide_port_info scc_chipset __devinitconst = {
32009 .name = "sccIDE",
32010 .init_iops = init_iops_scc,
32011 .init_dma = scc_init_dma,
32012 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
32013 index 35fb8da..24d72ef 100644
32014 --- a/drivers/ide/serverworks.c
32015 +++ b/drivers/ide/serverworks.c
32016 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
32017 .cable_detect = svwks_cable_detect,
32018 };
32019
32020 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
32021 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
32022 { /* 0: OSB4 */
32023 .name = DRV_NAME,
32024 .init_chipset = init_chipset_svwks,
32025 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
32026 index ddeda44..46f7e30 100644
32027 --- a/drivers/ide/siimage.c
32028 +++ b/drivers/ide/siimage.c
32029 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
32030 .udma_mask = ATA_UDMA6, \
32031 }
32032
32033 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
32034 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
32035 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
32036 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
32037 };
32038 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
32039 index 4a00225..09e61b4 100644
32040 --- a/drivers/ide/sis5513.c
32041 +++ b/drivers/ide/sis5513.c
32042 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
32043 .cable_detect = sis_cable_detect,
32044 };
32045
32046 -static const struct ide_port_info sis5513_chipset __devinitdata = {
32047 +static const struct ide_port_info sis5513_chipset __devinitconst = {
32048 .name = DRV_NAME,
32049 .init_chipset = init_chipset_sis5513,
32050 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
32051 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
32052 index f21dc2a..d051cd2 100644
32053 --- a/drivers/ide/sl82c105.c
32054 +++ b/drivers/ide/sl82c105.c
32055 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
32056 .dma_sff_read_status = ide_dma_sff_read_status,
32057 };
32058
32059 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
32060 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
32061 .name = DRV_NAME,
32062 .init_chipset = init_chipset_sl82c105,
32063 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
32064 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
32065 index 864ffe0..863a5e9 100644
32066 --- a/drivers/ide/slc90e66.c
32067 +++ b/drivers/ide/slc90e66.c
32068 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
32069 .cable_detect = slc90e66_cable_detect,
32070 };
32071
32072 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
32073 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
32074 .name = DRV_NAME,
32075 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
32076 .port_ops = &slc90e66_port_ops,
32077 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
32078 index 4799d5c..1794678 100644
32079 --- a/drivers/ide/tc86c001.c
32080 +++ b/drivers/ide/tc86c001.c
32081 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
32082 .dma_sff_read_status = ide_dma_sff_read_status,
32083 };
32084
32085 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
32086 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
32087 .name = DRV_NAME,
32088 .init_hwif = init_hwif_tc86c001,
32089 .port_ops = &tc86c001_port_ops,
32090 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
32091 index 281c914..55ce1b8 100644
32092 --- a/drivers/ide/triflex.c
32093 +++ b/drivers/ide/triflex.c
32094 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
32095 .set_dma_mode = triflex_set_mode,
32096 };
32097
32098 -static const struct ide_port_info triflex_device __devinitdata = {
32099 +static const struct ide_port_info triflex_device __devinitconst = {
32100 .name = DRV_NAME,
32101 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
32102 .port_ops = &triflex_port_ops,
32103 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
32104 index 4b42ca0..e494a98 100644
32105 --- a/drivers/ide/trm290.c
32106 +++ b/drivers/ide/trm290.c
32107 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
32108 .dma_check = trm290_dma_check,
32109 };
32110
32111 -static const struct ide_port_info trm290_chipset __devinitdata = {
32112 +static const struct ide_port_info trm290_chipset __devinitconst = {
32113 .name = DRV_NAME,
32114 .init_hwif = init_hwif_trm290,
32115 .tp_ops = &trm290_tp_ops,
32116 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32117 index f46f49c..eb77678 100644
32118 --- a/drivers/ide/via82cxxx.c
32119 +++ b/drivers/ide/via82cxxx.c
32120 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
32121 .cable_detect = via82cxxx_cable_detect,
32122 };
32123
32124 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32125 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32126 .name = DRV_NAME,
32127 .init_chipset = init_chipset_via82cxxx,
32128 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
32129 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
32130 index 73d4531..c90cd2d 100644
32131 --- a/drivers/ieee802154/fakehard.c
32132 +++ b/drivers/ieee802154/fakehard.c
32133 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32134 phy->transmit_power = 0xbf;
32135
32136 dev->netdev_ops = &fake_ops;
32137 - dev->ml_priv = &fake_mlme;
32138 + dev->ml_priv = (void *)&fake_mlme;
32139
32140 priv = netdev_priv(dev);
32141 priv->phy = phy;
32142 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32143 index c889aae..6cf5aa7 100644
32144 --- a/drivers/infiniband/core/cm.c
32145 +++ b/drivers/infiniband/core/cm.c
32146 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32147
32148 struct cm_counter_group {
32149 struct kobject obj;
32150 - atomic_long_t counter[CM_ATTR_COUNT];
32151 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32152 };
32153
32154 struct cm_counter_attribute {
32155 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32156 struct ib_mad_send_buf *msg = NULL;
32157 int ret;
32158
32159 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32160 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32161 counter[CM_REQ_COUNTER]);
32162
32163 /* Quick state check to discard duplicate REQs. */
32164 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32165 if (!cm_id_priv)
32166 return;
32167
32168 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32169 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32170 counter[CM_REP_COUNTER]);
32171 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32172 if (ret)
32173 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32174 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32175 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32176 spin_unlock_irq(&cm_id_priv->lock);
32177 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32178 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32179 counter[CM_RTU_COUNTER]);
32180 goto out;
32181 }
32182 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32183 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32184 dreq_msg->local_comm_id);
32185 if (!cm_id_priv) {
32186 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32187 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32188 counter[CM_DREQ_COUNTER]);
32189 cm_issue_drep(work->port, work->mad_recv_wc);
32190 return -EINVAL;
32191 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32192 case IB_CM_MRA_REP_RCVD:
32193 break;
32194 case IB_CM_TIMEWAIT:
32195 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32196 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32197 counter[CM_DREQ_COUNTER]);
32198 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32199 goto unlock;
32200 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32201 cm_free_msg(msg);
32202 goto deref;
32203 case IB_CM_DREQ_RCVD:
32204 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32205 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32206 counter[CM_DREQ_COUNTER]);
32207 goto unlock;
32208 default:
32209 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32210 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32211 cm_id_priv->msg, timeout)) {
32212 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32213 - atomic_long_inc(&work->port->
32214 + atomic_long_inc_unchecked(&work->port->
32215 counter_group[CM_RECV_DUPLICATES].
32216 counter[CM_MRA_COUNTER]);
32217 goto out;
32218 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32219 break;
32220 case IB_CM_MRA_REQ_RCVD:
32221 case IB_CM_MRA_REP_RCVD:
32222 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32223 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32224 counter[CM_MRA_COUNTER]);
32225 /* fall through */
32226 default:
32227 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32228 case IB_CM_LAP_IDLE:
32229 break;
32230 case IB_CM_MRA_LAP_SENT:
32231 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32232 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32233 counter[CM_LAP_COUNTER]);
32234 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32235 goto unlock;
32236 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32237 cm_free_msg(msg);
32238 goto deref;
32239 case IB_CM_LAP_RCVD:
32240 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32241 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32242 counter[CM_LAP_COUNTER]);
32243 goto unlock;
32244 default:
32245 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32246 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32247 if (cur_cm_id_priv) {
32248 spin_unlock_irq(&cm.lock);
32249 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32250 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32251 counter[CM_SIDR_REQ_COUNTER]);
32252 goto out; /* Duplicate message. */
32253 }
32254 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32255 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32256 msg->retries = 1;
32257
32258 - atomic_long_add(1 + msg->retries,
32259 + atomic_long_add_unchecked(1 + msg->retries,
32260 &port->counter_group[CM_XMIT].counter[attr_index]);
32261 if (msg->retries)
32262 - atomic_long_add(msg->retries,
32263 + atomic_long_add_unchecked(msg->retries,
32264 &port->counter_group[CM_XMIT_RETRIES].
32265 counter[attr_index]);
32266
32267 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32268 }
32269
32270 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32271 - atomic_long_inc(&port->counter_group[CM_RECV].
32272 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32273 counter[attr_id - CM_ATTR_ID_OFFSET]);
32274
32275 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32276 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32277 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32278
32279 return sprintf(buf, "%ld\n",
32280 - atomic_long_read(&group->counter[cm_attr->index]));
32281 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32282 }
32283
32284 static const struct sysfs_ops cm_counter_ops = {
32285 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32286 index 176c8f9..2627b62 100644
32287 --- a/drivers/infiniband/core/fmr_pool.c
32288 +++ b/drivers/infiniband/core/fmr_pool.c
32289 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32290
32291 struct task_struct *thread;
32292
32293 - atomic_t req_ser;
32294 - atomic_t flush_ser;
32295 + atomic_unchecked_t req_ser;
32296 + atomic_unchecked_t flush_ser;
32297
32298 wait_queue_head_t force_wait;
32299 };
32300 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32301 struct ib_fmr_pool *pool = pool_ptr;
32302
32303 do {
32304 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32305 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32306 ib_fmr_batch_release(pool);
32307
32308 - atomic_inc(&pool->flush_ser);
32309 + atomic_inc_unchecked(&pool->flush_ser);
32310 wake_up_interruptible(&pool->force_wait);
32311
32312 if (pool->flush_function)
32313 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32314 }
32315
32316 set_current_state(TASK_INTERRUPTIBLE);
32317 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32318 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32319 !kthread_should_stop())
32320 schedule();
32321 __set_current_state(TASK_RUNNING);
32322 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32323 pool->dirty_watermark = params->dirty_watermark;
32324 pool->dirty_len = 0;
32325 spin_lock_init(&pool->pool_lock);
32326 - atomic_set(&pool->req_ser, 0);
32327 - atomic_set(&pool->flush_ser, 0);
32328 + atomic_set_unchecked(&pool->req_ser, 0);
32329 + atomic_set_unchecked(&pool->flush_ser, 0);
32330 init_waitqueue_head(&pool->force_wait);
32331
32332 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32333 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32334 }
32335 spin_unlock_irq(&pool->pool_lock);
32336
32337 - serial = atomic_inc_return(&pool->req_ser);
32338 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32339 wake_up_process(pool->thread);
32340
32341 if (wait_event_interruptible(pool->force_wait,
32342 - atomic_read(&pool->flush_ser) - serial >= 0))
32343 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32344 return -EINTR;
32345
32346 return 0;
32347 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32348 } else {
32349 list_add_tail(&fmr->list, &pool->dirty_list);
32350 if (++pool->dirty_len >= pool->dirty_watermark) {
32351 - atomic_inc(&pool->req_ser);
32352 + atomic_inc_unchecked(&pool->req_ser);
32353 wake_up_process(pool->thread);
32354 }
32355 }
32356 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32357 index 40c8353..946b0e4 100644
32358 --- a/drivers/infiniband/hw/cxgb4/mem.c
32359 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32360 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32361 int err;
32362 struct fw_ri_tpte tpt;
32363 u32 stag_idx;
32364 - static atomic_t key;
32365 + static atomic_unchecked_t key;
32366
32367 if (c4iw_fatal_error(rdev))
32368 return -EIO;
32369 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32370 &rdev->resource.tpt_fifo_lock);
32371 if (!stag_idx)
32372 return -ENOMEM;
32373 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32374 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32375 }
32376 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32377 __func__, stag_state, type, pdid, stag_idx);
32378 diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
32379 index a4de9d5..5fa20c3 100644
32380 --- a/drivers/infiniband/hw/ipath/ipath_fs.c
32381 +++ b/drivers/infiniband/hw/ipath/ipath_fs.c
32382 @@ -126,6 +126,8 @@ static const struct file_operations atomic_counters_ops = {
32383 };
32384
32385 static ssize_t flash_read(struct file *file, char __user *buf,
32386 + size_t count, loff_t *ppos) __size_overflow(3);
32387 +static ssize_t flash_read(struct file *file, char __user *buf,
32388 size_t count, loff_t *ppos)
32389 {
32390 struct ipath_devdata *dd;
32391 @@ -177,6 +179,8 @@ bail:
32392 }
32393
32394 static ssize_t flash_write(struct file *file, const char __user *buf,
32395 + size_t count, loff_t *ppos) __size_overflow(3);
32396 +static ssize_t flash_write(struct file *file, const char __user *buf,
32397 size_t count, loff_t *ppos)
32398 {
32399 struct ipath_devdata *dd;
32400 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32401 index 79b3dbc..96e5fcc 100644
32402 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32403 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32404 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32405 struct ib_atomic_eth *ateth;
32406 struct ipath_ack_entry *e;
32407 u64 vaddr;
32408 - atomic64_t *maddr;
32409 + atomic64_unchecked_t *maddr;
32410 u64 sdata;
32411 u32 rkey;
32412 u8 next;
32413 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32414 IB_ACCESS_REMOTE_ATOMIC)))
32415 goto nack_acc_unlck;
32416 /* Perform atomic OP and save result. */
32417 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32418 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32419 sdata = be64_to_cpu(ateth->swap_data);
32420 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32421 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32422 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32423 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32424 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32425 be64_to_cpu(ateth->compare_data),
32426 sdata);
32427 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32428 index 1f95bba..9530f87 100644
32429 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32430 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32431 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32432 unsigned long flags;
32433 struct ib_wc wc;
32434 u64 sdata;
32435 - atomic64_t *maddr;
32436 + atomic64_unchecked_t *maddr;
32437 enum ib_wc_status send_status;
32438
32439 /*
32440 @@ -382,11 +382,11 @@ again:
32441 IB_ACCESS_REMOTE_ATOMIC)))
32442 goto acc_err;
32443 /* Perform atomic OP and save result. */
32444 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32445 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32446 sdata = wqe->wr.wr.atomic.compare_add;
32447 *(u64 *) sqp->s_sge.sge.vaddr =
32448 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32449 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32450 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32451 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32452 sdata, wqe->wr.wr.atomic.swap);
32453 goto send_comp;
32454 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32455 index 7140199..da60063 100644
32456 --- a/drivers/infiniband/hw/nes/nes.c
32457 +++ b/drivers/infiniband/hw/nes/nes.c
32458 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32459 LIST_HEAD(nes_adapter_list);
32460 static LIST_HEAD(nes_dev_list);
32461
32462 -atomic_t qps_destroyed;
32463 +atomic_unchecked_t qps_destroyed;
32464
32465 static unsigned int ee_flsh_adapter;
32466 static unsigned int sysfs_nonidx_addr;
32467 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32468 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32469 struct nes_adapter *nesadapter = nesdev->nesadapter;
32470
32471 - atomic_inc(&qps_destroyed);
32472 + atomic_inc_unchecked(&qps_destroyed);
32473
32474 /* Free the control structures */
32475
32476 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32477 index c438e46..ca30356 100644
32478 --- a/drivers/infiniband/hw/nes/nes.h
32479 +++ b/drivers/infiniband/hw/nes/nes.h
32480 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32481 extern unsigned int wqm_quanta;
32482 extern struct list_head nes_adapter_list;
32483
32484 -extern atomic_t cm_connects;
32485 -extern atomic_t cm_accepts;
32486 -extern atomic_t cm_disconnects;
32487 -extern atomic_t cm_closes;
32488 -extern atomic_t cm_connecteds;
32489 -extern atomic_t cm_connect_reqs;
32490 -extern atomic_t cm_rejects;
32491 -extern atomic_t mod_qp_timouts;
32492 -extern atomic_t qps_created;
32493 -extern atomic_t qps_destroyed;
32494 -extern atomic_t sw_qps_destroyed;
32495 +extern atomic_unchecked_t cm_connects;
32496 +extern atomic_unchecked_t cm_accepts;
32497 +extern atomic_unchecked_t cm_disconnects;
32498 +extern atomic_unchecked_t cm_closes;
32499 +extern atomic_unchecked_t cm_connecteds;
32500 +extern atomic_unchecked_t cm_connect_reqs;
32501 +extern atomic_unchecked_t cm_rejects;
32502 +extern atomic_unchecked_t mod_qp_timouts;
32503 +extern atomic_unchecked_t qps_created;
32504 +extern atomic_unchecked_t qps_destroyed;
32505 +extern atomic_unchecked_t sw_qps_destroyed;
32506 extern u32 mh_detected;
32507 extern u32 mh_pauses_sent;
32508 extern u32 cm_packets_sent;
32509 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32510 extern u32 cm_packets_received;
32511 extern u32 cm_packets_dropped;
32512 extern u32 cm_packets_retrans;
32513 -extern atomic_t cm_listens_created;
32514 -extern atomic_t cm_listens_destroyed;
32515 +extern atomic_unchecked_t cm_listens_created;
32516 +extern atomic_unchecked_t cm_listens_destroyed;
32517 extern u32 cm_backlog_drops;
32518 -extern atomic_t cm_loopbacks;
32519 -extern atomic_t cm_nodes_created;
32520 -extern atomic_t cm_nodes_destroyed;
32521 -extern atomic_t cm_accel_dropped_pkts;
32522 -extern atomic_t cm_resets_recvd;
32523 -extern atomic_t pau_qps_created;
32524 -extern atomic_t pau_qps_destroyed;
32525 +extern atomic_unchecked_t cm_loopbacks;
32526 +extern atomic_unchecked_t cm_nodes_created;
32527 +extern atomic_unchecked_t cm_nodes_destroyed;
32528 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32529 +extern atomic_unchecked_t cm_resets_recvd;
32530 +extern atomic_unchecked_t pau_qps_created;
32531 +extern atomic_unchecked_t pau_qps_destroyed;
32532
32533 extern u32 int_mod_timer_init;
32534 extern u32 int_mod_cq_depth_256;
32535 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32536 index a4972ab..1bcfc31 100644
32537 --- a/drivers/infiniband/hw/nes/nes_cm.c
32538 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32539 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32540 u32 cm_packets_retrans;
32541 u32 cm_packets_created;
32542 u32 cm_packets_received;
32543 -atomic_t cm_listens_created;
32544 -atomic_t cm_listens_destroyed;
32545 +atomic_unchecked_t cm_listens_created;
32546 +atomic_unchecked_t cm_listens_destroyed;
32547 u32 cm_backlog_drops;
32548 -atomic_t cm_loopbacks;
32549 -atomic_t cm_nodes_created;
32550 -atomic_t cm_nodes_destroyed;
32551 -atomic_t cm_accel_dropped_pkts;
32552 -atomic_t cm_resets_recvd;
32553 +atomic_unchecked_t cm_loopbacks;
32554 +atomic_unchecked_t cm_nodes_created;
32555 +atomic_unchecked_t cm_nodes_destroyed;
32556 +atomic_unchecked_t cm_accel_dropped_pkts;
32557 +atomic_unchecked_t cm_resets_recvd;
32558
32559 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32560 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32561 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32562
32563 static struct nes_cm_core *g_cm_core;
32564
32565 -atomic_t cm_connects;
32566 -atomic_t cm_accepts;
32567 -atomic_t cm_disconnects;
32568 -atomic_t cm_closes;
32569 -atomic_t cm_connecteds;
32570 -atomic_t cm_connect_reqs;
32571 -atomic_t cm_rejects;
32572 +atomic_unchecked_t cm_connects;
32573 +atomic_unchecked_t cm_accepts;
32574 +atomic_unchecked_t cm_disconnects;
32575 +atomic_unchecked_t cm_closes;
32576 +atomic_unchecked_t cm_connecteds;
32577 +atomic_unchecked_t cm_connect_reqs;
32578 +atomic_unchecked_t cm_rejects;
32579
32580 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32581 {
32582 @@ -1274,7 +1274,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32583 kfree(listener);
32584 listener = NULL;
32585 ret = 0;
32586 - atomic_inc(&cm_listens_destroyed);
32587 + atomic_inc_unchecked(&cm_listens_destroyed);
32588 } else {
32589 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32590 }
32591 @@ -1473,7 +1473,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32592 cm_node->rem_mac);
32593
32594 add_hte_node(cm_core, cm_node);
32595 - atomic_inc(&cm_nodes_created);
32596 + atomic_inc_unchecked(&cm_nodes_created);
32597
32598 return cm_node;
32599 }
32600 @@ -1531,7 +1531,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32601 }
32602
32603 atomic_dec(&cm_core->node_cnt);
32604 - atomic_inc(&cm_nodes_destroyed);
32605 + atomic_inc_unchecked(&cm_nodes_destroyed);
32606 nesqp = cm_node->nesqp;
32607 if (nesqp) {
32608 nesqp->cm_node = NULL;
32609 @@ -1595,7 +1595,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32610
32611 static void drop_packet(struct sk_buff *skb)
32612 {
32613 - atomic_inc(&cm_accel_dropped_pkts);
32614 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32615 dev_kfree_skb_any(skb);
32616 }
32617
32618 @@ -1658,7 +1658,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32619 {
32620
32621 int reset = 0; /* whether to send reset in case of err.. */
32622 - atomic_inc(&cm_resets_recvd);
32623 + atomic_inc_unchecked(&cm_resets_recvd);
32624 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32625 " refcnt=%d\n", cm_node, cm_node->state,
32626 atomic_read(&cm_node->ref_count));
32627 @@ -2299,7 +2299,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32628 rem_ref_cm_node(cm_node->cm_core, cm_node);
32629 return NULL;
32630 }
32631 - atomic_inc(&cm_loopbacks);
32632 + atomic_inc_unchecked(&cm_loopbacks);
32633 loopbackremotenode->loopbackpartner = cm_node;
32634 loopbackremotenode->tcp_cntxt.rcv_wscale =
32635 NES_CM_DEFAULT_RCV_WND_SCALE;
32636 @@ -2574,7 +2574,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32637 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32638 else {
32639 rem_ref_cm_node(cm_core, cm_node);
32640 - atomic_inc(&cm_accel_dropped_pkts);
32641 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32642 dev_kfree_skb_any(skb);
32643 }
32644 break;
32645 @@ -2881,7 +2881,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32646
32647 if ((cm_id) && (cm_id->event_handler)) {
32648 if (issue_disconn) {
32649 - atomic_inc(&cm_disconnects);
32650 + atomic_inc_unchecked(&cm_disconnects);
32651 cm_event.event = IW_CM_EVENT_DISCONNECT;
32652 cm_event.status = disconn_status;
32653 cm_event.local_addr = cm_id->local_addr;
32654 @@ -2903,7 +2903,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32655 }
32656
32657 if (issue_close) {
32658 - atomic_inc(&cm_closes);
32659 + atomic_inc_unchecked(&cm_closes);
32660 nes_disconnect(nesqp, 1);
32661
32662 cm_id->provider_data = nesqp;
32663 @@ -3039,7 +3039,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32664
32665 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32666 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32667 - atomic_inc(&cm_accepts);
32668 + atomic_inc_unchecked(&cm_accepts);
32669
32670 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32671 netdev_refcnt_read(nesvnic->netdev));
32672 @@ -3241,7 +3241,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32673 struct nes_cm_core *cm_core;
32674 u8 *start_buff;
32675
32676 - atomic_inc(&cm_rejects);
32677 + atomic_inc_unchecked(&cm_rejects);
32678 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32679 loopback = cm_node->loopbackpartner;
32680 cm_core = cm_node->cm_core;
32681 @@ -3301,7 +3301,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32682 ntohl(cm_id->local_addr.sin_addr.s_addr),
32683 ntohs(cm_id->local_addr.sin_port));
32684
32685 - atomic_inc(&cm_connects);
32686 + atomic_inc_unchecked(&cm_connects);
32687 nesqp->active_conn = 1;
32688
32689 /* cache the cm_id in the qp */
32690 @@ -3407,7 +3407,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32691 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32692 return err;
32693 }
32694 - atomic_inc(&cm_listens_created);
32695 + atomic_inc_unchecked(&cm_listens_created);
32696 }
32697
32698 cm_id->add_ref(cm_id);
32699 @@ -3508,7 +3508,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32700
32701 if (nesqp->destroyed)
32702 return;
32703 - atomic_inc(&cm_connecteds);
32704 + atomic_inc_unchecked(&cm_connecteds);
32705 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32706 " local port 0x%04X. jiffies = %lu.\n",
32707 nesqp->hwqp.qp_id,
32708 @@ -3695,7 +3695,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32709
32710 cm_id->add_ref(cm_id);
32711 ret = cm_id->event_handler(cm_id, &cm_event);
32712 - atomic_inc(&cm_closes);
32713 + atomic_inc_unchecked(&cm_closes);
32714 cm_event.event = IW_CM_EVENT_CLOSE;
32715 cm_event.status = 0;
32716 cm_event.provider_data = cm_id->provider_data;
32717 @@ -3731,7 +3731,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32718 return;
32719 cm_id = cm_node->cm_id;
32720
32721 - atomic_inc(&cm_connect_reqs);
32722 + atomic_inc_unchecked(&cm_connect_reqs);
32723 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32724 cm_node, cm_id, jiffies);
32725
32726 @@ -3771,7 +3771,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32727 return;
32728 cm_id = cm_node->cm_id;
32729
32730 - atomic_inc(&cm_connect_reqs);
32731 + atomic_inc_unchecked(&cm_connect_reqs);
32732 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32733 cm_node, cm_id, jiffies);
32734
32735 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32736 index 3ba7be3..c81f6ff 100644
32737 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32738 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32739 @@ -40,8 +40,8 @@
32740 #include "nes.h"
32741 #include "nes_mgt.h"
32742
32743 -atomic_t pau_qps_created;
32744 -atomic_t pau_qps_destroyed;
32745 +atomic_unchecked_t pau_qps_created;
32746 +atomic_unchecked_t pau_qps_destroyed;
32747
32748 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32749 {
32750 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32751 {
32752 struct sk_buff *skb;
32753 unsigned long flags;
32754 - atomic_inc(&pau_qps_destroyed);
32755 + atomic_inc_unchecked(&pau_qps_destroyed);
32756
32757 /* Free packets that have not yet been forwarded */
32758 /* Lock is acquired by skb_dequeue when removing the skb */
32759 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32760 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32761 skb_queue_head_init(&nesqp->pau_list);
32762 spin_lock_init(&nesqp->pau_lock);
32763 - atomic_inc(&pau_qps_created);
32764 + atomic_inc_unchecked(&pau_qps_created);
32765 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32766 }
32767
32768 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32769 index f3a3ecf..57d311d 100644
32770 --- a/drivers/infiniband/hw/nes/nes_nic.c
32771 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32772 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32773 target_stat_values[++index] = mh_detected;
32774 target_stat_values[++index] = mh_pauses_sent;
32775 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32776 - target_stat_values[++index] = atomic_read(&cm_connects);
32777 - target_stat_values[++index] = atomic_read(&cm_accepts);
32778 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32779 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32780 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32781 - target_stat_values[++index] = atomic_read(&cm_rejects);
32782 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32783 - target_stat_values[++index] = atomic_read(&qps_created);
32784 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32785 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32786 - target_stat_values[++index] = atomic_read(&cm_closes);
32787 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32788 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32789 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32790 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32791 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32792 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32793 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32794 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32795 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32796 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32797 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32798 target_stat_values[++index] = cm_packets_sent;
32799 target_stat_values[++index] = cm_packets_bounced;
32800 target_stat_values[++index] = cm_packets_created;
32801 target_stat_values[++index] = cm_packets_received;
32802 target_stat_values[++index] = cm_packets_dropped;
32803 target_stat_values[++index] = cm_packets_retrans;
32804 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32805 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32806 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32807 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32808 target_stat_values[++index] = cm_backlog_drops;
32809 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32810 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32811 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32812 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32813 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32814 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32815 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32816 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32817 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32818 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32819 target_stat_values[++index] = nesadapter->free_4kpbl;
32820 target_stat_values[++index] = nesadapter->free_256pbl;
32821 target_stat_values[++index] = int_mod_timer_init;
32822 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32823 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32824 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32825 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32826 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32827 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32828 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32829 }
32830
32831 /**
32832 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32833 index 0927b5c..ed67986 100644
32834 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32835 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32836 @@ -46,9 +46,9 @@
32837
32838 #include <rdma/ib_umem.h>
32839
32840 -atomic_t mod_qp_timouts;
32841 -atomic_t qps_created;
32842 -atomic_t sw_qps_destroyed;
32843 +atomic_unchecked_t mod_qp_timouts;
32844 +atomic_unchecked_t qps_created;
32845 +atomic_unchecked_t sw_qps_destroyed;
32846
32847 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32848
32849 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32850 if (init_attr->create_flags)
32851 return ERR_PTR(-EINVAL);
32852
32853 - atomic_inc(&qps_created);
32854 + atomic_inc_unchecked(&qps_created);
32855 switch (init_attr->qp_type) {
32856 case IB_QPT_RC:
32857 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32858 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32859 struct iw_cm_event cm_event;
32860 int ret = 0;
32861
32862 - atomic_inc(&sw_qps_destroyed);
32863 + atomic_inc_unchecked(&sw_qps_destroyed);
32864 nesqp->destroyed = 1;
32865
32866 /* Blow away the connection if it exists. */
32867 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32868 index b881bdc..c2e360c 100644
32869 --- a/drivers/infiniband/hw/qib/qib.h
32870 +++ b/drivers/infiniband/hw/qib/qib.h
32871 @@ -51,6 +51,7 @@
32872 #include <linux/completion.h>
32873 #include <linux/kref.h>
32874 #include <linux/sched.h>
32875 +#include <linux/slab.h>
32876
32877 #include "qib_common.h"
32878 #include "qib_verbs.h"
32879 diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
32880 index 05e0f17..0275789 100644
32881 --- a/drivers/infiniband/hw/qib/qib_fs.c
32882 +++ b/drivers/infiniband/hw/qib/qib_fs.c
32883 @@ -267,6 +267,8 @@ static const struct file_operations qsfp_ops[] = {
32884 };
32885
32886 static ssize_t flash_read(struct file *file, char __user *buf,
32887 + size_t count, loff_t *ppos) __size_overflow(3);
32888 +static ssize_t flash_read(struct file *file, char __user *buf,
32889 size_t count, loff_t *ppos)
32890 {
32891 struct qib_devdata *dd;
32892 @@ -318,6 +320,8 @@ bail:
32893 }
32894
32895 static ssize_t flash_write(struct file *file, const char __user *buf,
32896 + size_t count, loff_t *ppos) __size_overflow(3);
32897 +static ssize_t flash_write(struct file *file, const char __user *buf,
32898 size_t count, loff_t *ppos)
32899 {
32900 struct qib_devdata *dd;
32901 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32902 index c351aa4..e6967c2 100644
32903 --- a/drivers/input/gameport/gameport.c
32904 +++ b/drivers/input/gameport/gameport.c
32905 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32906 */
32907 static void gameport_init_port(struct gameport *gameport)
32908 {
32909 - static atomic_t gameport_no = ATOMIC_INIT(0);
32910 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32911
32912 __module_get(THIS_MODULE);
32913
32914 mutex_init(&gameport->drv_mutex);
32915 device_initialize(&gameport->dev);
32916 dev_set_name(&gameport->dev, "gameport%lu",
32917 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32918 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32919 gameport->dev.bus = &gameport_bus;
32920 gameport->dev.release = gameport_release_port;
32921 if (gameport->parent)
32922 diff --git a/drivers/input/input.c b/drivers/input/input.c
32923 index 1f78c95..3cddc6c 100644
32924 --- a/drivers/input/input.c
32925 +++ b/drivers/input/input.c
32926 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32927 */
32928 int input_register_device(struct input_dev *dev)
32929 {
32930 - static atomic_t input_no = ATOMIC_INIT(0);
32931 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32932 struct input_handler *handler;
32933 const char *path;
32934 int error;
32935 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32936 dev->setkeycode = input_default_setkeycode;
32937
32938 dev_set_name(&dev->dev, "input%ld",
32939 - (unsigned long) atomic_inc_return(&input_no) - 1);
32940 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32941
32942 error = device_add(&dev->dev);
32943 if (error)
32944 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32945 index b8d8611..7a4a04b 100644
32946 --- a/drivers/input/joystick/sidewinder.c
32947 +++ b/drivers/input/joystick/sidewinder.c
32948 @@ -30,6 +30,7 @@
32949 #include <linux/kernel.h>
32950 #include <linux/module.h>
32951 #include <linux/slab.h>
32952 +#include <linux/sched.h>
32953 #include <linux/init.h>
32954 #include <linux/input.h>
32955 #include <linux/gameport.h>
32956 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32957 index fd7a0d5..a4af10c 100644
32958 --- a/drivers/input/joystick/xpad.c
32959 +++ b/drivers/input/joystick/xpad.c
32960 @@ -710,7 +710,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32961
32962 static int xpad_led_probe(struct usb_xpad *xpad)
32963 {
32964 - static atomic_t led_seq = ATOMIC_INIT(0);
32965 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32966 long led_no;
32967 struct xpad_led *led;
32968 struct led_classdev *led_cdev;
32969 @@ -723,7 +723,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32970 if (!led)
32971 return -ENOMEM;
32972
32973 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32974 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32975
32976 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32977 led->xpad = xpad;
32978 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32979 index 0110b5a..d3ad144 100644
32980 --- a/drivers/input/mousedev.c
32981 +++ b/drivers/input/mousedev.c
32982 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32983
32984 spin_unlock_irq(&client->packet_lock);
32985
32986 - if (copy_to_user(buffer, data, count))
32987 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32988 return -EFAULT;
32989
32990 return count;
32991 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32992 index ba70058..571d25d 100644
32993 --- a/drivers/input/serio/serio.c
32994 +++ b/drivers/input/serio/serio.c
32995 @@ -497,7 +497,7 @@ static void serio_release_port(struct device *dev)
32996 */
32997 static void serio_init_port(struct serio *serio)
32998 {
32999 - static atomic_t serio_no = ATOMIC_INIT(0);
33000 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
33001
33002 __module_get(THIS_MODULE);
33003
33004 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio *serio)
33005 mutex_init(&serio->drv_mutex);
33006 device_initialize(&serio->dev);
33007 dev_set_name(&serio->dev, "serio%ld",
33008 - (long)atomic_inc_return(&serio_no) - 1);
33009 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
33010 serio->dev.bus = &serio_bus;
33011 serio->dev.release = serio_release_port;
33012 serio->dev.groups = serio_device_attr_groups;
33013 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
33014 index e44933d..9ba484a 100644
33015 --- a/drivers/isdn/capi/capi.c
33016 +++ b/drivers/isdn/capi/capi.c
33017 @@ -83,8 +83,8 @@ struct capiminor {
33018
33019 struct capi20_appl *ap;
33020 u32 ncci;
33021 - atomic_t datahandle;
33022 - atomic_t msgid;
33023 + atomic_unchecked_t datahandle;
33024 + atomic_unchecked_t msgid;
33025
33026 struct tty_port port;
33027 int ttyinstop;
33028 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
33029 capimsg_setu16(s, 2, mp->ap->applid);
33030 capimsg_setu8 (s, 4, CAPI_DATA_B3);
33031 capimsg_setu8 (s, 5, CAPI_RESP);
33032 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
33033 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
33034 capimsg_setu32(s, 8, mp->ncci);
33035 capimsg_setu16(s, 12, datahandle);
33036 }
33037 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
33038 mp->outbytes -= len;
33039 spin_unlock_bh(&mp->outlock);
33040
33041 - datahandle = atomic_inc_return(&mp->datahandle);
33042 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
33043 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
33044 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33045 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
33046 capimsg_setu16(skb->data, 2, mp->ap->applid);
33047 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
33048 capimsg_setu8 (skb->data, 5, CAPI_REQ);
33049 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
33050 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
33051 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
33052 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
33053 capimsg_setu16(skb->data, 16, len); /* Data length */
33054 diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
33055 index db621db..825ea1a 100644
33056 --- a/drivers/isdn/gigaset/common.c
33057 +++ b/drivers/isdn/gigaset/common.c
33058 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
33059 cs->commands_pending = 0;
33060 cs->cur_at_seq = 0;
33061 cs->gotfwver = -1;
33062 - cs->open_count = 0;
33063 + local_set(&cs->open_count, 0);
33064 cs->dev = NULL;
33065 cs->tty = NULL;
33066 cs->tty_dev = NULL;
33067 diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
33068 index 212efaf..f187c6b 100644
33069 --- a/drivers/isdn/gigaset/gigaset.h
33070 +++ b/drivers/isdn/gigaset/gigaset.h
33071 @@ -35,6 +35,7 @@
33072 #include <linux/tty_driver.h>
33073 #include <linux/list.h>
33074 #include <linux/atomic.h>
33075 +#include <asm/local.h>
33076
33077 #define GIG_VERSION {0, 5, 0, 0}
33078 #define GIG_COMPAT {0, 4, 0, 0}
33079 @@ -433,7 +434,7 @@ struct cardstate {
33080 spinlock_t cmdlock;
33081 unsigned curlen, cmdbytes;
33082
33083 - unsigned open_count;
33084 + local_t open_count;
33085 struct tty_struct *tty;
33086 struct tasklet_struct if_wake_tasklet;
33087 unsigned control_state;
33088 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
33089 index ee0a549..a7c9798 100644
33090 --- a/drivers/isdn/gigaset/interface.c
33091 +++ b/drivers/isdn/gigaset/interface.c
33092 @@ -163,9 +163,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
33093 }
33094 tty->driver_data = cs;
33095
33096 - ++cs->open_count;
33097 -
33098 - if (cs->open_count == 1) {
33099 + if (local_inc_return(&cs->open_count) == 1) {
33100 spin_lock_irqsave(&cs->lock, flags);
33101 cs->tty = tty;
33102 spin_unlock_irqrestore(&cs->lock, flags);
33103 @@ -193,10 +191,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
33104
33105 if (!cs->connected)
33106 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33107 - else if (!cs->open_count)
33108 + else if (!local_read(&cs->open_count))
33109 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33110 else {
33111 - if (!--cs->open_count) {
33112 + if (!local_dec_return(&cs->open_count)) {
33113 spin_lock_irqsave(&cs->lock, flags);
33114 cs->tty = NULL;
33115 spin_unlock_irqrestore(&cs->lock, flags);
33116 @@ -231,7 +229,7 @@ static int if_ioctl(struct tty_struct *tty,
33117 if (!cs->connected) {
33118 gig_dbg(DEBUG_IF, "not connected");
33119 retval = -ENODEV;
33120 - } else if (!cs->open_count)
33121 + } else if (!local_read(&cs->open_count))
33122 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33123 else {
33124 retval = 0;
33125 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
33126 retval = -ENODEV;
33127 goto done;
33128 }
33129 - if (!cs->open_count) {
33130 + if (!local_read(&cs->open_count)) {
33131 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33132 retval = -ENODEV;
33133 goto done;
33134 @@ -414,7 +412,7 @@ static int if_write_room(struct tty_struct *tty)
33135 if (!cs->connected) {
33136 gig_dbg(DEBUG_IF, "not connected");
33137 retval = -ENODEV;
33138 - } else if (!cs->open_count)
33139 + } else if (!local_read(&cs->open_count))
33140 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33141 else if (cs->mstate != MS_LOCKED) {
33142 dev_warn(cs->dev, "can't write to unlocked device\n");
33143 @@ -444,7 +442,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
33144
33145 if (!cs->connected)
33146 gig_dbg(DEBUG_IF, "not connected");
33147 - else if (!cs->open_count)
33148 + else if (!local_read(&cs->open_count))
33149 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33150 else if (cs->mstate != MS_LOCKED)
33151 dev_warn(cs->dev, "can't write to unlocked device\n");
33152 @@ -472,7 +470,7 @@ static void if_throttle(struct tty_struct *tty)
33153
33154 if (!cs->connected)
33155 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33156 - else if (!cs->open_count)
33157 + else if (!local_read(&cs->open_count))
33158 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33159 else
33160 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33161 @@ -496,7 +494,7 @@ static void if_unthrottle(struct tty_struct *tty)
33162
33163 if (!cs->connected)
33164 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
33165 - else if (!cs->open_count)
33166 + else if (!local_read(&cs->open_count))
33167 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33168 else
33169 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
33170 @@ -527,7 +525,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
33171 goto out;
33172 }
33173
33174 - if (!cs->open_count) {
33175 + if (!local_read(&cs->open_count)) {
33176 dev_warn(cs->dev, "%s: device not opened\n", __func__);
33177 goto out;
33178 }
33179 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
33180 index 2a57da59..e7a12ed 100644
33181 --- a/drivers/isdn/hardware/avm/b1.c
33182 +++ b/drivers/isdn/hardware/avm/b1.c
33183 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
33184 }
33185 if (left) {
33186 if (t4file->user) {
33187 - if (copy_from_user(buf, dp, left))
33188 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33189 return -EFAULT;
33190 } else {
33191 memcpy(buf, dp, left);
33192 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
33193 }
33194 if (left) {
33195 if (config->user) {
33196 - if (copy_from_user(buf, dp, left))
33197 + if (left > sizeof buf || copy_from_user(buf, dp, left))
33198 return -EFAULT;
33199 } else {
33200 memcpy(buf, dp, left);
33201 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
33202 index 85784a7..a19ca98 100644
33203 --- a/drivers/isdn/hardware/eicon/divasync.h
33204 +++ b/drivers/isdn/hardware/eicon/divasync.h
33205 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
33206 } diva_didd_add_adapter_t;
33207 typedef struct _diva_didd_remove_adapter {
33208 IDI_CALL p_request;
33209 -} diva_didd_remove_adapter_t;
33210 +} __no_const diva_didd_remove_adapter_t;
33211 typedef struct _diva_didd_read_adapter_array {
33212 void * buffer;
33213 dword length;
33214 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
33215 index a3bd163..8956575 100644
33216 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
33217 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
33218 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
33219 typedef struct _diva_os_idi_adapter_interface {
33220 diva_init_card_proc_t cleanup_adapter_proc;
33221 diva_cmd_card_proc_t cmd_proc;
33222 -} diva_os_idi_adapter_interface_t;
33223 +} __no_const diva_os_idi_adapter_interface_t;
33224
33225 typedef struct _diva_os_xdi_adapter {
33226 struct list_head link;
33227 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
33228 index 1f355bb..43f1fea 100644
33229 --- a/drivers/isdn/icn/icn.c
33230 +++ b/drivers/isdn/icn/icn.c
33231 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
33232 if (count > len)
33233 count = len;
33234 if (user) {
33235 - if (copy_from_user(msg, buf, count))
33236 + if (count > sizeof msg || copy_from_user(msg, buf, count))
33237 return -EFAULT;
33238 } else
33239 memcpy(msg, buf, count);
33240 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
33241 index b5fdcb7..5b6c59f 100644
33242 --- a/drivers/lguest/core.c
33243 +++ b/drivers/lguest/core.c
33244 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
33245 * it's worked so far. The end address needs +1 because __get_vm_area
33246 * allocates an extra guard page, so we need space for that.
33247 */
33248 +
33249 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
33250 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33251 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
33252 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33253 +#else
33254 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
33255 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
33256 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
33257 +#endif
33258 +
33259 if (!switcher_vma) {
33260 err = -ENOMEM;
33261 printk("lguest: could not map switcher pages high\n");
33262 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
33263 * Now the Switcher is mapped at the right address, we can't fail!
33264 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
33265 */
33266 - memcpy(switcher_vma->addr, start_switcher_text,
33267 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
33268 end_switcher_text - start_switcher_text);
33269
33270 printk(KERN_INFO "lguest: mapped switcher at %p\n",
33271 diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
33272 index ff4a0bc..f5fdd9c 100644
33273 --- a/drivers/lguest/lguest_user.c
33274 +++ b/drivers/lguest/lguest_user.c
33275 @@ -198,6 +198,7 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input)
33276 * Once our Guest is initialized, the Launcher makes it run by reading
33277 * from /dev/lguest.
33278 */
33279 +static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) __size_overflow(3);
33280 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
33281 {
33282 struct lguest *lg = file->private_data;
33283 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
33284 index 3980903..ce25c5e 100644
33285 --- a/drivers/lguest/x86/core.c
33286 +++ b/drivers/lguest/x86/core.c
33287 @@ -59,7 +59,7 @@ static struct {
33288 /* Offset from where switcher.S was compiled to where we've copied it */
33289 static unsigned long switcher_offset(void)
33290 {
33291 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33292 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33293 }
33294
33295 /* This cpu's struct lguest_pages. */
33296 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
33297 * These copies are pretty cheap, so we do them unconditionally: */
33298 /* Save the current Host top-level page directory.
33299 */
33300 +
33301 +#ifdef CONFIG_PAX_PER_CPU_PGD
33302 + pages->state.host_cr3 = read_cr3();
33303 +#else
33304 pages->state.host_cr3 = __pa(current->mm->pgd);
33305 +#endif
33306 +
33307 /*
33308 * Set up the Guest's page tables to see this CPU's pages (and no
33309 * other CPU's pages).
33310 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33311 * compiled-in switcher code and the high-mapped copy we just made.
33312 */
33313 for (i = 0; i < IDT_ENTRIES; i++)
33314 - default_idt_entries[i] += switcher_offset();
33315 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33316
33317 /*
33318 * Set up the Switcher's per-cpu areas.
33319 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33320 * it will be undisturbed when we switch. To change %cs and jump we
33321 * need this structure to feed to Intel's "lcall" instruction.
33322 */
33323 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33324 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33325 lguest_entry.segment = LGUEST_CS;
33326
33327 /*
33328 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33329 index 40634b0..4f5855e 100644
33330 --- a/drivers/lguest/x86/switcher_32.S
33331 +++ b/drivers/lguest/x86/switcher_32.S
33332 @@ -87,6 +87,7 @@
33333 #include <asm/page.h>
33334 #include <asm/segment.h>
33335 #include <asm/lguest.h>
33336 +#include <asm/processor-flags.h>
33337
33338 // We mark the start of the code to copy
33339 // It's placed in .text tho it's never run here
33340 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33341 // Changes type when we load it: damn Intel!
33342 // For after we switch over our page tables
33343 // That entry will be read-only: we'd crash.
33344 +
33345 +#ifdef CONFIG_PAX_KERNEXEC
33346 + mov %cr0, %edx
33347 + xor $X86_CR0_WP, %edx
33348 + mov %edx, %cr0
33349 +#endif
33350 +
33351 movl $(GDT_ENTRY_TSS*8), %edx
33352 ltr %dx
33353
33354 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33355 // Let's clear it again for our return.
33356 // The GDT descriptor of the Host
33357 // Points to the table after two "size" bytes
33358 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33359 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33360 // Clear "used" from type field (byte 5, bit 2)
33361 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33362 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33363 +
33364 +#ifdef CONFIG_PAX_KERNEXEC
33365 + mov %cr0, %eax
33366 + xor $X86_CR0_WP, %eax
33367 + mov %eax, %cr0
33368 +#endif
33369
33370 // Once our page table's switched, the Guest is live!
33371 // The Host fades as we run this final step.
33372 @@ -295,13 +309,12 @@ deliver_to_host:
33373 // I consulted gcc, and it gave
33374 // These instructions, which I gladly credit:
33375 leal (%edx,%ebx,8), %eax
33376 - movzwl (%eax),%edx
33377 - movl 4(%eax), %eax
33378 - xorw %ax, %ax
33379 - orl %eax, %edx
33380 + movl 4(%eax), %edx
33381 + movw (%eax), %dx
33382 // Now the address of the handler's in %edx
33383 // We call it now: its "iret" drops us home.
33384 - jmp *%edx
33385 + ljmp $__KERNEL_CS, $1f
33386 +1: jmp *%edx
33387
33388 // Every interrupt can come to us here
33389 // But we must truly tell each apart.
33390 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33391 index 4daf9e5..b8d1d0f 100644
33392 --- a/drivers/macintosh/macio_asic.c
33393 +++ b/drivers/macintosh/macio_asic.c
33394 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33395 * MacIO is matched against any Apple ID, it's probe() function
33396 * will then decide wether it applies or not
33397 */
33398 -static const struct pci_device_id __devinitdata pci_ids [] = { {
33399 +static const struct pci_device_id __devinitconst pci_ids [] = { {
33400 .vendor = PCI_VENDOR_ID_APPLE,
33401 .device = PCI_ANY_ID,
33402 .subvendor = PCI_ANY_ID,
33403 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33404 index 1ce84ed..0fdd40a 100644
33405 --- a/drivers/md/dm-ioctl.c
33406 +++ b/drivers/md/dm-ioctl.c
33407 @@ -1589,7 +1589,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33408 cmd == DM_LIST_VERSIONS_CMD)
33409 return 0;
33410
33411 - if ((cmd == DM_DEV_CREATE_CMD)) {
33412 + if (cmd == DM_DEV_CREATE_CMD) {
33413 if (!*param->name) {
33414 DMWARN("name not supplied when creating device");
33415 return -EINVAL;
33416 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33417 index 9bfd057..5373ff3 100644
33418 --- a/drivers/md/dm-raid1.c
33419 +++ b/drivers/md/dm-raid1.c
33420 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33421
33422 struct mirror {
33423 struct mirror_set *ms;
33424 - atomic_t error_count;
33425 + atomic_unchecked_t error_count;
33426 unsigned long error_type;
33427 struct dm_dev *dev;
33428 sector_t offset;
33429 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33430 struct mirror *m;
33431
33432 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33433 - if (!atomic_read(&m->error_count))
33434 + if (!atomic_read_unchecked(&m->error_count))
33435 return m;
33436
33437 return NULL;
33438 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33439 * simple way to tell if a device has encountered
33440 * errors.
33441 */
33442 - atomic_inc(&m->error_count);
33443 + atomic_inc_unchecked(&m->error_count);
33444
33445 if (test_and_set_bit(error_type, &m->error_type))
33446 return;
33447 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33448 struct mirror *m = get_default_mirror(ms);
33449
33450 do {
33451 - if (likely(!atomic_read(&m->error_count)))
33452 + if (likely(!atomic_read_unchecked(&m->error_count)))
33453 return m;
33454
33455 if (m-- == ms->mirror)
33456 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33457 {
33458 struct mirror *default_mirror = get_default_mirror(m->ms);
33459
33460 - return !atomic_read(&default_mirror->error_count);
33461 + return !atomic_read_unchecked(&default_mirror->error_count);
33462 }
33463
33464 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33465 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33466 */
33467 if (likely(region_in_sync(ms, region, 1)))
33468 m = choose_mirror(ms, bio->bi_sector);
33469 - else if (m && atomic_read(&m->error_count))
33470 + else if (m && atomic_read_unchecked(&m->error_count))
33471 m = NULL;
33472
33473 if (likely(m))
33474 @@ -848,6 +848,10 @@ static void do_mirror(struct work_struct *work)
33475 static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33476 uint32_t region_size,
33477 struct dm_target *ti,
33478 + struct dm_dirty_log *dl) __size_overflow(1);
33479 +static struct mirror_set *alloc_context(unsigned int nr_mirrors,
33480 + uint32_t region_size,
33481 + struct dm_target *ti,
33482 struct dm_dirty_log *dl)
33483 {
33484 size_t len;
33485 @@ -937,7 +941,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33486 }
33487
33488 ms->mirror[mirror].ms = ms;
33489 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33490 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33491 ms->mirror[mirror].error_type = 0;
33492 ms->mirror[mirror].offset = offset;
33493
33494 @@ -1347,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33495 */
33496 static char device_status_char(struct mirror *m)
33497 {
33498 - if (!atomic_read(&(m->error_count)))
33499 + if (!atomic_read_unchecked(&(m->error_count)))
33500 return 'A';
33501
33502 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33503 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33504 index 3d80cf0..7d98e1a 100644
33505 --- a/drivers/md/dm-stripe.c
33506 +++ b/drivers/md/dm-stripe.c
33507 @@ -20,7 +20,7 @@ struct stripe {
33508 struct dm_dev *dev;
33509 sector_t physical_start;
33510
33511 - atomic_t error_count;
33512 + atomic_unchecked_t error_count;
33513 };
33514
33515 struct stripe_c {
33516 @@ -55,6 +55,7 @@ static void trigger_event(struct work_struct *work)
33517 dm_table_event(sc->ti->table);
33518 }
33519
33520 +static inline struct stripe_c *alloc_context(unsigned int stripes) __size_overflow(1);
33521 static inline struct stripe_c *alloc_context(unsigned int stripes)
33522 {
33523 size_t len;
33524 @@ -192,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33525 kfree(sc);
33526 return r;
33527 }
33528 - atomic_set(&(sc->stripe[i].error_count), 0);
33529 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33530 }
33531
33532 ti->private = sc;
33533 @@ -314,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33534 DMEMIT("%d ", sc->stripes);
33535 for (i = 0; i < sc->stripes; i++) {
33536 DMEMIT("%s ", sc->stripe[i].dev->name);
33537 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33538 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33539 'D' : 'A';
33540 }
33541 buffer[i] = '\0';
33542 @@ -361,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33543 */
33544 for (i = 0; i < sc->stripes; i++)
33545 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33546 - atomic_inc(&(sc->stripe[i].error_count));
33547 - if (atomic_read(&(sc->stripe[i].error_count)) <
33548 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33549 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33550 DM_IO_ERROR_THRESHOLD)
33551 schedule_work(&sc->trigger_event);
33552 }
33553 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33554 index 63cc542..8d45caf3 100644
33555 --- a/drivers/md/dm-table.c
33556 +++ b/drivers/md/dm-table.c
33557 @@ -391,7 +391,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33558 if (!dev_size)
33559 return 0;
33560
33561 - if ((start >= dev_size) || (start + len > dev_size)) {
33562 + if ((start >= dev_size) || (len > dev_size - start)) {
33563 DMWARN("%s: %s too small for target: "
33564 "start=%llu, len=%llu, dev_size=%llu",
33565 dm_device_name(ti->table->md), bdevname(bdev, b),
33566 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33567 index 237571a..fb6d19b 100644
33568 --- a/drivers/md/dm-thin-metadata.c
33569 +++ b/drivers/md/dm-thin-metadata.c
33570 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33571
33572 pmd->info.tm = tm;
33573 pmd->info.levels = 2;
33574 - pmd->info.value_type.context = pmd->data_sm;
33575 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33576 pmd->info.value_type.size = sizeof(__le64);
33577 pmd->info.value_type.inc = data_block_inc;
33578 pmd->info.value_type.dec = data_block_dec;
33579 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33580
33581 pmd->bl_info.tm = tm;
33582 pmd->bl_info.levels = 1;
33583 - pmd->bl_info.value_type.context = pmd->data_sm;
33584 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33585 pmd->bl_info.value_type.size = sizeof(__le64);
33586 pmd->bl_info.value_type.inc = data_block_inc;
33587 pmd->bl_info.value_type.dec = data_block_dec;
33588 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33589 index b89c548..2af3ce4 100644
33590 --- a/drivers/md/dm.c
33591 +++ b/drivers/md/dm.c
33592 @@ -176,9 +176,9 @@ struct mapped_device {
33593 /*
33594 * Event handling.
33595 */
33596 - atomic_t event_nr;
33597 + atomic_unchecked_t event_nr;
33598 wait_queue_head_t eventq;
33599 - atomic_t uevent_seq;
33600 + atomic_unchecked_t uevent_seq;
33601 struct list_head uevent_list;
33602 spinlock_t uevent_lock; /* Protect access to uevent_list */
33603
33604 @@ -1844,8 +1844,8 @@ static struct mapped_device *alloc_dev(int minor)
33605 rwlock_init(&md->map_lock);
33606 atomic_set(&md->holders, 1);
33607 atomic_set(&md->open_count, 0);
33608 - atomic_set(&md->event_nr, 0);
33609 - atomic_set(&md->uevent_seq, 0);
33610 + atomic_set_unchecked(&md->event_nr, 0);
33611 + atomic_set_unchecked(&md->uevent_seq, 0);
33612 INIT_LIST_HEAD(&md->uevent_list);
33613 spin_lock_init(&md->uevent_lock);
33614
33615 @@ -1979,7 +1979,7 @@ static void event_callback(void *context)
33616
33617 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33618
33619 - atomic_inc(&md->event_nr);
33620 + atomic_inc_unchecked(&md->event_nr);
33621 wake_up(&md->eventq);
33622 }
33623
33624 @@ -2621,18 +2621,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33625
33626 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33627 {
33628 - return atomic_add_return(1, &md->uevent_seq);
33629 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33630 }
33631
33632 uint32_t dm_get_event_nr(struct mapped_device *md)
33633 {
33634 - return atomic_read(&md->event_nr);
33635 + return atomic_read_unchecked(&md->event_nr);
33636 }
33637
33638 int dm_wait_event(struct mapped_device *md, int event_nr)
33639 {
33640 return wait_event_interruptible(md->eventq,
33641 - (event_nr != atomic_read(&md->event_nr)));
33642 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33643 }
33644
33645 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33646 diff --git a/drivers/md/md.c b/drivers/md/md.c
33647 index 58027d8..d9cddcd 100644
33648 --- a/drivers/md/md.c
33649 +++ b/drivers/md/md.c
33650 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33651 * start build, activate spare
33652 */
33653 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33654 -static atomic_t md_event_count;
33655 +static atomic_unchecked_t md_event_count;
33656 void md_new_event(struct mddev *mddev)
33657 {
33658 - atomic_inc(&md_event_count);
33659 + atomic_inc_unchecked(&md_event_count);
33660 wake_up(&md_event_waiters);
33661 }
33662 EXPORT_SYMBOL_GPL(md_new_event);
33663 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33664 */
33665 static void md_new_event_inintr(struct mddev *mddev)
33666 {
33667 - atomic_inc(&md_event_count);
33668 + atomic_inc_unchecked(&md_event_count);
33669 wake_up(&md_event_waiters);
33670 }
33671
33672 @@ -1524,7 +1524,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33673
33674 rdev->preferred_minor = 0xffff;
33675 rdev->data_offset = le64_to_cpu(sb->data_offset);
33676 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33677 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33678
33679 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33680 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33681 @@ -1743,7 +1743,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33682 else
33683 sb->resync_offset = cpu_to_le64(0);
33684
33685 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33686 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33687
33688 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33689 sb->size = cpu_to_le64(mddev->dev_sectors);
33690 @@ -2689,7 +2689,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33691 static ssize_t
33692 errors_show(struct md_rdev *rdev, char *page)
33693 {
33694 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33695 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33696 }
33697
33698 static ssize_t
33699 @@ -2698,7 +2698,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33700 char *e;
33701 unsigned long n = simple_strtoul(buf, &e, 10);
33702 if (*buf && (*e == 0 || *e == '\n')) {
33703 - atomic_set(&rdev->corrected_errors, n);
33704 + atomic_set_unchecked(&rdev->corrected_errors, n);
33705 return len;
33706 }
33707 return -EINVAL;
33708 @@ -3084,8 +3084,8 @@ int md_rdev_init(struct md_rdev *rdev)
33709 rdev->sb_loaded = 0;
33710 rdev->bb_page = NULL;
33711 atomic_set(&rdev->nr_pending, 0);
33712 - atomic_set(&rdev->read_errors, 0);
33713 - atomic_set(&rdev->corrected_errors, 0);
33714 + atomic_set_unchecked(&rdev->read_errors, 0);
33715 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33716
33717 INIT_LIST_HEAD(&rdev->same_set);
33718 init_waitqueue_head(&rdev->blocked_wait);
33719 @@ -6736,7 +6736,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33720
33721 spin_unlock(&pers_lock);
33722 seq_printf(seq, "\n");
33723 - seq->poll_event = atomic_read(&md_event_count);
33724 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33725 return 0;
33726 }
33727 if (v == (void*)2) {
33728 @@ -6828,7 +6828,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33729 chunk_kb ? "KB" : "B");
33730 if (bitmap->file) {
33731 seq_printf(seq, ", file: ");
33732 - seq_path(seq, &bitmap->file->f_path, " \t\n");
33733 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33734 }
33735
33736 seq_printf(seq, "\n");
33737 @@ -6859,7 +6859,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33738 return error;
33739
33740 seq = file->private_data;
33741 - seq->poll_event = atomic_read(&md_event_count);
33742 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33743 return error;
33744 }
33745
33746 @@ -6873,7 +6873,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33747 /* always allow read */
33748 mask = POLLIN | POLLRDNORM;
33749
33750 - if (seq->poll_event != atomic_read(&md_event_count))
33751 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33752 mask |= POLLERR | POLLPRI;
33753 return mask;
33754 }
33755 @@ -6917,7 +6917,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33756 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33757 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33758 (int)part_stat_read(&disk->part0, sectors[1]) -
33759 - atomic_read(&disk->sync_io);
33760 + atomic_read_unchecked(&disk->sync_io);
33761 /* sync IO will cause sync_io to increase before the disk_stats
33762 * as sync_io is counted when a request starts, and
33763 * disk_stats is counted when it completes.
33764 diff --git a/drivers/md/md.h b/drivers/md/md.h
33765 index 44c63df..b795d1a 100644
33766 --- a/drivers/md/md.h
33767 +++ b/drivers/md/md.h
33768 @@ -93,13 +93,13 @@ struct md_rdev {
33769 * only maintained for arrays that
33770 * support hot removal
33771 */
33772 - atomic_t read_errors; /* number of consecutive read errors that
33773 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33774 * we have tried to ignore.
33775 */
33776 struct timespec last_read_error; /* monotonic time since our
33777 * last read error
33778 */
33779 - atomic_t corrected_errors; /* number of corrected read errors,
33780 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33781 * for reporting to userspace and storing
33782 * in superblock.
33783 */
33784 @@ -421,7 +421,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33785
33786 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33787 {
33788 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33789 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33790 }
33791
33792 struct md_personality
33793 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33794 index 50ed53b..4f29d7d 100644
33795 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33796 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33797 @@ -159,7 +159,7 @@ static void ca_destroy(struct count_array *ca)
33798 /*----------------------------------------------------------------*/
33799
33800 struct sm_checker {
33801 - struct dm_space_map sm;
33802 + dm_space_map_no_const sm;
33803
33804 struct count_array old_counts;
33805 struct count_array counts;
33806 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33807 index fc469ba..2d91555 100644
33808 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33809 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33810 @@ -23,7 +23,7 @@
33811 * Space map interface.
33812 */
33813 struct sm_disk {
33814 - struct dm_space_map sm;
33815 + dm_space_map_no_const sm;
33816
33817 struct ll_disk ll;
33818 struct ll_disk old_ll;
33819 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33820 index e89ae5e..062e4c2 100644
33821 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33822 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33823 @@ -43,7 +43,7 @@ struct block_op {
33824 };
33825
33826 struct sm_metadata {
33827 - struct dm_space_map sm;
33828 + dm_space_map_no_const sm;
33829
33830 struct ll_disk ll;
33831 struct ll_disk old_ll;
33832 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33833 index 1cbfc6b..56e1dbb 100644
33834 --- a/drivers/md/persistent-data/dm-space-map.h
33835 +++ b/drivers/md/persistent-data/dm-space-map.h
33836 @@ -60,6 +60,7 @@ struct dm_space_map {
33837 int (*root_size)(struct dm_space_map *sm, size_t *result);
33838 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33839 };
33840 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33841
33842 /*----------------------------------------------------------------*/
33843
33844 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33845 index edc735a..e9b97f1 100644
33846 --- a/drivers/md/raid1.c
33847 +++ b/drivers/md/raid1.c
33848 @@ -1645,7 +1645,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33849 if (r1_sync_page_io(rdev, sect, s,
33850 bio->bi_io_vec[idx].bv_page,
33851 READ) != 0)
33852 - atomic_add(s, &rdev->corrected_errors);
33853 + atomic_add_unchecked(s, &rdev->corrected_errors);
33854 }
33855 sectors -= s;
33856 sect += s;
33857 @@ -1859,7 +1859,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33858 test_bit(In_sync, &rdev->flags)) {
33859 if (r1_sync_page_io(rdev, sect, s,
33860 conf->tmppage, READ)) {
33861 - atomic_add(s, &rdev->corrected_errors);
33862 + atomic_add_unchecked(s, &rdev->corrected_errors);
33863 printk(KERN_INFO
33864 "md/raid1:%s: read error corrected "
33865 "(%d sectors at %llu on %s)\n",
33866 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33867 index 1898389..a3aa617 100644
33868 --- a/drivers/md/raid10.c
33869 +++ b/drivers/md/raid10.c
33870 @@ -1636,7 +1636,7 @@ static void end_sync_read(struct bio *bio, int error)
33871 /* The write handler will notice the lack of
33872 * R10BIO_Uptodate and record any errors etc
33873 */
33874 - atomic_add(r10_bio->sectors,
33875 + atomic_add_unchecked(r10_bio->sectors,
33876 &conf->mirrors[d].rdev->corrected_errors);
33877
33878 /* for reconstruct, we always reschedule after a read.
33879 @@ -1987,7 +1987,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33880 {
33881 struct timespec cur_time_mon;
33882 unsigned long hours_since_last;
33883 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33884 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33885
33886 ktime_get_ts(&cur_time_mon);
33887
33888 @@ -2009,9 +2009,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33889 * overflowing the shift of read_errors by hours_since_last.
33890 */
33891 if (hours_since_last >= 8 * sizeof(read_errors))
33892 - atomic_set(&rdev->read_errors, 0);
33893 + atomic_set_unchecked(&rdev->read_errors, 0);
33894 else
33895 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33896 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33897 }
33898
33899 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33900 @@ -2065,8 +2065,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33901 return;
33902
33903 check_decay_read_errors(mddev, rdev);
33904 - atomic_inc(&rdev->read_errors);
33905 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33906 + atomic_inc_unchecked(&rdev->read_errors);
33907 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33908 char b[BDEVNAME_SIZE];
33909 bdevname(rdev->bdev, b);
33910
33911 @@ -2074,7 +2074,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33912 "md/raid10:%s: %s: Raid device exceeded "
33913 "read_error threshold [cur %d:max %d]\n",
33914 mdname(mddev), b,
33915 - atomic_read(&rdev->read_errors), max_read_errors);
33916 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33917 printk(KERN_NOTICE
33918 "md/raid10:%s: %s: Failing raid device\n",
33919 mdname(mddev), b);
33920 @@ -2223,7 +2223,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33921 (unsigned long long)(
33922 sect + rdev->data_offset),
33923 bdevname(rdev->bdev, b));
33924 - atomic_add(s, &rdev->corrected_errors);
33925 + atomic_add_unchecked(s, &rdev->corrected_errors);
33926 }
33927
33928 rdev_dec_pending(rdev, mddev);
33929 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33930 index d1162e5..c7cd902 100644
33931 --- a/drivers/md/raid5.c
33932 +++ b/drivers/md/raid5.c
33933 @@ -1687,18 +1687,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33934 (unsigned long long)(sh->sector
33935 + rdev->data_offset),
33936 bdevname(rdev->bdev, b));
33937 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33938 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33939 clear_bit(R5_ReadError, &sh->dev[i].flags);
33940 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33941 }
33942 - if (atomic_read(&rdev->read_errors))
33943 - atomic_set(&rdev->read_errors, 0);
33944 + if (atomic_read_unchecked(&rdev->read_errors))
33945 + atomic_set_unchecked(&rdev->read_errors, 0);
33946 } else {
33947 const char *bdn = bdevname(rdev->bdev, b);
33948 int retry = 0;
33949
33950 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33951 - atomic_inc(&rdev->read_errors);
33952 + atomic_inc_unchecked(&rdev->read_errors);
33953 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33954 printk_ratelimited(
33955 KERN_WARNING
33956 @@ -1727,7 +1727,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33957 (unsigned long long)(sh->sector
33958 + rdev->data_offset),
33959 bdn);
33960 - else if (atomic_read(&rdev->read_errors)
33961 + else if (atomic_read_unchecked(&rdev->read_errors)
33962 > conf->max_nr_stripes)
33963 printk(KERN_WARNING
33964 "md/raid:%s: Too many read errors, failing device %s.\n",
33965 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33966 index ce4f858..7bcfb46 100644
33967 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33968 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33969 @@ -1678,7 +1678,7 @@ static struct ddb_info ddb_v6 = {
33970 .subvendor = _subvend, .subdevice = _subdev, \
33971 .driver_data = (unsigned long)&_driverdata }
33972
33973 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33974 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33975 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33976 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33977 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33978 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33979 index a7d876f..8c21b61 100644
33980 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33981 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33982 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33983 union {
33984 dmx_ts_cb ts;
33985 dmx_section_cb sec;
33986 - } cb;
33987 + } __no_const cb;
33988
33989 struct dvb_demux *demux;
33990 void *priv;
33991 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33992 index 00a6732..70a682e 100644
33993 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33994 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33995 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33996 const struct dvb_device *template, void *priv, int type)
33997 {
33998 struct dvb_device *dvbdev;
33999 - struct file_operations *dvbdevfops;
34000 + file_operations_no_const *dvbdevfops;
34001 struct device *clsdev;
34002 int minor;
34003 int id;
34004 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
34005 index 3940bb0..fb3952a 100644
34006 --- a/drivers/media/dvb/dvb-usb/cxusb.c
34007 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
34008 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
34009
34010 struct dib0700_adapter_state {
34011 int (*set_param_save) (struct dvb_frontend *);
34012 -};
34013 +} __no_const;
34014
34015 static int dib7070_set_param_override(struct dvb_frontend *fe)
34016 {
34017 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
34018 index 451c5a7..649f711 100644
34019 --- a/drivers/media/dvb/dvb-usb/dw2102.c
34020 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
34021 @@ -95,7 +95,7 @@ struct su3000_state {
34022
34023 struct s6x0_state {
34024 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
34025 -};
34026 +} __no_const;
34027
34028 /* debug */
34029 static int dvb_usb_dw2102_debug;
34030 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
34031 index 404f63a..4796533 100644
34032 --- a/drivers/media/dvb/frontends/dib3000.h
34033 +++ b/drivers/media/dvb/frontends/dib3000.h
34034 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
34035 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
34036 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
34037 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
34038 -};
34039 +} __no_const;
34040
34041 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
34042 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
34043 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
34044 index 8418c02..8555013 100644
34045 --- a/drivers/media/dvb/ngene/ngene-cards.c
34046 +++ b/drivers/media/dvb/ngene/ngene-cards.c
34047 @@ -477,7 +477,7 @@ static struct ngene_info ngene_info_m780 = {
34048
34049 /****************************************************************************/
34050
34051 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
34052 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
34053 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
34054 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
34055 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
34056 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
34057 index 16a089f..ab1667d 100644
34058 --- a/drivers/media/radio/radio-cadet.c
34059 +++ b/drivers/media/radio/radio-cadet.c
34060 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
34061 unsigned char readbuf[RDS_BUFFER];
34062 int i = 0;
34063
34064 + if (count > RDS_BUFFER)
34065 + return -EFAULT;
34066 mutex_lock(&dev->lock);
34067 if (dev->rdsstat == 0) {
34068 dev->rdsstat = 1;
34069 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
34070 index 9cde353..8c6a1c3 100644
34071 --- a/drivers/media/video/au0828/au0828.h
34072 +++ b/drivers/media/video/au0828/au0828.h
34073 @@ -191,7 +191,7 @@ struct au0828_dev {
34074
34075 /* I2C */
34076 struct i2c_adapter i2c_adap;
34077 - struct i2c_algorithm i2c_algo;
34078 + i2c_algorithm_no_const i2c_algo;
34079 struct i2c_client i2c_client;
34080 u32 i2c_rc;
34081
34082 diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
34083 index ee91e295..04ad048 100644
34084 --- a/drivers/media/video/cpia2/cpia2_core.c
34085 +++ b/drivers/media/video/cpia2/cpia2_core.c
34086 @@ -86,6 +86,7 @@ static inline unsigned long kvirt_to_pa(unsigned long adr)
34087 return ret;
34088 }
34089
34090 +static void *rvmalloc(unsigned long size) __size_overflow(1);
34091 static void *rvmalloc(unsigned long size)
34092 {
34093 void *mem;
34094 diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c
34095 index 82d195b..181103c 100644
34096 --- a/drivers/media/video/cx18/cx18-alsa-pcm.c
34097 +++ b/drivers/media/video/cx18/cx18-alsa-pcm.c
34098 @@ -229,6 +229,8 @@ static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream,
34099
34100
34101 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34102 + size_t size) __size_overflow(2);
34103 +static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34104 size_t size)
34105 {
34106 struct snd_pcm_runtime *runtime = subs->runtime;
34107 diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
34108 index a2c2b7d..8f1bec7 100644
34109 --- a/drivers/media/video/cx231xx/cx231xx-audio.c
34110 +++ b/drivers/media/video/cx231xx/cx231xx-audio.c
34111 @@ -389,6 +389,8 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
34112 }
34113
34114 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34115 + size_t size) __size_overflow(2);
34116 +static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34117 size_t size)
34118 {
34119 struct snd_pcm_runtime *runtime = subs->runtime;
34120 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
34121 index 04bf662..e0ac026 100644
34122 --- a/drivers/media/video/cx88/cx88-alsa.c
34123 +++ b/drivers/media/video/cx88/cx88-alsa.c
34124 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
34125 * Only boards with eeprom and byte 1 at eeprom=1 have it
34126 */
34127
34128 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
34129 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
34130 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34131 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
34132 {0, }
34133 diff --git a/drivers/media/video/em28xx/em28xx-audio.c b/drivers/media/video/em28xx/em28xx-audio.c
34134 index e2a7b77..753d0ee 100644
34135 --- a/drivers/media/video/em28xx/em28xx-audio.c
34136 +++ b/drivers/media/video/em28xx/em28xx-audio.c
34137 @@ -225,6 +225,8 @@ static int em28xx_init_audio_isoc(struct em28xx *dev)
34138 }
34139
34140 static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34141 + size_t size) __size_overflow(2);
34142 +static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs,
34143 size_t size)
34144 {
34145 struct snd_pcm_runtime *runtime = subs->runtime;
34146 diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
34147 index b09a3c8..6dcba0a 100644
34148 --- a/drivers/media/video/meye.c
34149 +++ b/drivers/media/video/meye.c
34150 @@ -72,6 +72,7 @@ static struct meye meye;
34151 /****************************************************************************/
34152 /* Memory allocation routines (stolen from bttv-driver.c) */
34153 /****************************************************************************/
34154 +static void *rvmalloc(unsigned long size) __size_overflow(1);
34155 static void *rvmalloc(unsigned long size)
34156 {
34157 void *mem;
34158 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
34159 index 1fb7d5b..3901e77 100644
34160 --- a/drivers/media/video/omap/omap_vout.c
34161 +++ b/drivers/media/video/omap/omap_vout.c
34162 @@ -64,7 +64,6 @@ enum omap_vout_channels {
34163 OMAP_VIDEO2,
34164 };
34165
34166 -static struct videobuf_queue_ops video_vbq_ops;
34167 /* Variables configurable through module params*/
34168 static u32 video1_numbuffers = 3;
34169 static u32 video2_numbuffers = 3;
34170 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
34171 {
34172 struct videobuf_queue *q;
34173 struct omap_vout_device *vout = NULL;
34174 + static struct videobuf_queue_ops video_vbq_ops = {
34175 + .buf_setup = omap_vout_buffer_setup,
34176 + .buf_prepare = omap_vout_buffer_prepare,
34177 + .buf_release = omap_vout_buffer_release,
34178 + .buf_queue = omap_vout_buffer_queue,
34179 + };
34180
34181 vout = video_drvdata(file);
34182 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
34183 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
34184 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
34185
34186 q = &vout->vbq;
34187 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
34188 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
34189 - video_vbq_ops.buf_release = omap_vout_buffer_release;
34190 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
34191 spin_lock_init(&vout->vbq_lock);
34192
34193 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
34194 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34195 index 305e6aa..0143317 100644
34196 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34197 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
34198 @@ -196,7 +196,7 @@ struct pvr2_hdw {
34199
34200 /* I2C stuff */
34201 struct i2c_adapter i2c_adap;
34202 - struct i2c_algorithm i2c_algo;
34203 + i2c_algorithm_no_const i2c_algo;
34204 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
34205 int i2c_cx25840_hack_state;
34206 int i2c_linked;
34207 diff --git a/drivers/media/video/saa7164/saa7164-encoder.c b/drivers/media/video/saa7164/saa7164-encoder.c
34208 index 2fd38a0..ddec3c4 100644
34209 --- a/drivers/media/video/saa7164/saa7164-encoder.c
34210 +++ b/drivers/media/video/saa7164/saa7164-encoder.c
34211 @@ -1136,6 +1136,8 @@ struct saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
34212 }
34213
34214 static ssize_t fops_read(struct file *file, char __user *buffer,
34215 + size_t count, loff_t *pos) __size_overflow(3);
34216 +static ssize_t fops_read(struct file *file, char __user *buffer,
34217 size_t count, loff_t *pos)
34218 {
34219 struct saa7164_encoder_fh *fh = file->private_data;
34220 diff --git a/drivers/media/video/saa7164/saa7164-vbi.c b/drivers/media/video/saa7164/saa7164-vbi.c
34221 index e2e0341..b80056c 100644
34222 --- a/drivers/media/video/saa7164/saa7164-vbi.c
34223 +++ b/drivers/media/video/saa7164/saa7164-vbi.c
34224 @@ -1081,6 +1081,8 @@ struct saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
34225 }
34226
34227 static ssize_t fops_read(struct file *file, char __user *buffer,
34228 + size_t count, loff_t *pos) __size_overflow(3);
34229 +static ssize_t fops_read(struct file *file, char __user *buffer,
34230 size_t count, loff_t *pos)
34231 {
34232 struct saa7164_vbi_fh *fh = file->private_data;
34233 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
34234 index 4ed1c7c2..8f15e13 100644
34235 --- a/drivers/media/video/timblogiw.c
34236 +++ b/drivers/media/video/timblogiw.c
34237 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
34238
34239 /* Platform device functions */
34240
34241 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34242 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34243 .vidioc_querycap = timblogiw_querycap,
34244 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34245 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
34246 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34247 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34248 };
34249
34250 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34251 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34252 .owner = THIS_MODULE,
34253 .open = timblogiw_open,
34254 .release = timblogiw_close,
34255 diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
34256 index c969111..a7910f4 100644
34257 --- a/drivers/media/video/videobuf-dma-contig.c
34258 +++ b/drivers/media/video/videobuf-dma-contig.c
34259 @@ -184,6 +184,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
34260 return ret;
34261 }
34262
34263 +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34264 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34265 {
34266 struct videobuf_dma_contig_memory *mem;
34267 diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
34268 index f300dea..5fc9c4a 100644
34269 --- a/drivers/media/video/videobuf-dma-sg.c
34270 +++ b/drivers/media/video/videobuf-dma-sg.c
34271 @@ -419,6 +419,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
34272 struct videobuf_dma_sg_memory
34273 */
34274
34275 +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34276 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34277 {
34278 struct videobuf_dma_sg_memory *mem;
34279 diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
34280 index df14258..12cc7a3 100644
34281 --- a/drivers/media/video/videobuf-vmalloc.c
34282 +++ b/drivers/media/video/videobuf-vmalloc.c
34283 @@ -135,6 +135,7 @@ static const struct vm_operations_struct videobuf_vm_ops = {
34284 struct videobuf_dma_sg_memory
34285 */
34286
34287 +static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) __size_overflow(1);
34288 static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
34289 {
34290 struct videobuf_vmalloc_memory *mem;
34291 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34292 index a7dc467..a55c423 100644
34293 --- a/drivers/message/fusion/mptbase.c
34294 +++ b/drivers/message/fusion/mptbase.c
34295 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34296 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34297 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34298
34299 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34300 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34301 +#else
34302 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34303 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34304 +#endif
34305 +
34306 /*
34307 * Rounding UP to nearest 4-kB boundary here...
34308 */
34309 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34310 index 551262e..7551198 100644
34311 --- a/drivers/message/fusion/mptsas.c
34312 +++ b/drivers/message/fusion/mptsas.c
34313 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34314 return 0;
34315 }
34316
34317 +static inline void
34318 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34319 +{
34320 + if (phy_info->port_details) {
34321 + phy_info->port_details->rphy = rphy;
34322 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34323 + ioc->name, rphy));
34324 + }
34325 +
34326 + if (rphy) {
34327 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34328 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34329 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34330 + ioc->name, rphy, rphy->dev.release));
34331 + }
34332 +}
34333 +
34334 /* no mutex */
34335 static void
34336 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34337 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34338 return NULL;
34339 }
34340
34341 -static inline void
34342 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34343 -{
34344 - if (phy_info->port_details) {
34345 - phy_info->port_details->rphy = rphy;
34346 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34347 - ioc->name, rphy));
34348 - }
34349 -
34350 - if (rphy) {
34351 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34352 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34353 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34354 - ioc->name, rphy, rphy->dev.release));
34355 - }
34356 -}
34357 -
34358 static inline struct sas_port *
34359 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34360 {
34361 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34362 index 0c3ced7..1fe34ec 100644
34363 --- a/drivers/message/fusion/mptscsih.c
34364 +++ b/drivers/message/fusion/mptscsih.c
34365 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34366
34367 h = shost_priv(SChost);
34368
34369 - if (h) {
34370 - if (h->info_kbuf == NULL)
34371 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34372 - return h->info_kbuf;
34373 - h->info_kbuf[0] = '\0';
34374 + if (!h)
34375 + return NULL;
34376
34377 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34378 - h->info_kbuf[size-1] = '\0';
34379 - }
34380 + if (h->info_kbuf == NULL)
34381 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34382 + return h->info_kbuf;
34383 + h->info_kbuf[0] = '\0';
34384 +
34385 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34386 + h->info_kbuf[size-1] = '\0';
34387
34388 return h->info_kbuf;
34389 }
34390 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34391 index 6d115c7..58ff7fd 100644
34392 --- a/drivers/message/i2o/i2o_proc.c
34393 +++ b/drivers/message/i2o/i2o_proc.c
34394 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34395 "Array Controller Device"
34396 };
34397
34398 -static char *chtostr(u8 * chars, int n)
34399 -{
34400 - char tmp[256];
34401 - tmp[0] = 0;
34402 - return strncat(tmp, (char *)chars, n);
34403 -}
34404 -
34405 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34406 char *group)
34407 {
34408 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34409
34410 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34411 seq_printf(seq, "%-#8x", ddm_table.module_id);
34412 - seq_printf(seq, "%-29s",
34413 - chtostr(ddm_table.module_name_version, 28));
34414 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34415 seq_printf(seq, "%9d ", ddm_table.data_size);
34416 seq_printf(seq, "%8d", ddm_table.code_size);
34417
34418 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34419
34420 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34421 seq_printf(seq, "%-#8x", dst->module_id);
34422 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34423 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34424 + seq_printf(seq, "%-.28s", dst->module_name_version);
34425 + seq_printf(seq, "%-.8s", dst->date);
34426 seq_printf(seq, "%8d ", dst->module_size);
34427 seq_printf(seq, "%8d ", dst->mpb_size);
34428 seq_printf(seq, "0x%04x", dst->module_flags);
34429 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34430 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34431 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34432 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34433 - seq_printf(seq, "Vendor info : %s\n",
34434 - chtostr((u8 *) (work32 + 2), 16));
34435 - seq_printf(seq, "Product info : %s\n",
34436 - chtostr((u8 *) (work32 + 6), 16));
34437 - seq_printf(seq, "Description : %s\n",
34438 - chtostr((u8 *) (work32 + 10), 16));
34439 - seq_printf(seq, "Product rev. : %s\n",
34440 - chtostr((u8 *) (work32 + 14), 8));
34441 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34442 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34443 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34444 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34445
34446 seq_printf(seq, "Serial number : ");
34447 print_serial_number(seq, (u8 *) (work32 + 16),
34448 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34449 }
34450
34451 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34452 - seq_printf(seq, "Module name : %s\n",
34453 - chtostr(result.module_name, 24));
34454 - seq_printf(seq, "Module revision : %s\n",
34455 - chtostr(result.module_rev, 8));
34456 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
34457 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34458
34459 seq_printf(seq, "Serial number : ");
34460 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34461 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34462 return 0;
34463 }
34464
34465 - seq_printf(seq, "Device name : %s\n",
34466 - chtostr(result.device_name, 64));
34467 - seq_printf(seq, "Service name : %s\n",
34468 - chtostr(result.service_name, 64));
34469 - seq_printf(seq, "Physical name : %s\n",
34470 - chtostr(result.physical_location, 64));
34471 - seq_printf(seq, "Instance number : %s\n",
34472 - chtostr(result.instance_number, 4));
34473 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
34474 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
34475 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34476 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34477
34478 return 0;
34479 }
34480 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34481 index a8c08f3..155fe3d 100644
34482 --- a/drivers/message/i2o/iop.c
34483 +++ b/drivers/message/i2o/iop.c
34484 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34485
34486 spin_lock_irqsave(&c->context_list_lock, flags);
34487
34488 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34489 - atomic_inc(&c->context_list_counter);
34490 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34491 + atomic_inc_unchecked(&c->context_list_counter);
34492
34493 - entry->context = atomic_read(&c->context_list_counter);
34494 + entry->context = atomic_read_unchecked(&c->context_list_counter);
34495
34496 list_add(&entry->list, &c->context_list);
34497
34498 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34499
34500 #if BITS_PER_LONG == 64
34501 spin_lock_init(&c->context_list_lock);
34502 - atomic_set(&c->context_list_counter, 0);
34503 + atomic_set_unchecked(&c->context_list_counter, 0);
34504 INIT_LIST_HEAD(&c->context_list);
34505 #endif
34506
34507 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34508 index 7ce65f4..e66e9bc 100644
34509 --- a/drivers/mfd/abx500-core.c
34510 +++ b/drivers/mfd/abx500-core.c
34511 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34512
34513 struct abx500_device_entry {
34514 struct list_head list;
34515 - struct abx500_ops ops;
34516 + abx500_ops_no_const ops;
34517 struct device *dev;
34518 };
34519
34520 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34521 index a9223ed..4127b13 100644
34522 --- a/drivers/mfd/janz-cmodio.c
34523 +++ b/drivers/mfd/janz-cmodio.c
34524 @@ -13,6 +13,7 @@
34525
34526 #include <linux/kernel.h>
34527 #include <linux/module.h>
34528 +#include <linux/slab.h>
34529 #include <linux/init.h>
34530 #include <linux/pci.h>
34531 #include <linux/interrupt.h>
34532 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34533 index a981e2a..5ca0c8b 100644
34534 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
34535 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34536 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34537 * the lid is closed. This leads to interrupts as soon as a little move
34538 * is done.
34539 */
34540 - atomic_inc(&lis3->count);
34541 + atomic_inc_unchecked(&lis3->count);
34542
34543 wake_up_interruptible(&lis3->misc_wait);
34544 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34545 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34546 if (lis3->pm_dev)
34547 pm_runtime_get_sync(lis3->pm_dev);
34548
34549 - atomic_set(&lis3->count, 0);
34550 + atomic_set_unchecked(&lis3->count, 0);
34551 return 0;
34552 }
34553
34554 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34555 add_wait_queue(&lis3->misc_wait, &wait);
34556 while (true) {
34557 set_current_state(TASK_INTERRUPTIBLE);
34558 - data = atomic_xchg(&lis3->count, 0);
34559 + data = atomic_xchg_unchecked(&lis3->count, 0);
34560 if (data)
34561 break;
34562
34563 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34564 struct lis3lv02d, miscdev);
34565
34566 poll_wait(file, &lis3->misc_wait, wait);
34567 - if (atomic_read(&lis3->count))
34568 + if (atomic_read_unchecked(&lis3->count))
34569 return POLLIN | POLLRDNORM;
34570 return 0;
34571 }
34572 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34573 index 2b1482a..5d33616 100644
34574 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34575 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34576 @@ -266,7 +266,7 @@ struct lis3lv02d {
34577 struct input_polled_dev *idev; /* input device */
34578 struct platform_device *pdev; /* platform device */
34579 struct regulator_bulk_data regulators[2];
34580 - atomic_t count; /* interrupt count after last read */
34581 + atomic_unchecked_t count; /* interrupt count after last read */
34582 union axis_conversion ac; /* hw -> logical axis */
34583 int mapped_btns[3];
34584
34585 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34586 index 2f30bad..c4c13d0 100644
34587 --- a/drivers/misc/sgi-gru/gruhandles.c
34588 +++ b/drivers/misc/sgi-gru/gruhandles.c
34589 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34590 unsigned long nsec;
34591
34592 nsec = CLKS2NSEC(clks);
34593 - atomic_long_inc(&mcs_op_statistics[op].count);
34594 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34595 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34596 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34597 if (mcs_op_statistics[op].max < nsec)
34598 mcs_op_statistics[op].max = nsec;
34599 }
34600 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34601 index 950dbe9..eeef0f8 100644
34602 --- a/drivers/misc/sgi-gru/gruprocfs.c
34603 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34604 @@ -32,9 +32,9 @@
34605
34606 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34607
34608 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34609 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34610 {
34611 - unsigned long val = atomic_long_read(v);
34612 + unsigned long val = atomic_long_read_unchecked(v);
34613
34614 seq_printf(s, "%16lu %s\n", val, id);
34615 }
34616 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34617
34618 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34619 for (op = 0; op < mcsop_last; op++) {
34620 - count = atomic_long_read(&mcs_op_statistics[op].count);
34621 - total = atomic_long_read(&mcs_op_statistics[op].total);
34622 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34623 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34624 max = mcs_op_statistics[op].max;
34625 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34626 count ? total / count : 0, max);
34627 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34628 index 5c3ce24..4915ccb 100644
34629 --- a/drivers/misc/sgi-gru/grutables.h
34630 +++ b/drivers/misc/sgi-gru/grutables.h
34631 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34632 * GRU statistics.
34633 */
34634 struct gru_stats_s {
34635 - atomic_long_t vdata_alloc;
34636 - atomic_long_t vdata_free;
34637 - atomic_long_t gts_alloc;
34638 - atomic_long_t gts_free;
34639 - atomic_long_t gms_alloc;
34640 - atomic_long_t gms_free;
34641 - atomic_long_t gts_double_allocate;
34642 - atomic_long_t assign_context;
34643 - atomic_long_t assign_context_failed;
34644 - atomic_long_t free_context;
34645 - atomic_long_t load_user_context;
34646 - atomic_long_t load_kernel_context;
34647 - atomic_long_t lock_kernel_context;
34648 - atomic_long_t unlock_kernel_context;
34649 - atomic_long_t steal_user_context;
34650 - atomic_long_t steal_kernel_context;
34651 - atomic_long_t steal_context_failed;
34652 - atomic_long_t nopfn;
34653 - atomic_long_t asid_new;
34654 - atomic_long_t asid_next;
34655 - atomic_long_t asid_wrap;
34656 - atomic_long_t asid_reuse;
34657 - atomic_long_t intr;
34658 - atomic_long_t intr_cbr;
34659 - atomic_long_t intr_tfh;
34660 - atomic_long_t intr_spurious;
34661 - atomic_long_t intr_mm_lock_failed;
34662 - atomic_long_t call_os;
34663 - atomic_long_t call_os_wait_queue;
34664 - atomic_long_t user_flush_tlb;
34665 - atomic_long_t user_unload_context;
34666 - atomic_long_t user_exception;
34667 - atomic_long_t set_context_option;
34668 - atomic_long_t check_context_retarget_intr;
34669 - atomic_long_t check_context_unload;
34670 - atomic_long_t tlb_dropin;
34671 - atomic_long_t tlb_preload_page;
34672 - atomic_long_t tlb_dropin_fail_no_asid;
34673 - atomic_long_t tlb_dropin_fail_upm;
34674 - atomic_long_t tlb_dropin_fail_invalid;
34675 - atomic_long_t tlb_dropin_fail_range_active;
34676 - atomic_long_t tlb_dropin_fail_idle;
34677 - atomic_long_t tlb_dropin_fail_fmm;
34678 - atomic_long_t tlb_dropin_fail_no_exception;
34679 - atomic_long_t tfh_stale_on_fault;
34680 - atomic_long_t mmu_invalidate_range;
34681 - atomic_long_t mmu_invalidate_page;
34682 - atomic_long_t flush_tlb;
34683 - atomic_long_t flush_tlb_gru;
34684 - atomic_long_t flush_tlb_gru_tgh;
34685 - atomic_long_t flush_tlb_gru_zero_asid;
34686 + atomic_long_unchecked_t vdata_alloc;
34687 + atomic_long_unchecked_t vdata_free;
34688 + atomic_long_unchecked_t gts_alloc;
34689 + atomic_long_unchecked_t gts_free;
34690 + atomic_long_unchecked_t gms_alloc;
34691 + atomic_long_unchecked_t gms_free;
34692 + atomic_long_unchecked_t gts_double_allocate;
34693 + atomic_long_unchecked_t assign_context;
34694 + atomic_long_unchecked_t assign_context_failed;
34695 + atomic_long_unchecked_t free_context;
34696 + atomic_long_unchecked_t load_user_context;
34697 + atomic_long_unchecked_t load_kernel_context;
34698 + atomic_long_unchecked_t lock_kernel_context;
34699 + atomic_long_unchecked_t unlock_kernel_context;
34700 + atomic_long_unchecked_t steal_user_context;
34701 + atomic_long_unchecked_t steal_kernel_context;
34702 + atomic_long_unchecked_t steal_context_failed;
34703 + atomic_long_unchecked_t nopfn;
34704 + atomic_long_unchecked_t asid_new;
34705 + atomic_long_unchecked_t asid_next;
34706 + atomic_long_unchecked_t asid_wrap;
34707 + atomic_long_unchecked_t asid_reuse;
34708 + atomic_long_unchecked_t intr;
34709 + atomic_long_unchecked_t intr_cbr;
34710 + atomic_long_unchecked_t intr_tfh;
34711 + atomic_long_unchecked_t intr_spurious;
34712 + atomic_long_unchecked_t intr_mm_lock_failed;
34713 + atomic_long_unchecked_t call_os;
34714 + atomic_long_unchecked_t call_os_wait_queue;
34715 + atomic_long_unchecked_t user_flush_tlb;
34716 + atomic_long_unchecked_t user_unload_context;
34717 + atomic_long_unchecked_t user_exception;
34718 + atomic_long_unchecked_t set_context_option;
34719 + atomic_long_unchecked_t check_context_retarget_intr;
34720 + atomic_long_unchecked_t check_context_unload;
34721 + atomic_long_unchecked_t tlb_dropin;
34722 + atomic_long_unchecked_t tlb_preload_page;
34723 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34724 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34725 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34726 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34727 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34728 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34729 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34730 + atomic_long_unchecked_t tfh_stale_on_fault;
34731 + atomic_long_unchecked_t mmu_invalidate_range;
34732 + atomic_long_unchecked_t mmu_invalidate_page;
34733 + atomic_long_unchecked_t flush_tlb;
34734 + atomic_long_unchecked_t flush_tlb_gru;
34735 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34736 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34737
34738 - atomic_long_t copy_gpa;
34739 - atomic_long_t read_gpa;
34740 + atomic_long_unchecked_t copy_gpa;
34741 + atomic_long_unchecked_t read_gpa;
34742
34743 - atomic_long_t mesq_receive;
34744 - atomic_long_t mesq_receive_none;
34745 - atomic_long_t mesq_send;
34746 - atomic_long_t mesq_send_failed;
34747 - atomic_long_t mesq_noop;
34748 - atomic_long_t mesq_send_unexpected_error;
34749 - atomic_long_t mesq_send_lb_overflow;
34750 - atomic_long_t mesq_send_qlimit_reached;
34751 - atomic_long_t mesq_send_amo_nacked;
34752 - atomic_long_t mesq_send_put_nacked;
34753 - atomic_long_t mesq_page_overflow;
34754 - atomic_long_t mesq_qf_locked;
34755 - atomic_long_t mesq_qf_noop_not_full;
34756 - atomic_long_t mesq_qf_switch_head_failed;
34757 - atomic_long_t mesq_qf_unexpected_error;
34758 - atomic_long_t mesq_noop_unexpected_error;
34759 - atomic_long_t mesq_noop_lb_overflow;
34760 - atomic_long_t mesq_noop_qlimit_reached;
34761 - atomic_long_t mesq_noop_amo_nacked;
34762 - atomic_long_t mesq_noop_put_nacked;
34763 - atomic_long_t mesq_noop_page_overflow;
34764 + atomic_long_unchecked_t mesq_receive;
34765 + atomic_long_unchecked_t mesq_receive_none;
34766 + atomic_long_unchecked_t mesq_send;
34767 + atomic_long_unchecked_t mesq_send_failed;
34768 + atomic_long_unchecked_t mesq_noop;
34769 + atomic_long_unchecked_t mesq_send_unexpected_error;
34770 + atomic_long_unchecked_t mesq_send_lb_overflow;
34771 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34772 + atomic_long_unchecked_t mesq_send_amo_nacked;
34773 + atomic_long_unchecked_t mesq_send_put_nacked;
34774 + atomic_long_unchecked_t mesq_page_overflow;
34775 + atomic_long_unchecked_t mesq_qf_locked;
34776 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34777 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34778 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34779 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34780 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34781 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34782 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34783 + atomic_long_unchecked_t mesq_noop_put_nacked;
34784 + atomic_long_unchecked_t mesq_noop_page_overflow;
34785
34786 };
34787
34788 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34789 tghop_invalidate, mcsop_last};
34790
34791 struct mcs_op_statistic {
34792 - atomic_long_t count;
34793 - atomic_long_t total;
34794 + atomic_long_unchecked_t count;
34795 + atomic_long_unchecked_t total;
34796 unsigned long max;
34797 };
34798
34799 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34800
34801 #define STAT(id) do { \
34802 if (gru_options & OPT_STATS) \
34803 - atomic_long_inc(&gru_stats.id); \
34804 + atomic_long_inc_unchecked(&gru_stats.id); \
34805 } while (0)
34806
34807 #ifdef CONFIG_SGI_GRU_DEBUG
34808 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34809 index 851b2f2..a4ec097 100644
34810 --- a/drivers/misc/sgi-xp/xp.h
34811 +++ b/drivers/misc/sgi-xp/xp.h
34812 @@ -289,7 +289,7 @@ struct xpc_interface {
34813 xpc_notify_func, void *);
34814 void (*received) (short, int, void *);
34815 enum xp_retval (*partid_to_nasids) (short, void *);
34816 -};
34817 +} __no_const;
34818
34819 extern struct xpc_interface xpc_interface;
34820
34821 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34822 index b94d5f7..7f494c5 100644
34823 --- a/drivers/misc/sgi-xp/xpc.h
34824 +++ b/drivers/misc/sgi-xp/xpc.h
34825 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34826 void (*received_payload) (struct xpc_channel *, void *);
34827 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34828 };
34829 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34830
34831 /* struct xpc_partition act_state values (for XPC HB) */
34832
34833 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34834 /* found in xpc_main.c */
34835 extern struct device *xpc_part;
34836 extern struct device *xpc_chan;
34837 -extern struct xpc_arch_operations xpc_arch_ops;
34838 +extern xpc_arch_operations_no_const xpc_arch_ops;
34839 extern int xpc_disengage_timelimit;
34840 extern int xpc_disengage_timedout;
34841 extern int xpc_activate_IRQ_rcvd;
34842 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34843 index 8d082b4..aa749ae 100644
34844 --- a/drivers/misc/sgi-xp/xpc_main.c
34845 +++ b/drivers/misc/sgi-xp/xpc_main.c
34846 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34847 .notifier_call = xpc_system_die,
34848 };
34849
34850 -struct xpc_arch_operations xpc_arch_ops;
34851 +xpc_arch_operations_no_const xpc_arch_ops;
34852
34853 /*
34854 * Timer function to enforce the timelimit on the partition disengage.
34855 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34856 index 6ebdc40..9edf5d8 100644
34857 --- a/drivers/mmc/host/sdhci-pci.c
34858 +++ b/drivers/mmc/host/sdhci-pci.c
34859 @@ -631,7 +631,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34860 .probe = via_probe,
34861 };
34862
34863 -static const struct pci_device_id pci_ids[] __devinitdata = {
34864 +static const struct pci_device_id pci_ids[] __devinitconst = {
34865 {
34866 .vendor = PCI_VENDOR_ID_RICOH,
34867 .device = PCI_DEVICE_ID_RICOH_R5C822,
34868 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34869 index 87a431c..4959b43 100644
34870 --- a/drivers/mtd/devices/doc2000.c
34871 +++ b/drivers/mtd/devices/doc2000.c
34872 @@ -764,7 +764,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34873
34874 /* The ECC will not be calculated correctly if less than 512 is written */
34875 /* DBB-
34876 - if (len != 0x200 && eccbuf)
34877 + if (len != 0x200)
34878 printk(KERN_WARNING
34879 "ECC needs a full sector write (adr: %lx size %lx)\n",
34880 (long) to, (long) len);
34881 diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
34882 index 9eacf67..4534b5b 100644
34883 --- a/drivers/mtd/devices/doc2001.c
34884 +++ b/drivers/mtd/devices/doc2001.c
34885 @@ -384,7 +384,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
34886 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
34887
34888 /* Don't allow read past end of device */
34889 - if (from >= this->totlen)
34890 + if (from >= this->totlen || !len)
34891 return -EINVAL;
34892
34893 /* Don't allow a single read to cross a 512-byte block boundary */
34894 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34895 index 3984d48..28aa897 100644
34896 --- a/drivers/mtd/nand/denali.c
34897 +++ b/drivers/mtd/nand/denali.c
34898 @@ -26,6 +26,7 @@
34899 #include <linux/pci.h>
34900 #include <linux/mtd/mtd.h>
34901 #include <linux/module.h>
34902 +#include <linux/slab.h>
34903
34904 #include "denali.h"
34905
34906 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34907 index 51b9d6a..52af9a7 100644
34908 --- a/drivers/mtd/nftlmount.c
34909 +++ b/drivers/mtd/nftlmount.c
34910 @@ -24,6 +24,7 @@
34911 #include <asm/errno.h>
34912 #include <linux/delay.h>
34913 #include <linux/slab.h>
34914 +#include <linux/sched.h>
34915 #include <linux/mtd/mtd.h>
34916 #include <linux/mtd/nand.h>
34917 #include <linux/mtd/nftl.h>
34918 diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
34919 index e2cdebf..d48183a 100644
34920 --- a/drivers/mtd/ubi/debug.c
34921 +++ b/drivers/mtd/ubi/debug.c
34922 @@ -338,6 +338,8 @@ out:
34923
34924 /* Write an UBI debugfs file */
34925 static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
34926 + size_t count, loff_t *ppos) __size_overflow(3);
34927 +static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
34928 size_t count, loff_t *ppos)
34929 {
34930 unsigned long ubi_num = (unsigned long)file->private_data;
34931 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34932 index 071f4c8..440862e 100644
34933 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34934 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34935 @@ -2862,7 +2862,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34936 */
34937
34938 #define ATL2_PARAM(X, desc) \
34939 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34940 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34941 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34942 MODULE_PARM_DESC(X, desc);
34943 #else
34944 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34945 index 66da39f..5dc436d 100644
34946 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34947 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34948 @@ -473,7 +473,7 @@ struct bnx2x_rx_mode_obj {
34949
34950 int (*wait_comp)(struct bnx2x *bp,
34951 struct bnx2x_rx_mode_ramrod_params *p);
34952 -};
34953 +} __no_const;
34954
34955 /********************** Set multicast group ***********************************/
34956
34957 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34958 index aea8f72..fcebf75 100644
34959 --- a/drivers/net/ethernet/broadcom/tg3.h
34960 +++ b/drivers/net/ethernet/broadcom/tg3.h
34961 @@ -140,6 +140,7 @@
34962 #define CHIPREV_ID_5750_A0 0x4000
34963 #define CHIPREV_ID_5750_A1 0x4001
34964 #define CHIPREV_ID_5750_A3 0x4003
34965 +#define CHIPREV_ID_5750_C1 0x4201
34966 #define CHIPREV_ID_5750_C2 0x4202
34967 #define CHIPREV_ID_5752_A0_HW 0x5000
34968 #define CHIPREV_ID_5752_A0 0x6000
34969 diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
34970 index 47a8435..248e4b3 100644
34971 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c
34972 +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
34973 @@ -1052,6 +1052,8 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
34974 * be copied but there is no memory for the copy.
34975 */
34976 static inline struct sk_buff *get_packet(struct pci_dev *pdev,
34977 + struct freelQ *fl, unsigned int len) __size_overflow(3);
34978 +static inline struct sk_buff *get_packet(struct pci_dev *pdev,
34979 struct freelQ *fl, unsigned int len)
34980 {
34981 struct sk_buff *skb;
34982 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34983 index c4e8643..0979484 100644
34984 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34985 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34986 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34987 */
34988 struct l2t_skb_cb {
34989 arp_failure_handler_func arp_failure_handler;
34990 -};
34991 +} __no_const;
34992
34993 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34994
34995 diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
34996 index cfb60e1..94af340 100644
34997 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
34998 +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
34999 @@ -611,6 +611,8 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
35000 * of the SW ring.
35001 */
35002 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
35003 + size_t sw_size, dma_addr_t * phys, void *metadata) __size_overflow(2,4);
35004 +static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
35005 size_t sw_size, dma_addr_t * phys, void *metadata)
35006 {
35007 size_t len = nelem * elem_size;
35008 @@ -777,6 +779,8 @@ static inline unsigned int flits_to_desc(unsigned int n)
35009 * be copied but there is no memory for the copy.
35010 */
35011 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
35012 + unsigned int len, unsigned int drop_thres) __size_overflow(3);
35013 +static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
35014 unsigned int len, unsigned int drop_thres)
35015 {
35016 struct sk_buff *skb = NULL;
35017 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
35018 index 2dae795..73037d2 100644
35019 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
35020 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
35021 @@ -593,6 +593,9 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
35022 */
35023 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
35024 size_t sw_size, dma_addr_t *phys, void *metadata,
35025 + size_t stat_size, int node) __size_overflow(2,4);
35026 +static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
35027 + size_t sw_size, dma_addr_t *phys, void *metadata,
35028 size_t stat_size, int node)
35029 {
35030 size_t len = nelem * elem_size + stat_size;
35031 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
35032 index 0bd585b..d954ca5 100644
35033 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
35034 +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
35035 @@ -729,6 +729,9 @@ static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
35036 */
35037 static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
35038 size_t swsize, dma_addr_t *busaddrp, void *swringp,
35039 + size_t stat_size) __size_overflow(2,4);
35040 +static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
35041 + size_t swsize, dma_addr_t *busaddrp, void *swringp,
35042 size_t stat_size)
35043 {
35044 /*
35045 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
35046 index 4d71f5a..8004440 100644
35047 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
35048 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
35049 @@ -5392,7 +5392,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35050 for (i=0; i<ETH_ALEN; i++) {
35051 tmp.addr[i] = dev->dev_addr[i];
35052 }
35053 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35054 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35055 break;
35056
35057 case DE4X5_SET_HWADDR: /* Set the hardware address */
35058 @@ -5432,7 +5432,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35059 spin_lock_irqsave(&lp->lock, flags);
35060 memcpy(&statbuf, &lp->pktStats, ioc->len);
35061 spin_unlock_irqrestore(&lp->lock, flags);
35062 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
35063 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35064 return -EFAULT;
35065 break;
35066 }
35067 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
35068 index 14d5b61..1398636 100644
35069 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
35070 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
35071 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
35072 {NULL}};
35073
35074
35075 -static const char *block_name[] __devinitdata = {
35076 +static const char *block_name[] __devinitconst = {
35077 "21140 non-MII",
35078 "21140 MII PHY",
35079 "21142 Serial PHY",
35080 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
35081 index 52da7b2..4ddfe1c 100644
35082 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
35083 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
35084 @@ -236,7 +236,7 @@ struct pci_id_info {
35085 int drv_flags; /* Driver use, intended as capability flags. */
35086 };
35087
35088 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
35089 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
35090 { /* Sometime a Level-One switch card. */
35091 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
35092 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
35093 diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
35094 index b2dc2c8..2e09edb 100644
35095 --- a/drivers/net/ethernet/dlink/dl2k.c
35096 +++ b/drivers/net/ethernet/dlink/dl2k.c
35097 @@ -1259,55 +1259,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
35098 {
35099 int phy_addr;
35100 struct netdev_private *np = netdev_priv(dev);
35101 - struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru;
35102 -
35103 - struct netdev_desc *desc;
35104 - int i;
35105 + struct mii_ioctl_data *miidata = if_mii(rq);
35106
35107 phy_addr = np->phy_addr;
35108 switch (cmd) {
35109 - case SIOCDEVPRIVATE:
35110 + case SIOCGMIIPHY:
35111 + miidata->phy_id = phy_addr;
35112 break;
35113 -
35114 - case SIOCDEVPRIVATE + 1:
35115 - miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num);
35116 + case SIOCGMIIREG:
35117 + miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num);
35118 break;
35119 - case SIOCDEVPRIVATE + 2:
35120 - mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value);
35121 + case SIOCSMIIREG:
35122 + if (!capable(CAP_NET_ADMIN))
35123 + return -EPERM;
35124 + mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in);
35125 break;
35126 - case SIOCDEVPRIVATE + 3:
35127 - break;
35128 - case SIOCDEVPRIVATE + 4:
35129 - break;
35130 - case SIOCDEVPRIVATE + 5:
35131 - netif_stop_queue (dev);
35132 - break;
35133 - case SIOCDEVPRIVATE + 6:
35134 - netif_wake_queue (dev);
35135 - break;
35136 - case SIOCDEVPRIVATE + 7:
35137 - printk
35138 - ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n",
35139 - netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx,
35140 - np->old_rx);
35141 - break;
35142 - case SIOCDEVPRIVATE + 8:
35143 - printk("TX ring:\n");
35144 - for (i = 0; i < TX_RING_SIZE; i++) {
35145 - desc = &np->tx_ring[i];
35146 - printk
35147 - ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x",
35148 - i,
35149 - (u32) (np->tx_ring_dma + i * sizeof (*desc)),
35150 - (u32)le64_to_cpu(desc->next_desc),
35151 - (u32)le64_to_cpu(desc->status),
35152 - (u32)(le64_to_cpu(desc->fraginfo) >> 32),
35153 - (u32)le64_to_cpu(desc->fraginfo));
35154 - printk ("\n");
35155 - }
35156 - printk ("\n");
35157 - break;
35158 -
35159 default:
35160 return -EOPNOTSUPP;
35161 }
35162 diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
35163 index ba0adca..30c2da3 100644
35164 --- a/drivers/net/ethernet/dlink/dl2k.h
35165 +++ b/drivers/net/ethernet/dlink/dl2k.h
35166 @@ -365,13 +365,6 @@ struct ioctl_data {
35167 char *data;
35168 };
35169
35170 -struct mii_data {
35171 - __u16 reserved;
35172 - __u16 reg_num;
35173 - __u16 in_value;
35174 - __u16 out_value;
35175 -};
35176 -
35177 /* The Rx and Tx buffer descriptors. */
35178 struct netdev_desc {
35179 __le64 next_desc;
35180 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
35181 index 28a3a9b..d96cb63 100644
35182 --- a/drivers/net/ethernet/dlink/sundance.c
35183 +++ b/drivers/net/ethernet/dlink/sundance.c
35184 @@ -218,7 +218,7 @@ enum {
35185 struct pci_id_info {
35186 const char *name;
35187 };
35188 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
35189 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
35190 {"D-Link DFE-550TX FAST Ethernet Adapter"},
35191 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
35192 {"D-Link DFE-580TX 4 port Server Adapter"},
35193 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
35194 index e703d64..d62ecf9 100644
35195 --- a/drivers/net/ethernet/emulex/benet/be_main.c
35196 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
35197 @@ -402,7 +402,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
35198
35199 if (wrapped)
35200 newacc += 65536;
35201 - ACCESS_ONCE(*acc) = newacc;
35202 + ACCESS_ONCE_RW(*acc) = newacc;
35203 }
35204
35205 void be_parse_stats(struct be_adapter *adapter)
35206 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
35207 index 47f85c3..82ab6c4 100644
35208 --- a/drivers/net/ethernet/faraday/ftgmac100.c
35209 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
35210 @@ -31,6 +31,8 @@
35211 #include <linux/netdevice.h>
35212 #include <linux/phy.h>
35213 #include <linux/platform_device.h>
35214 +#include <linux/interrupt.h>
35215 +#include <linux/irqreturn.h>
35216 #include <net/ip.h>
35217
35218 #include "ftgmac100.h"
35219 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
35220 index bb336a0..4b472da 100644
35221 --- a/drivers/net/ethernet/faraday/ftmac100.c
35222 +++ b/drivers/net/ethernet/faraday/ftmac100.c
35223 @@ -31,6 +31,8 @@
35224 #include <linux/module.h>
35225 #include <linux/netdevice.h>
35226 #include <linux/platform_device.h>
35227 +#include <linux/interrupt.h>
35228 +#include <linux/irqreturn.h>
35229
35230 #include "ftmac100.h"
35231
35232 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
35233 index c82d444..0007fb4 100644
35234 --- a/drivers/net/ethernet/fealnx.c
35235 +++ b/drivers/net/ethernet/fealnx.c
35236 @@ -150,7 +150,7 @@ struct chip_info {
35237 int flags;
35238 };
35239
35240 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
35241 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
35242 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
35243 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
35244 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
35245 diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35246 index e1159e5..e18684d 100644
35247 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35248 +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
35249 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
35250 {
35251 struct e1000_hw *hw = &adapter->hw;
35252 struct e1000_mac_info *mac = &hw->mac;
35253 - struct e1000_mac_operations *func = &mac->ops;
35254 + e1000_mac_operations_no_const *func = &mac->ops;
35255
35256 /* Set media type */
35257 switch (adapter->pdev->device) {
35258 diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
35259 index a3e65fd..f451444 100644
35260 --- a/drivers/net/ethernet/intel/e1000e/82571.c
35261 +++ b/drivers/net/ethernet/intel/e1000e/82571.c
35262 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
35263 {
35264 struct e1000_hw *hw = &adapter->hw;
35265 struct e1000_mac_info *mac = &hw->mac;
35266 - struct e1000_mac_operations *func = &mac->ops;
35267 + e1000_mac_operations_no_const *func = &mac->ops;
35268 u32 swsm = 0;
35269 u32 swsm2 = 0;
35270 bool force_clear_smbi = false;
35271 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
35272 index 2967039..ca8c40c 100644
35273 --- a/drivers/net/ethernet/intel/e1000e/hw.h
35274 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
35275 @@ -778,6 +778,7 @@ struct e1000_mac_operations {
35276 void (*write_vfta)(struct e1000_hw *, u32, u32);
35277 s32 (*read_mac_addr)(struct e1000_hw *);
35278 };
35279 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35280
35281 /*
35282 * When to use various PHY register access functions:
35283 @@ -818,6 +819,7 @@ struct e1000_phy_operations {
35284 void (*power_up)(struct e1000_hw *);
35285 void (*power_down)(struct e1000_hw *);
35286 };
35287 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35288
35289 /* Function pointers for the NVM. */
35290 struct e1000_nvm_operations {
35291 @@ -829,9 +831,10 @@ struct e1000_nvm_operations {
35292 s32 (*validate)(struct e1000_hw *);
35293 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
35294 };
35295 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35296
35297 struct e1000_mac_info {
35298 - struct e1000_mac_operations ops;
35299 + e1000_mac_operations_no_const ops;
35300 u8 addr[ETH_ALEN];
35301 u8 perm_addr[ETH_ALEN];
35302
35303 @@ -872,7 +875,7 @@ struct e1000_mac_info {
35304 };
35305
35306 struct e1000_phy_info {
35307 - struct e1000_phy_operations ops;
35308 + e1000_phy_operations_no_const ops;
35309
35310 enum e1000_phy_type type;
35311
35312 @@ -906,7 +909,7 @@ struct e1000_phy_info {
35313 };
35314
35315 struct e1000_nvm_info {
35316 - struct e1000_nvm_operations ops;
35317 + e1000_nvm_operations_no_const ops;
35318
35319 enum e1000_nvm_type type;
35320 enum e1000_nvm_override override;
35321 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
35322 index f67cbd3..cef9e3d 100644
35323 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
35324 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
35325 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
35326 s32 (*read_mac_addr)(struct e1000_hw *);
35327 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
35328 };
35329 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35330
35331 struct e1000_phy_operations {
35332 s32 (*acquire)(struct e1000_hw *);
35333 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
35334 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
35335 s32 (*write_reg)(struct e1000_hw *, u32, u16);
35336 };
35337 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
35338
35339 struct e1000_nvm_operations {
35340 s32 (*acquire)(struct e1000_hw *);
35341 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
35342 s32 (*update)(struct e1000_hw *);
35343 s32 (*validate)(struct e1000_hw *);
35344 };
35345 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
35346
35347 struct e1000_info {
35348 s32 (*get_invariants)(struct e1000_hw *);
35349 @@ -350,7 +353,7 @@ struct e1000_info {
35350 extern const struct e1000_info e1000_82575_info;
35351
35352 struct e1000_mac_info {
35353 - struct e1000_mac_operations ops;
35354 + e1000_mac_operations_no_const ops;
35355
35356 u8 addr[6];
35357 u8 perm_addr[6];
35358 @@ -388,7 +391,7 @@ struct e1000_mac_info {
35359 };
35360
35361 struct e1000_phy_info {
35362 - struct e1000_phy_operations ops;
35363 + e1000_phy_operations_no_const ops;
35364
35365 enum e1000_phy_type type;
35366
35367 @@ -423,7 +426,7 @@ struct e1000_phy_info {
35368 };
35369
35370 struct e1000_nvm_info {
35371 - struct e1000_nvm_operations ops;
35372 + e1000_nvm_operations_no_const ops;
35373 enum e1000_nvm_type type;
35374 enum e1000_nvm_override override;
35375
35376 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
35377 s32 (*check_for_ack)(struct e1000_hw *, u16);
35378 s32 (*check_for_rst)(struct e1000_hw *, u16);
35379 };
35380 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35381
35382 struct e1000_mbx_stats {
35383 u32 msgs_tx;
35384 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
35385 };
35386
35387 struct e1000_mbx_info {
35388 - struct e1000_mbx_operations ops;
35389 + e1000_mbx_operations_no_const ops;
35390 struct e1000_mbx_stats stats;
35391 u32 timeout;
35392 u32 usec_delay;
35393 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
35394 index 57db3c6..aa825fc 100644
35395 --- a/drivers/net/ethernet/intel/igbvf/vf.h
35396 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
35397 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
35398 s32 (*read_mac_addr)(struct e1000_hw *);
35399 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
35400 };
35401 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
35402
35403 struct e1000_mac_info {
35404 - struct e1000_mac_operations ops;
35405 + e1000_mac_operations_no_const ops;
35406 u8 addr[6];
35407 u8 perm_addr[6];
35408
35409 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
35410 s32 (*check_for_ack)(struct e1000_hw *);
35411 s32 (*check_for_rst)(struct e1000_hw *);
35412 };
35413 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
35414
35415 struct e1000_mbx_stats {
35416 u32 msgs_tx;
35417 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
35418 };
35419
35420 struct e1000_mbx_info {
35421 - struct e1000_mbx_operations ops;
35422 + e1000_mbx_operations_no_const ops;
35423 struct e1000_mbx_stats stats;
35424 u32 timeout;
35425 u32 usec_delay;
35426 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35427 index 9b95bef..7e254ee 100644
35428 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35429 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
35430 @@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations {
35431 s32 (*update_checksum)(struct ixgbe_hw *);
35432 u16 (*calc_checksum)(struct ixgbe_hw *);
35433 };
35434 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
35435
35436 struct ixgbe_mac_operations {
35437 s32 (*init_hw)(struct ixgbe_hw *);
35438 @@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations {
35439 /* Manageability interface */
35440 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
35441 };
35442 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35443
35444 struct ixgbe_phy_operations {
35445 s32 (*identify)(struct ixgbe_hw *);
35446 @@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations {
35447 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
35448 s32 (*check_overtemp)(struct ixgbe_hw *);
35449 };
35450 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
35451
35452 struct ixgbe_eeprom_info {
35453 - struct ixgbe_eeprom_operations ops;
35454 + ixgbe_eeprom_operations_no_const ops;
35455 enum ixgbe_eeprom_type type;
35456 u32 semaphore_delay;
35457 u16 word_size;
35458 @@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info {
35459
35460 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35461 struct ixgbe_mac_info {
35462 - struct ixgbe_mac_operations ops;
35463 + ixgbe_mac_operations_no_const ops;
35464 enum ixgbe_mac_type type;
35465 u8 addr[ETH_ALEN];
35466 u8 perm_addr[ETH_ALEN];
35467 @@ -2828,7 +2831,7 @@ struct ixgbe_mac_info {
35468 };
35469
35470 struct ixgbe_phy_info {
35471 - struct ixgbe_phy_operations ops;
35472 + ixgbe_phy_operations_no_const ops;
35473 struct mdio_if_info mdio;
35474 enum ixgbe_phy_type type;
35475 u32 id;
35476 @@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations {
35477 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35478 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35479 };
35480 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35481
35482 struct ixgbe_mbx_stats {
35483 u32 msgs_tx;
35484 @@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats {
35485 };
35486
35487 struct ixgbe_mbx_info {
35488 - struct ixgbe_mbx_operations ops;
35489 + ixgbe_mbx_operations_no_const ops;
35490 struct ixgbe_mbx_stats stats;
35491 u32 timeout;
35492 u32 usec_delay;
35493 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35494 index 25c951d..cc7cf33 100644
35495 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35496 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35497 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35498 s32 (*clear_vfta)(struct ixgbe_hw *);
35499 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35500 };
35501 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35502
35503 enum ixgbe_mac_type {
35504 ixgbe_mac_unknown = 0,
35505 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35506 };
35507
35508 struct ixgbe_mac_info {
35509 - struct ixgbe_mac_operations ops;
35510 + ixgbe_mac_operations_no_const ops;
35511 u8 addr[6];
35512 u8 perm_addr[6];
35513
35514 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35515 s32 (*check_for_ack)(struct ixgbe_hw *);
35516 s32 (*check_for_rst)(struct ixgbe_hw *);
35517 };
35518 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35519
35520 struct ixgbe_mbx_stats {
35521 u32 msgs_tx;
35522 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35523 };
35524
35525 struct ixgbe_mbx_info {
35526 - struct ixgbe_mbx_operations ops;
35527 + ixgbe_mbx_operations_no_const ops;
35528 struct ixgbe_mbx_stats stats;
35529 u32 timeout;
35530 u32 udelay;
35531 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35532 index 8bf22b6..7f5baaa 100644
35533 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
35534 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35535 @@ -41,6 +41,7 @@
35536 #include <linux/slab.h>
35537 #include <linux/io-mapping.h>
35538 #include <linux/delay.h>
35539 +#include <linux/sched.h>
35540
35541 #include <linux/mlx4/device.h>
35542 #include <linux/mlx4/doorbell.h>
35543 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35544 index 5046a64..71ca936 100644
35545 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35546 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35547 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35548 void (*link_down)(struct __vxge_hw_device *devh);
35549 void (*crit_err)(struct __vxge_hw_device *devh,
35550 enum vxge_hw_event type, u64 ext_data);
35551 -};
35552 +} __no_const;
35553
35554 /*
35555 * struct __vxge_hw_blockpool_entry - Block private data structure
35556 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35557 index 4a518a3..936b334 100644
35558 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35559 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35560 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35561 struct vxge_hw_mempool_dma *dma_object,
35562 u32 index,
35563 u32 is_last);
35564 -};
35565 +} __no_const;
35566
35567 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35568 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35569 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35570 index bbacb37..d60887d 100644
35571 --- a/drivers/net/ethernet/realtek/r8169.c
35572 +++ b/drivers/net/ethernet/realtek/r8169.c
35573 @@ -695,17 +695,17 @@ struct rtl8169_private {
35574 struct mdio_ops {
35575 void (*write)(void __iomem *, int, int);
35576 int (*read)(void __iomem *, int);
35577 - } mdio_ops;
35578 + } __no_const mdio_ops;
35579
35580 struct pll_power_ops {
35581 void (*down)(struct rtl8169_private *);
35582 void (*up)(struct rtl8169_private *);
35583 - } pll_power_ops;
35584 + } __no_const pll_power_ops;
35585
35586 struct jumbo_ops {
35587 void (*enable)(struct rtl8169_private *);
35588 void (*disable)(struct rtl8169_private *);
35589 - } jumbo_ops;
35590 + } __no_const jumbo_ops;
35591
35592 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35593 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35594 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35595 index 5b118cd..858b523 100644
35596 --- a/drivers/net/ethernet/sis/sis190.c
35597 +++ b/drivers/net/ethernet/sis/sis190.c
35598 @@ -1622,7 +1622,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35599 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35600 struct net_device *dev)
35601 {
35602 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35603 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35604 struct sis190_private *tp = netdev_priv(dev);
35605 struct pci_dev *isa_bridge;
35606 u8 reg, tmp8;
35607 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35608 index c07cfe9..81cbf7e 100644
35609 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35610 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35611 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35612
35613 writel(value, ioaddr + MMC_CNTRL);
35614
35615 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35616 - MMC_CNTRL, value);
35617 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35618 +// MMC_CNTRL, value);
35619 }
35620
35621 /* To mask all all interrupts.*/
35622 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35623 index dec5836..6d4db7d 100644
35624 --- a/drivers/net/hyperv/hyperv_net.h
35625 +++ b/drivers/net/hyperv/hyperv_net.h
35626 @@ -97,7 +97,7 @@ struct rndis_device {
35627
35628 enum rndis_device_state state;
35629 bool link_state;
35630 - atomic_t new_req_id;
35631 + atomic_unchecked_t new_req_id;
35632
35633 spinlock_t request_lock;
35634 struct list_head req_list;
35635 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35636 index 133b7fb..d58c559 100644
35637 --- a/drivers/net/hyperv/rndis_filter.c
35638 +++ b/drivers/net/hyperv/rndis_filter.c
35639 @@ -96,7 +96,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35640 * template
35641 */
35642 set = &rndis_msg->msg.set_req;
35643 - set->req_id = atomic_inc_return(&dev->new_req_id);
35644 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35645
35646 /* Add to the request list */
35647 spin_lock_irqsave(&dev->request_lock, flags);
35648 @@ -627,7 +627,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35649
35650 /* Setup the rndis set */
35651 halt = &request->request_msg.msg.halt_req;
35652 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35653 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35654
35655 /* Ignore return since this msg is optional. */
35656 rndis_filter_send_request(dev, request);
35657 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
35658 index 58dc117..f140c77 100644
35659 --- a/drivers/net/macvtap.c
35660 +++ b/drivers/net/macvtap.c
35661 @@ -526,6 +526,8 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
35662 }
35663 base = (unsigned long)from->iov_base + offset1;
35664 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
35665 + if (i + size >= MAX_SKB_FRAGS)
35666 + return -EFAULT;
35667 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
35668 if ((num_pages != size) ||
35669 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags))
35670 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35671 index 3ed983c..a1bb418 100644
35672 --- a/drivers/net/ppp/ppp_generic.c
35673 +++ b/drivers/net/ppp/ppp_generic.c
35674 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35675 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35676 struct ppp_stats stats;
35677 struct ppp_comp_stats cstats;
35678 - char *vers;
35679
35680 switch (cmd) {
35681 case SIOCGPPPSTATS:
35682 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35683 break;
35684
35685 case SIOCGPPPVER:
35686 - vers = PPP_VERSION;
35687 - if (copy_to_user(addr, vers, strlen(vers) + 1))
35688 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35689 break;
35690 err = 0;
35691 break;
35692 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35693 index 515f122..41dd273 100644
35694 --- a/drivers/net/tokenring/abyss.c
35695 +++ b/drivers/net/tokenring/abyss.c
35696 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
35697
35698 static int __init abyss_init (void)
35699 {
35700 - abyss_netdev_ops = tms380tr_netdev_ops;
35701 + pax_open_kernel();
35702 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35703
35704 - abyss_netdev_ops.ndo_open = abyss_open;
35705 - abyss_netdev_ops.ndo_stop = abyss_close;
35706 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35707 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35708 + pax_close_kernel();
35709
35710 return pci_register_driver(&abyss_driver);
35711 }
35712 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35713 index 6153cfd..cf69c1c 100644
35714 --- a/drivers/net/tokenring/madgemc.c
35715 +++ b/drivers/net/tokenring/madgemc.c
35716 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver = {
35717
35718 static int __init madgemc_init (void)
35719 {
35720 - madgemc_netdev_ops = tms380tr_netdev_ops;
35721 - madgemc_netdev_ops.ndo_open = madgemc_open;
35722 - madgemc_netdev_ops.ndo_stop = madgemc_close;
35723 + pax_open_kernel();
35724 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35725 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35726 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35727 + pax_close_kernel();
35728
35729 return mca_register_driver (&madgemc_driver);
35730 }
35731 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35732 index 8d362e6..f91cc52 100644
35733 --- a/drivers/net/tokenring/proteon.c
35734 +++ b/drivers/net/tokenring/proteon.c
35735 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
35736 struct platform_device *pdev;
35737 int i, num = 0, err = 0;
35738
35739 - proteon_netdev_ops = tms380tr_netdev_ops;
35740 - proteon_netdev_ops.ndo_open = proteon_open;
35741 - proteon_netdev_ops.ndo_stop = tms380tr_close;
35742 + pax_open_kernel();
35743 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35744 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35745 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35746 + pax_close_kernel();
35747
35748 err = platform_driver_register(&proteon_driver);
35749 if (err)
35750 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35751 index 46db5c5..37c1536 100644
35752 --- a/drivers/net/tokenring/skisa.c
35753 +++ b/drivers/net/tokenring/skisa.c
35754 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35755 struct platform_device *pdev;
35756 int i, num = 0, err = 0;
35757
35758 - sk_isa_netdev_ops = tms380tr_netdev_ops;
35759 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
35760 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35761 + pax_open_kernel();
35762 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35763 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35764 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35765 + pax_close_kernel();
35766
35767 err = platform_driver_register(&sk_isa_driver);
35768 if (err)
35769 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35770 index e1324b4..e1b0041 100644
35771 --- a/drivers/net/usb/hso.c
35772 +++ b/drivers/net/usb/hso.c
35773 @@ -71,7 +71,7 @@
35774 #include <asm/byteorder.h>
35775 #include <linux/serial_core.h>
35776 #include <linux/serial.h>
35777 -
35778 +#include <asm/local.h>
35779
35780 #define MOD_AUTHOR "Option Wireless"
35781 #define MOD_DESCRIPTION "USB High Speed Option driver"
35782 @@ -257,7 +257,7 @@ struct hso_serial {
35783
35784 /* from usb_serial_port */
35785 struct tty_struct *tty;
35786 - int open_count;
35787 + local_t open_count;
35788 spinlock_t serial_lock;
35789
35790 int (*write_data) (struct hso_serial *serial);
35791 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35792 struct urb *urb;
35793
35794 urb = serial->rx_urb[0];
35795 - if (serial->open_count > 0) {
35796 + if (local_read(&serial->open_count) > 0) {
35797 count = put_rxbuf_data(urb, serial);
35798 if (count == -1)
35799 return;
35800 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35801 DUMP1(urb->transfer_buffer, urb->actual_length);
35802
35803 /* Anyone listening? */
35804 - if (serial->open_count == 0)
35805 + if (local_read(&serial->open_count) == 0)
35806 return;
35807
35808 if (status == 0) {
35809 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35810 spin_unlock_irq(&serial->serial_lock);
35811
35812 /* check for port already opened, if not set the termios */
35813 - serial->open_count++;
35814 - if (serial->open_count == 1) {
35815 + if (local_inc_return(&serial->open_count) == 1) {
35816 serial->rx_state = RX_IDLE;
35817 /* Force default termio settings */
35818 _hso_serial_set_termios(tty, NULL);
35819 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35820 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35821 if (result) {
35822 hso_stop_serial_device(serial->parent);
35823 - serial->open_count--;
35824 + local_dec(&serial->open_count);
35825 kref_put(&serial->parent->ref, hso_serial_ref_free);
35826 }
35827 } else {
35828 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35829
35830 /* reset the rts and dtr */
35831 /* do the actual close */
35832 - serial->open_count--;
35833 + local_dec(&serial->open_count);
35834
35835 - if (serial->open_count <= 0) {
35836 - serial->open_count = 0;
35837 + if (local_read(&serial->open_count) <= 0) {
35838 + local_set(&serial->open_count, 0);
35839 spin_lock_irq(&serial->serial_lock);
35840 if (serial->tty == tty) {
35841 serial->tty->driver_data = NULL;
35842 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35843
35844 /* the actual setup */
35845 spin_lock_irqsave(&serial->serial_lock, flags);
35846 - if (serial->open_count)
35847 + if (local_read(&serial->open_count))
35848 _hso_serial_set_termios(tty, old);
35849 else
35850 tty->termios = old;
35851 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35852 D1("Pending read interrupt on port %d\n", i);
35853 spin_lock(&serial->serial_lock);
35854 if (serial->rx_state == RX_IDLE &&
35855 - serial->open_count > 0) {
35856 + local_read(&serial->open_count) > 0) {
35857 /* Setup and send a ctrl req read on
35858 * port i */
35859 if (!serial->rx_urb_filled[0]) {
35860 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35861 /* Start all serial ports */
35862 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35863 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35864 - if (dev2ser(serial_table[i])->open_count) {
35865 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35866 result =
35867 hso_start_serial_device(serial_table[i], GFP_NOIO);
35868 hso_kick_transmit(dev2ser(serial_table[i]));
35869 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35870 index efc0111..79c8f5b 100644
35871 --- a/drivers/net/wireless/ath/ath.h
35872 +++ b/drivers/net/wireless/ath/ath.h
35873 @@ -119,6 +119,7 @@ struct ath_ops {
35874 void (*write_flush) (void *);
35875 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35876 };
35877 +typedef struct ath_ops __no_const ath_ops_no_const;
35878
35879 struct ath_common;
35880 struct ath_bus_ops;
35881 diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
35882 index 8c5ce8b..abf101b 100644
35883 --- a/drivers/net/wireless/ath/ath5k/debug.c
35884 +++ b/drivers/net/wireless/ath/ath5k/debug.c
35885 @@ -343,6 +343,9 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
35886
35887 static ssize_t write_file_debug(struct file *file,
35888 const char __user *userbuf,
35889 + size_t count, loff_t *ppos) __size_overflow(3);
35890 +static ssize_t write_file_debug(struct file *file,
35891 + const char __user *userbuf,
35892 size_t count, loff_t *ppos)
35893 {
35894 struct ath5k_hw *ah = file->private_data;
35895 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35896 index 7b6417b..ab5db98 100644
35897 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35898 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35899 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35900 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35901 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35902
35903 - ACCESS_ONCE(ads->ds_link) = i->link;
35904 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35905 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35906 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35907
35908 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35909 ctl6 = SM(i->keytype, AR_EncrType);
35910 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35911
35912 if ((i->is_first || i->is_last) &&
35913 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35914 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35915 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35916 | set11nTries(i->rates, 1)
35917 | set11nTries(i->rates, 2)
35918 | set11nTries(i->rates, 3)
35919 | (i->dur_update ? AR_DurUpdateEna : 0)
35920 | SM(0, AR_BurstDur);
35921
35922 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35923 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35924 | set11nRate(i->rates, 1)
35925 | set11nRate(i->rates, 2)
35926 | set11nRate(i->rates, 3);
35927 } else {
35928 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35929 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35930 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35931 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35932 }
35933
35934 if (!i->is_first) {
35935 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35936 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35937 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35938 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35939 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35940 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35941 return;
35942 }
35943
35944 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35945 break;
35946 }
35947
35948 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35949 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35950 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35951 | SM(i->txpower, AR_XmitPower)
35952 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35953 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35954 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35955 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35956
35957 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35958 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35959 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35960 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35961
35962 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35963 return;
35964
35965 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35966 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35967 | set11nPktDurRTSCTS(i->rates, 1);
35968
35969 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35970 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35971 | set11nPktDurRTSCTS(i->rates, 3);
35972
35973 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35974 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35975 | set11nRateFlags(i->rates, 1)
35976 | set11nRateFlags(i->rates, 2)
35977 | set11nRateFlags(i->rates, 3)
35978 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35979 index 09b8c9d..905339e 100644
35980 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35981 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35982 @@ -35,47 +35,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35983 (i->qcu << AR_TxQcuNum_S) | 0x17;
35984
35985 checksum += val;
35986 - ACCESS_ONCE(ads->info) = val;
35987 + ACCESS_ONCE_RW(ads->info) = val;
35988
35989 checksum += i->link;
35990 - ACCESS_ONCE(ads->link) = i->link;
35991 + ACCESS_ONCE_RW(ads->link) = i->link;
35992
35993 checksum += i->buf_addr[0];
35994 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35995 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35996 checksum += i->buf_addr[1];
35997 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35998 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35999 checksum += i->buf_addr[2];
36000 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
36001 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
36002 checksum += i->buf_addr[3];
36003 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
36004 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
36005
36006 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
36007 - ACCESS_ONCE(ads->ctl3) = val;
36008 + ACCESS_ONCE_RW(ads->ctl3) = val;
36009 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
36010 - ACCESS_ONCE(ads->ctl5) = val;
36011 + ACCESS_ONCE_RW(ads->ctl5) = val;
36012 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
36013 - ACCESS_ONCE(ads->ctl7) = val;
36014 + ACCESS_ONCE_RW(ads->ctl7) = val;
36015 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
36016 - ACCESS_ONCE(ads->ctl9) = val;
36017 + ACCESS_ONCE_RW(ads->ctl9) = val;
36018
36019 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
36020 - ACCESS_ONCE(ads->ctl10) = checksum;
36021 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
36022
36023 if (i->is_first || i->is_last) {
36024 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
36025 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
36026 | set11nTries(i->rates, 1)
36027 | set11nTries(i->rates, 2)
36028 | set11nTries(i->rates, 3)
36029 | (i->dur_update ? AR_DurUpdateEna : 0)
36030 | SM(0, AR_BurstDur);
36031
36032 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
36033 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
36034 | set11nRate(i->rates, 1)
36035 | set11nRate(i->rates, 2)
36036 | set11nRate(i->rates, 3);
36037 } else {
36038 - ACCESS_ONCE(ads->ctl13) = 0;
36039 - ACCESS_ONCE(ads->ctl14) = 0;
36040 + ACCESS_ONCE_RW(ads->ctl13) = 0;
36041 + ACCESS_ONCE_RW(ads->ctl14) = 0;
36042 }
36043
36044 ads->ctl20 = 0;
36045 @@ -84,17 +84,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36046
36047 ctl17 = SM(i->keytype, AR_EncrType);
36048 if (!i->is_first) {
36049 - ACCESS_ONCE(ads->ctl11) = 0;
36050 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36051 - ACCESS_ONCE(ads->ctl15) = 0;
36052 - ACCESS_ONCE(ads->ctl16) = 0;
36053 - ACCESS_ONCE(ads->ctl17) = ctl17;
36054 - ACCESS_ONCE(ads->ctl18) = 0;
36055 - ACCESS_ONCE(ads->ctl19) = 0;
36056 + ACCESS_ONCE_RW(ads->ctl11) = 0;
36057 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
36058 + ACCESS_ONCE_RW(ads->ctl15) = 0;
36059 + ACCESS_ONCE_RW(ads->ctl16) = 0;
36060 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36061 + ACCESS_ONCE_RW(ads->ctl18) = 0;
36062 + ACCESS_ONCE_RW(ads->ctl19) = 0;
36063 return;
36064 }
36065
36066 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36067 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
36068 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
36069 | SM(i->txpower, AR_XmitPower)
36070 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
36071 @@ -130,22 +130,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
36072 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
36073 ctl12 |= SM(val, AR_PAPRDChainMask);
36074
36075 - ACCESS_ONCE(ads->ctl12) = ctl12;
36076 - ACCESS_ONCE(ads->ctl17) = ctl17;
36077 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
36078 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
36079
36080 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36081 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
36082 | set11nPktDurRTSCTS(i->rates, 1);
36083
36084 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36085 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
36086 | set11nPktDurRTSCTS(i->rates, 3);
36087
36088 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
36089 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
36090 | set11nRateFlags(i->rates, 1)
36091 | set11nRateFlags(i->rates, 2)
36092 | set11nRateFlags(i->rates, 3)
36093 | SM(i->rtscts_rate, AR_RTSCTSRate);
36094
36095 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
36096 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
36097 }
36098
36099 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
36100 diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
36101 index 68d972b..1d9205b 100644
36102 --- a/drivers/net/wireless/ath/ath9k/debug.c
36103 +++ b/drivers/net/wireless/ath/ath9k/debug.c
36104 @@ -60,6 +60,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36105 }
36106
36107 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36108 + size_t count, loff_t *ppos) __size_overflow(3);
36109 +static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36110 size_t count, loff_t *ppos)
36111 {
36112 struct ath_softc *sc = file->private_data;
36113 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36114 index d3ff33c..c98bcda 100644
36115 --- a/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36116 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_debug.c
36117 @@ -464,6 +464,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
36118 }
36119
36120 static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36121 + size_t count, loff_t *ppos) __size_overflow(3);
36122 +static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
36123 size_t count, loff_t *ppos)
36124 {
36125 struct ath9k_htc_priv *priv = file->private_data;
36126 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
36127 index c8261d4..8d88929 100644
36128 --- a/drivers/net/wireless/ath/ath9k/hw.h
36129 +++ b/drivers/net/wireless/ath/ath9k/hw.h
36130 @@ -773,7 +773,7 @@ struct ath_hw_private_ops {
36131
36132 /* ANI */
36133 void (*ani_cache_ini_regs)(struct ath_hw *ah);
36134 -};
36135 +} __no_const;
36136
36137 /**
36138 * struct ath_hw_ops - callbacks used by hardware code and driver code
36139 @@ -803,7 +803,7 @@ struct ath_hw_ops {
36140 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
36141 struct ath_hw_antcomb_conf *antconf);
36142
36143 -};
36144 +} __no_const;
36145
36146 struct ath_nf_limits {
36147 s16 max;
36148 @@ -823,7 +823,7 @@ enum ath_cal_list {
36149 #define AH_FASTCC 0x4
36150
36151 struct ath_hw {
36152 - struct ath_ops reg_ops;
36153 + ath_ops_no_const reg_ops;
36154
36155 struct ieee80211_hw *hw;
36156 struct ath_common common;
36157 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36158 index af00e2c..ab04d34 100644
36159 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36160 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
36161 @@ -545,7 +545,7 @@ struct phy_func_ptr {
36162 void (*carrsuppr)(struct brcms_phy *);
36163 s32 (*rxsigpwr)(struct brcms_phy *, s32);
36164 void (*detach)(struct brcms_phy *);
36165 -};
36166 +} __no_const;
36167
36168 struct brcms_phy {
36169 struct brcms_phy_pub pubpi_ro;
36170 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
36171 index a2ec369..36fdf14 100644
36172 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
36173 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
36174 @@ -3646,7 +3646,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
36175 */
36176 if (il3945_mod_params.disable_hw_scan) {
36177 D_INFO("Disabling hw_scan\n");
36178 - il3945_hw_ops.hw_scan = NULL;
36179 + pax_open_kernel();
36180 + *(void **)&il3945_hw_ops.hw_scan = NULL;
36181 + pax_close_kernel();
36182 }
36183
36184 D_INFO("*** LOAD DRIVER ***\n");
36185 diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
36186 index f8fc239..8cade22 100644
36187 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h
36188 +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
36189 @@ -86,8 +86,8 @@ do { \
36190 } while (0)
36191
36192 #else
36193 -#define IWL_DEBUG(m, level, fmt, args...)
36194 -#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
36195 +#define IWL_DEBUG(m, level, fmt, args...) do {} while (0)
36196 +#define IWL_DEBUG_LIMIT(m, level, fmt, args...) do {} while (0)
36197 #define iwl_print_hex_dump(m, level, p, len)
36198 #define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
36199 do { \
36200 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
36201 index 4b9e730..7603659 100644
36202 --- a/drivers/net/wireless/mac80211_hwsim.c
36203 +++ b/drivers/net/wireless/mac80211_hwsim.c
36204 @@ -1677,9 +1677,11 @@ static int __init init_mac80211_hwsim(void)
36205 return -EINVAL;
36206
36207 if (fake_hw_scan) {
36208 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36209 - mac80211_hwsim_ops.sw_scan_start = NULL;
36210 - mac80211_hwsim_ops.sw_scan_complete = NULL;
36211 + pax_open_kernel();
36212 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
36213 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
36214 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
36215 + pax_close_kernel();
36216 }
36217
36218 spin_lock_init(&hwsim_radio_lock);
36219 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
36220 index 3186aa4..b35b09f 100644
36221 --- a/drivers/net/wireless/mwifiex/main.h
36222 +++ b/drivers/net/wireless/mwifiex/main.h
36223 @@ -536,7 +536,7 @@ struct mwifiex_if_ops {
36224 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
36225 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
36226 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
36227 -};
36228 +} __no_const;
36229
36230 struct mwifiex_adapter {
36231 u8 iface_type;
36232 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
36233 index a330c69..a81540f 100644
36234 --- a/drivers/net/wireless/rndis_wlan.c
36235 +++ b/drivers/net/wireless/rndis_wlan.c
36236 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
36237
36238 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
36239
36240 - if (rts_threshold < 0 || rts_threshold > 2347)
36241 + if (rts_threshold > 2347)
36242 rts_threshold = 2347;
36243
36244 tmp = cpu_to_le32(rts_threshold);
36245 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
36246 index a77f1bb..c608b2b 100644
36247 --- a/drivers/net/wireless/wl1251/wl1251.h
36248 +++ b/drivers/net/wireless/wl1251/wl1251.h
36249 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
36250 void (*reset)(struct wl1251 *wl);
36251 void (*enable_irq)(struct wl1251 *wl);
36252 void (*disable_irq)(struct wl1251 *wl);
36253 -};
36254 +} __no_const;
36255
36256 struct wl1251 {
36257 struct ieee80211_hw *hw;
36258 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
36259 index f34b5b2..b5abb9f 100644
36260 --- a/drivers/oprofile/buffer_sync.c
36261 +++ b/drivers/oprofile/buffer_sync.c
36262 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
36263 if (cookie == NO_COOKIE)
36264 offset = pc;
36265 if (cookie == INVALID_COOKIE) {
36266 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36267 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36268 offset = pc;
36269 }
36270 if (cookie != last_cookie) {
36271 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
36272 /* add userspace sample */
36273
36274 if (!mm) {
36275 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
36276 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
36277 return 0;
36278 }
36279
36280 cookie = lookup_dcookie(mm, s->eip, &offset);
36281
36282 if (cookie == INVALID_COOKIE) {
36283 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36284 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36285 return 0;
36286 }
36287
36288 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
36289 /* ignore backtraces if failed to add a sample */
36290 if (state == sb_bt_start) {
36291 state = sb_bt_ignore;
36292 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36293 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36294 }
36295 }
36296 release_mm(mm);
36297 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
36298 index c0cc4e7..44d4e54 100644
36299 --- a/drivers/oprofile/event_buffer.c
36300 +++ b/drivers/oprofile/event_buffer.c
36301 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
36302 }
36303
36304 if (buffer_pos == buffer_size) {
36305 - atomic_inc(&oprofile_stats.event_lost_overflow);
36306 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36307 return;
36308 }
36309
36310 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
36311 index ed2c3ec..deda85a 100644
36312 --- a/drivers/oprofile/oprof.c
36313 +++ b/drivers/oprofile/oprof.c
36314 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
36315 if (oprofile_ops.switch_events())
36316 return;
36317
36318 - atomic_inc(&oprofile_stats.multiplex_counter);
36319 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36320 start_switch_worker();
36321 }
36322
36323 diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
36324 index 84a208d..f07d177 100644
36325 --- a/drivers/oprofile/oprofile_files.c
36326 +++ b/drivers/oprofile/oprofile_files.c
36327 @@ -36,6 +36,8 @@ static ssize_t timeout_read(struct file *file, char __user *buf,
36328
36329
36330 static ssize_t timeout_write(struct file *file, char const __user *buf,
36331 + size_t count, loff_t *offset) __size_overflow(3);
36332 +static ssize_t timeout_write(struct file *file, char const __user *buf,
36333 size_t count, loff_t *offset)
36334 {
36335 unsigned long val;
36336 @@ -72,6 +74,7 @@ static ssize_t depth_read(struct file *file, char __user *buf, size_t count, lof
36337 }
36338
36339
36340 +static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36341 static ssize_t depth_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36342 {
36343 unsigned long val;
36344 @@ -126,12 +129,14 @@ static const struct file_operations cpu_type_fops = {
36345 };
36346
36347
36348 +static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36349 static ssize_t enable_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
36350 {
36351 return oprofilefs_ulong_to_user(oprofile_started, buf, count, offset);
36352 }
36353
36354
36355 +static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36356 static ssize_t enable_write(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36357 {
36358 unsigned long val;
36359 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
36360 index 917d28e..d62d981 100644
36361 --- a/drivers/oprofile/oprofile_stats.c
36362 +++ b/drivers/oprofile/oprofile_stats.c
36363 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36364 cpu_buf->sample_invalid_eip = 0;
36365 }
36366
36367 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36368 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36369 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
36370 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36371 - atomic_set(&oprofile_stats.multiplex_counter, 0);
36372 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36373 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36374 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36375 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
36376 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36377 }
36378
36379
36380 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
36381 index 38b6fc0..b5cbfce 100644
36382 --- a/drivers/oprofile/oprofile_stats.h
36383 +++ b/drivers/oprofile/oprofile_stats.h
36384 @@ -13,11 +13,11 @@
36385 #include <linux/atomic.h>
36386
36387 struct oprofile_stat_struct {
36388 - atomic_t sample_lost_no_mm;
36389 - atomic_t sample_lost_no_mapping;
36390 - atomic_t bt_lost_no_mapping;
36391 - atomic_t event_lost_overflow;
36392 - atomic_t multiplex_counter;
36393 + atomic_unchecked_t sample_lost_no_mm;
36394 + atomic_unchecked_t sample_lost_no_mapping;
36395 + atomic_unchecked_t bt_lost_no_mapping;
36396 + atomic_unchecked_t event_lost_overflow;
36397 + atomic_unchecked_t multiplex_counter;
36398 };
36399
36400 extern struct oprofile_stat_struct oprofile_stats;
36401 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
36402 index 2f0aa0f..d5246c3 100644
36403 --- a/drivers/oprofile/oprofilefs.c
36404 +++ b/drivers/oprofile/oprofilefs.c
36405 @@ -97,6 +97,7 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count
36406 }
36407
36408
36409 +static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) __size_overflow(3);
36410 static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset)
36411 {
36412 unsigned long value;
36413 @@ -193,7 +194,7 @@ static const struct file_operations atomic_ro_fops = {
36414
36415
36416 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36417 - char const *name, atomic_t *val)
36418 + char const *name, atomic_unchecked_t *val)
36419 {
36420 return __oprofilefs_create_file(sb, root, name,
36421 &atomic_ro_fops, 0444, val);
36422 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
36423 index 3f56bc0..707d642 100644
36424 --- a/drivers/parport/procfs.c
36425 +++ b/drivers/parport/procfs.c
36426 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
36427
36428 *ppos += len;
36429
36430 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36431 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36432 }
36433
36434 #ifdef CONFIG_PARPORT_1284
36435 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
36436
36437 *ppos += len;
36438
36439 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36440 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36441 }
36442 #endif /* IEEE1284.3 support. */
36443
36444 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
36445 index 9fff878..ad0ad53 100644
36446 --- a/drivers/pci/hotplug/cpci_hotplug.h
36447 +++ b/drivers/pci/hotplug/cpci_hotplug.h
36448 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36449 int (*hardware_test) (struct slot* slot, u32 value);
36450 u8 (*get_power) (struct slot* slot);
36451 int (*set_power) (struct slot* slot, int value);
36452 -};
36453 +} __no_const;
36454
36455 struct cpci_hp_controller {
36456 unsigned int irq;
36457 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
36458 index 76ba8a1..20ca857 100644
36459 --- a/drivers/pci/hotplug/cpqphp_nvram.c
36460 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
36461 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
36462
36463 void compaq_nvram_init (void __iomem *rom_start)
36464 {
36465 +
36466 +#ifndef CONFIG_PAX_KERNEXEC
36467 if (rom_start) {
36468 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36469 }
36470 +#endif
36471 +
36472 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36473
36474 /* initialize our int15 lock */
36475 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
36476 index 2275162..95f1a92 100644
36477 --- a/drivers/pci/pcie/aspm.c
36478 +++ b/drivers/pci/pcie/aspm.c
36479 @@ -27,9 +27,9 @@
36480 #define MODULE_PARAM_PREFIX "pcie_aspm."
36481
36482 /* Note: those are not register definitions */
36483 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36484 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36485 -#define ASPM_STATE_L1 (4) /* L1 state */
36486 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36487 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36488 +#define ASPM_STATE_L1 (4U) /* L1 state */
36489 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36490 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36491
36492 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
36493 index 71eac9c..2de27ef 100644
36494 --- a/drivers/pci/probe.c
36495 +++ b/drivers/pci/probe.c
36496 @@ -136,7 +136,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
36497 u32 l, sz, mask;
36498 u16 orig_cmd;
36499
36500 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36501 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36502
36503 if (!dev->mmio_always_on) {
36504 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
36505 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36506 index 27911b5..5b6db88 100644
36507 --- a/drivers/pci/proc.c
36508 +++ b/drivers/pci/proc.c
36509 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36510 static int __init pci_proc_init(void)
36511 {
36512 struct pci_dev *dev = NULL;
36513 +
36514 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36515 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36516 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36517 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36518 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36519 +#endif
36520 +#else
36521 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36522 +#endif
36523 proc_create("devices", 0, proc_bus_pci_dir,
36524 &proc_bus_pci_dev_operations);
36525 proc_initialized = 1;
36526 diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
36527 index 6f966d6..68e18ed 100644
36528 --- a/drivers/platform/x86/asus_acpi.c
36529 +++ b/drivers/platform/x86/asus_acpi.c
36530 @@ -887,6 +887,8 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
36531 }
36532
36533 static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36534 + size_t count, loff_t *pos) __size_overflow(3);
36535 +static ssize_t lcd_proc_write(struct file *file, const char __user *buffer,
36536 size_t count, loff_t *pos)
36537 {
36538 int rv, value;
36539 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36540 index ea0c607..58c4628 100644
36541 --- a/drivers/platform/x86/thinkpad_acpi.c
36542 +++ b/drivers/platform/x86/thinkpad_acpi.c
36543 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36544 return 0;
36545 }
36546
36547 -void static hotkey_mask_warn_incomplete_mask(void)
36548 +static void hotkey_mask_warn_incomplete_mask(void)
36549 {
36550 /* log only what the user can fix... */
36551 const u32 wantedmask = hotkey_driver_mask &
36552 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36553 }
36554 }
36555
36556 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36557 - struct tp_nvram_state *newn,
36558 - const u32 event_mask)
36559 -{
36560 -
36561 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36562 do { \
36563 if ((event_mask & (1 << __scancode)) && \
36564 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36565 tpacpi_hotkey_send_key(__scancode); \
36566 } while (0)
36567
36568 - void issue_volchange(const unsigned int oldvol,
36569 - const unsigned int newvol)
36570 - {
36571 - unsigned int i = oldvol;
36572 +static void issue_volchange(const unsigned int oldvol,
36573 + const unsigned int newvol,
36574 + const u32 event_mask)
36575 +{
36576 + unsigned int i = oldvol;
36577
36578 - while (i > newvol) {
36579 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36580 - i--;
36581 - }
36582 - while (i < newvol) {
36583 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36584 - i++;
36585 - }
36586 + while (i > newvol) {
36587 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36588 + i--;
36589 }
36590 + while (i < newvol) {
36591 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36592 + i++;
36593 + }
36594 +}
36595
36596 - void issue_brightnesschange(const unsigned int oldbrt,
36597 - const unsigned int newbrt)
36598 - {
36599 - unsigned int i = oldbrt;
36600 +static void issue_brightnesschange(const unsigned int oldbrt,
36601 + const unsigned int newbrt,
36602 + const u32 event_mask)
36603 +{
36604 + unsigned int i = oldbrt;
36605
36606 - while (i > newbrt) {
36607 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36608 - i--;
36609 - }
36610 - while (i < newbrt) {
36611 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36612 - i++;
36613 - }
36614 + while (i > newbrt) {
36615 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36616 + i--;
36617 + }
36618 + while (i < newbrt) {
36619 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36620 + i++;
36621 }
36622 +}
36623
36624 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36625 + struct tp_nvram_state *newn,
36626 + const u32 event_mask)
36627 +{
36628 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36629 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36630 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36631 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36632 oldn->volume_level != newn->volume_level) {
36633 /* recently muted, or repeated mute keypress, or
36634 * multiple presses ending in mute */
36635 - issue_volchange(oldn->volume_level, newn->volume_level);
36636 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36637 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36638 }
36639 } else {
36640 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36641 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36642 }
36643 if (oldn->volume_level != newn->volume_level) {
36644 - issue_volchange(oldn->volume_level, newn->volume_level);
36645 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36646 } else if (oldn->volume_toggle != newn->volume_toggle) {
36647 /* repeated vol up/down keypress at end of scale ? */
36648 if (newn->volume_level == 0)
36649 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36650 /* handle brightness */
36651 if (oldn->brightness_level != newn->brightness_level) {
36652 issue_brightnesschange(oldn->brightness_level,
36653 - newn->brightness_level);
36654 + newn->brightness_level,
36655 + event_mask);
36656 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36657 /* repeated key presses that didn't change state */
36658 if (newn->brightness_level == 0)
36659 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36660 && !tp_features.bright_unkfw)
36661 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36662 }
36663 +}
36664
36665 #undef TPACPI_COMPARE_KEY
36666 #undef TPACPI_MAY_SEND_KEY
36667 -}
36668
36669 /*
36670 * Polling driver
36671 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
36672 index dcdc1f4..85cee16 100644
36673 --- a/drivers/platform/x86/toshiba_acpi.c
36674 +++ b/drivers/platform/x86/toshiba_acpi.c
36675 @@ -517,6 +517,8 @@ static int set_lcd_status(struct backlight_device *bd)
36676 }
36677
36678 static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
36679 + size_t count, loff_t *pos) __size_overflow(3);
36680 +static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
36681 size_t count, loff_t *pos)
36682 {
36683 struct toshiba_acpi_dev *dev = PDE(file->f_path.dentry->d_inode)->data;
36684 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36685 index b859d16..5cc6b1a 100644
36686 --- a/drivers/pnp/pnpbios/bioscalls.c
36687 +++ b/drivers/pnp/pnpbios/bioscalls.c
36688 @@ -59,7 +59,7 @@ do { \
36689 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36690 } while(0)
36691
36692 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36693 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36694 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36695
36696 /*
36697 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36698
36699 cpu = get_cpu();
36700 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36701 +
36702 + pax_open_kernel();
36703 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36704 + pax_close_kernel();
36705
36706 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36707 spin_lock_irqsave(&pnp_bios_lock, flags);
36708 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36709 :"memory");
36710 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36711
36712 + pax_open_kernel();
36713 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36714 + pax_close_kernel();
36715 +
36716 put_cpu();
36717
36718 /* If we get here and this is set then the PnP BIOS faulted on us. */
36719 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36720 return status;
36721 }
36722
36723 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36724 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36725 {
36726 int i;
36727
36728 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36729 pnp_bios_callpoint.offset = header->fields.pm16offset;
36730 pnp_bios_callpoint.segment = PNP_CS16;
36731
36732 + pax_open_kernel();
36733 +
36734 for_each_possible_cpu(i) {
36735 struct desc_struct *gdt = get_cpu_gdt_table(i);
36736 if (!gdt)
36737 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36738 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36739 (unsigned long)__va(header->fields.pm16dseg));
36740 }
36741 +
36742 + pax_close_kernel();
36743 }
36744 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36745 index b0ecacb..7c9da2e 100644
36746 --- a/drivers/pnp/resource.c
36747 +++ b/drivers/pnp/resource.c
36748 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36749 return 1;
36750
36751 /* check if the resource is valid */
36752 - if (*irq < 0 || *irq > 15)
36753 + if (*irq > 15)
36754 return 0;
36755
36756 /* check if the resource is reserved */
36757 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36758 return 1;
36759
36760 /* check if the resource is valid */
36761 - if (*dma < 0 || *dma == 4 || *dma > 7)
36762 + if (*dma == 4 || *dma > 7)
36763 return 0;
36764
36765 /* check if the resource is reserved */
36766 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36767 index 1ed6ea0..77c0bd2 100644
36768 --- a/drivers/power/bq27x00_battery.c
36769 +++ b/drivers/power/bq27x00_battery.c
36770 @@ -72,7 +72,7 @@
36771 struct bq27x00_device_info;
36772 struct bq27x00_access_methods {
36773 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36774 -};
36775 +} __no_const;
36776
36777 enum bq27x00_chip { BQ27000, BQ27500 };
36778
36779 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36780 index a838e66..a9e1665 100644
36781 --- a/drivers/regulator/max8660.c
36782 +++ b/drivers/regulator/max8660.c
36783 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36784 max8660->shadow_regs[MAX8660_OVER1] = 5;
36785 } else {
36786 /* Otherwise devices can be toggled via software */
36787 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
36788 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
36789 + pax_open_kernel();
36790 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36791 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36792 + pax_close_kernel();
36793 }
36794
36795 /*
36796 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36797 index e8cfc99..072aee2 100644
36798 --- a/drivers/regulator/mc13892-regulator.c
36799 +++ b/drivers/regulator/mc13892-regulator.c
36800 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36801 }
36802 mc13xxx_unlock(mc13892);
36803
36804 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36805 + pax_open_kernel();
36806 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36807 = mc13892_vcam_set_mode;
36808 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36809 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36810 = mc13892_vcam_get_mode;
36811 + pax_close_kernel();
36812
36813 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36814 ARRAY_SIZE(mc13892_regulators));
36815 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36816 index cace6d3..f623fda 100644
36817 --- a/drivers/rtc/rtc-dev.c
36818 +++ b/drivers/rtc/rtc-dev.c
36819 @@ -14,6 +14,7 @@
36820 #include <linux/module.h>
36821 #include <linux/rtc.h>
36822 #include <linux/sched.h>
36823 +#include <linux/grsecurity.h>
36824 #include "rtc-core.h"
36825
36826 static dev_t rtc_devt;
36827 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36828 if (copy_from_user(&tm, uarg, sizeof(tm)))
36829 return -EFAULT;
36830
36831 + gr_log_timechange();
36832 +
36833 return rtc_set_time(rtc, &tm);
36834
36835 case RTC_PIE_ON:
36836 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36837 index ffb5878..e6d785c 100644
36838 --- a/drivers/scsi/aacraid/aacraid.h
36839 +++ b/drivers/scsi/aacraid/aacraid.h
36840 @@ -492,7 +492,7 @@ struct adapter_ops
36841 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36842 /* Administrative operations */
36843 int (*adapter_comm)(struct aac_dev * dev, int comm);
36844 -};
36845 +} __no_const;
36846
36847 /*
36848 * Define which interrupt handler needs to be installed
36849 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36850 index 705e13e..91c873c 100644
36851 --- a/drivers/scsi/aacraid/linit.c
36852 +++ b/drivers/scsi/aacraid/linit.c
36853 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36854 #elif defined(__devinitconst)
36855 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36856 #else
36857 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36858 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36859 #endif
36860 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36861 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36862 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36863 index d5ff142..49c0ebb 100644
36864 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36865 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36866 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36867 .lldd_control_phy = asd_control_phy,
36868 };
36869
36870 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36871 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36872 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36873 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36874 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36875 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36876 index a796de9..1ef20e1 100644
36877 --- a/drivers/scsi/bfa/bfa.h
36878 +++ b/drivers/scsi/bfa/bfa.h
36879 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36880 u32 *end);
36881 int cpe_vec_q0;
36882 int rme_vec_q0;
36883 -};
36884 +} __no_const;
36885 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36886
36887 struct bfa_faa_cbfn_s {
36888 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36889 index f0f80e2..8ec946b 100644
36890 --- a/drivers/scsi/bfa/bfa_fcpim.c
36891 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36892 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36893
36894 bfa_iotag_attach(fcp);
36895
36896 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36897 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36898 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36899 (fcp->num_itns * sizeof(struct bfa_itn_s));
36900 memset(fcp->itn_arr, 0,
36901 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36902 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36903 {
36904 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36905 - struct bfa_itn_s *itn;
36906 + bfa_itn_s_no_const *itn;
36907
36908 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36909 itn->isr = isr;
36910 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36911 index 36f26da..38a34a8 100644
36912 --- a/drivers/scsi/bfa/bfa_fcpim.h
36913 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36914 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36915 struct bfa_itn_s {
36916 bfa_isr_func_t isr;
36917 };
36918 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36919
36920 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36921 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36922 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36923 struct list_head iotag_tio_free_q; /* free IO resources */
36924 struct list_head iotag_unused_q; /* unused IO resources*/
36925 struct bfa_iotag_s *iotag_arr;
36926 - struct bfa_itn_s *itn_arr;
36927 + bfa_itn_s_no_const *itn_arr;
36928 int num_ioim_reqs;
36929 int num_fwtio_reqs;
36930 int num_itns;
36931 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36932 index 546d46b..642fa5b 100644
36933 --- a/drivers/scsi/bfa/bfa_ioc.h
36934 +++ b/drivers/scsi/bfa/bfa_ioc.h
36935 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36936 bfa_ioc_disable_cbfn_t disable_cbfn;
36937 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36938 bfa_ioc_reset_cbfn_t reset_cbfn;
36939 -};
36940 +} __no_const;
36941
36942 /*
36943 * IOC event notification mechanism.
36944 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36945 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36946 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36947 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36948 -};
36949 +} __no_const;
36950
36951 /*
36952 * Queue element to wait for room in request queue. FIFO order is
36953 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36954 index 351dc0b..951dc32 100644
36955 --- a/drivers/scsi/hosts.c
36956 +++ b/drivers/scsi/hosts.c
36957 @@ -42,7 +42,7 @@
36958 #include "scsi_logging.h"
36959
36960
36961 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36962 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36963
36964
36965 static void scsi_host_cls_release(struct device *dev)
36966 @@ -357,7 +357,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36967 * subtract one because we increment first then return, but we need to
36968 * know what the next host number was before increment
36969 */
36970 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36971 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36972 shost->dma_channel = 0xff;
36973
36974 /* These three are default values which can be overridden */
36975 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36976 index b96962c..0c82ec2 100644
36977 --- a/drivers/scsi/hpsa.c
36978 +++ b/drivers/scsi/hpsa.c
36979 @@ -507,7 +507,7 @@ static inline u32 next_command(struct ctlr_info *h)
36980 u32 a;
36981
36982 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36983 - return h->access.command_completed(h);
36984 + return h->access->command_completed(h);
36985
36986 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36987 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36988 @@ -2991,7 +2991,7 @@ static void start_io(struct ctlr_info *h)
36989 while (!list_empty(&h->reqQ)) {
36990 c = list_entry(h->reqQ.next, struct CommandList, list);
36991 /* can't do anything if fifo is full */
36992 - if ((h->access.fifo_full(h))) {
36993 + if ((h->access->fifo_full(h))) {
36994 dev_warn(&h->pdev->dev, "fifo full\n");
36995 break;
36996 }
36997 @@ -3001,7 +3001,7 @@ static void start_io(struct ctlr_info *h)
36998 h->Qdepth--;
36999
37000 /* Tell the controller execute command */
37001 - h->access.submit_command(h, c);
37002 + h->access->submit_command(h, c);
37003
37004 /* Put job onto the completed Q */
37005 addQ(&h->cmpQ, c);
37006 @@ -3010,17 +3010,17 @@ static void start_io(struct ctlr_info *h)
37007
37008 static inline unsigned long get_next_completion(struct ctlr_info *h)
37009 {
37010 - return h->access.command_completed(h);
37011 + return h->access->command_completed(h);
37012 }
37013
37014 static inline bool interrupt_pending(struct ctlr_info *h)
37015 {
37016 - return h->access.intr_pending(h);
37017 + return h->access->intr_pending(h);
37018 }
37019
37020 static inline long interrupt_not_for_us(struct ctlr_info *h)
37021 {
37022 - return (h->access.intr_pending(h) == 0) ||
37023 + return (h->access->intr_pending(h) == 0) ||
37024 (h->interrupts_enabled == 0);
37025 }
37026
37027 @@ -3919,7 +3919,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
37028 if (prod_index < 0)
37029 return -ENODEV;
37030 h->product_name = products[prod_index].product_name;
37031 - h->access = *(products[prod_index].access);
37032 + h->access = products[prod_index].access;
37033
37034 if (hpsa_board_disabled(h->pdev)) {
37035 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
37036 @@ -4164,7 +4164,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
37037
37038 assert_spin_locked(&lockup_detector_lock);
37039 remove_ctlr_from_lockup_detector_list(h);
37040 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37041 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37042 spin_lock_irqsave(&h->lock, flags);
37043 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
37044 spin_unlock_irqrestore(&h->lock, flags);
37045 @@ -4344,7 +4344,7 @@ reinit_after_soft_reset:
37046 }
37047
37048 /* make sure the board interrupts are off */
37049 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37050 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37051
37052 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
37053 goto clean2;
37054 @@ -4378,7 +4378,7 @@ reinit_after_soft_reset:
37055 * fake ones to scoop up any residual completions.
37056 */
37057 spin_lock_irqsave(&h->lock, flags);
37058 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37059 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37060 spin_unlock_irqrestore(&h->lock, flags);
37061 free_irq(h->intr[h->intr_mode], h);
37062 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
37063 @@ -4397,9 +4397,9 @@ reinit_after_soft_reset:
37064 dev_info(&h->pdev->dev, "Board READY.\n");
37065 dev_info(&h->pdev->dev,
37066 "Waiting for stale completions to drain.\n");
37067 - h->access.set_intr_mask(h, HPSA_INTR_ON);
37068 + h->access->set_intr_mask(h, HPSA_INTR_ON);
37069 msleep(10000);
37070 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37071 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37072
37073 rc = controller_reset_failed(h->cfgtable);
37074 if (rc)
37075 @@ -4420,7 +4420,7 @@ reinit_after_soft_reset:
37076 }
37077
37078 /* Turn the interrupts on so we can service requests */
37079 - h->access.set_intr_mask(h, HPSA_INTR_ON);
37080 + h->access->set_intr_mask(h, HPSA_INTR_ON);
37081
37082 hpsa_hba_inquiry(h);
37083 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
37084 @@ -4472,7 +4472,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
37085 * To write all data in the battery backed cache to disks
37086 */
37087 hpsa_flush_cache(h);
37088 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
37089 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
37090 free_irq(h->intr[h->intr_mode], h);
37091 #ifdef CONFIG_PCI_MSI
37092 if (h->msix_vector)
37093 @@ -4636,7 +4636,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
37094 return;
37095 }
37096 /* Change the access methods to the performant access methods */
37097 - h->access = SA5_performant_access;
37098 + h->access = &SA5_performant_access;
37099 h->transMethod = CFGTBL_Trans_Performant;
37100 }
37101
37102 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
37103 index 91edafb..a9b88ec 100644
37104 --- a/drivers/scsi/hpsa.h
37105 +++ b/drivers/scsi/hpsa.h
37106 @@ -73,7 +73,7 @@ struct ctlr_info {
37107 unsigned int msix_vector;
37108 unsigned int msi_vector;
37109 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
37110 - struct access_method access;
37111 + struct access_method *access;
37112
37113 /* queue and queue Info */
37114 struct list_head reqQ;
37115 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
37116 index f2df059..a3a9930 100644
37117 --- a/drivers/scsi/ips.h
37118 +++ b/drivers/scsi/ips.h
37119 @@ -1027,7 +1027,7 @@ typedef struct {
37120 int (*intr)(struct ips_ha *);
37121 void (*enableint)(struct ips_ha *);
37122 uint32_t (*statupd)(struct ips_ha *);
37123 -} ips_hw_func_t;
37124 +} __no_const ips_hw_func_t;
37125
37126 typedef struct ips_ha {
37127 uint8_t ha_id[IPS_MAX_CHANNELS+1];
37128 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
37129 index 4d70d96..84d0573 100644
37130 --- a/drivers/scsi/libfc/fc_exch.c
37131 +++ b/drivers/scsi/libfc/fc_exch.c
37132 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
37133 * all together if not used XXX
37134 */
37135 struct {
37136 - atomic_t no_free_exch;
37137 - atomic_t no_free_exch_xid;
37138 - atomic_t xid_not_found;
37139 - atomic_t xid_busy;
37140 - atomic_t seq_not_found;
37141 - atomic_t non_bls_resp;
37142 + atomic_unchecked_t no_free_exch;
37143 + atomic_unchecked_t no_free_exch_xid;
37144 + atomic_unchecked_t xid_not_found;
37145 + atomic_unchecked_t xid_busy;
37146 + atomic_unchecked_t seq_not_found;
37147 + atomic_unchecked_t non_bls_resp;
37148 } stats;
37149 };
37150
37151 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
37152 /* allocate memory for exchange */
37153 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
37154 if (!ep) {
37155 - atomic_inc(&mp->stats.no_free_exch);
37156 + atomic_inc_unchecked(&mp->stats.no_free_exch);
37157 goto out;
37158 }
37159 memset(ep, 0, sizeof(*ep));
37160 @@ -780,7 +780,7 @@ out:
37161 return ep;
37162 err:
37163 spin_unlock_bh(&pool->lock);
37164 - atomic_inc(&mp->stats.no_free_exch_xid);
37165 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
37166 mempool_free(ep, mp->ep_pool);
37167 return NULL;
37168 }
37169 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37170 xid = ntohs(fh->fh_ox_id); /* we originated exch */
37171 ep = fc_exch_find(mp, xid);
37172 if (!ep) {
37173 - atomic_inc(&mp->stats.xid_not_found);
37174 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37175 reject = FC_RJT_OX_ID;
37176 goto out;
37177 }
37178 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37179 ep = fc_exch_find(mp, xid);
37180 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
37181 if (ep) {
37182 - atomic_inc(&mp->stats.xid_busy);
37183 + atomic_inc_unchecked(&mp->stats.xid_busy);
37184 reject = FC_RJT_RX_ID;
37185 goto rel;
37186 }
37187 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37188 }
37189 xid = ep->xid; /* get our XID */
37190 } else if (!ep) {
37191 - atomic_inc(&mp->stats.xid_not_found);
37192 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37193 reject = FC_RJT_RX_ID; /* XID not found */
37194 goto out;
37195 }
37196 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
37197 } else {
37198 sp = &ep->seq;
37199 if (sp->id != fh->fh_seq_id) {
37200 - atomic_inc(&mp->stats.seq_not_found);
37201 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37202 if (f_ctl & FC_FC_END_SEQ) {
37203 /*
37204 * Update sequence_id based on incoming last
37205 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37206
37207 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37208 if (!ep) {
37209 - atomic_inc(&mp->stats.xid_not_found);
37210 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37211 goto out;
37212 }
37213 if (ep->esb_stat & ESB_ST_COMPLETE) {
37214 - atomic_inc(&mp->stats.xid_not_found);
37215 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37216 goto rel;
37217 }
37218 if (ep->rxid == FC_XID_UNKNOWN)
37219 ep->rxid = ntohs(fh->fh_rx_id);
37220 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37221 - atomic_inc(&mp->stats.xid_not_found);
37222 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37223 goto rel;
37224 }
37225 if (ep->did != ntoh24(fh->fh_s_id) &&
37226 ep->did != FC_FID_FLOGI) {
37227 - atomic_inc(&mp->stats.xid_not_found);
37228 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37229 goto rel;
37230 }
37231 sof = fr_sof(fp);
37232 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37233 sp->ssb_stat |= SSB_ST_RESP;
37234 sp->id = fh->fh_seq_id;
37235 } else if (sp->id != fh->fh_seq_id) {
37236 - atomic_inc(&mp->stats.seq_not_found);
37237 + atomic_inc_unchecked(&mp->stats.seq_not_found);
37238 goto rel;
37239 }
37240
37241 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
37242 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37243
37244 if (!sp)
37245 - atomic_inc(&mp->stats.xid_not_found);
37246 + atomic_inc_unchecked(&mp->stats.xid_not_found);
37247 else
37248 - atomic_inc(&mp->stats.non_bls_resp);
37249 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
37250
37251 fc_frame_free(fp);
37252 }
37253 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
37254 index db9238f..4378ed2 100644
37255 --- a/drivers/scsi/libsas/sas_ata.c
37256 +++ b/drivers/scsi/libsas/sas_ata.c
37257 @@ -368,7 +368,7 @@ static struct ata_port_operations sas_sata_ops = {
37258 .postreset = ata_std_postreset,
37259 .error_handler = ata_std_error_handler,
37260 .post_internal_cmd = sas_ata_post_internal,
37261 - .qc_defer = ata_std_qc_defer,
37262 + .qc_defer = ata_std_qc_defer,
37263 .qc_prep = ata_noop_qc_prep,
37264 .qc_issue = sas_ata_qc_issue,
37265 .qc_fill_rtf = sas_ata_qc_fill_rtf,
37266 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
37267 index 825f930..ce42672 100644
37268 --- a/drivers/scsi/lpfc/lpfc.h
37269 +++ b/drivers/scsi/lpfc/lpfc.h
37270 @@ -413,7 +413,7 @@ struct lpfc_vport {
37271 struct dentry *debug_nodelist;
37272 struct dentry *vport_debugfs_root;
37273 struct lpfc_debugfs_trc *disc_trc;
37274 - atomic_t disc_trc_cnt;
37275 + atomic_unchecked_t disc_trc_cnt;
37276 #endif
37277 uint8_t stat_data_enabled;
37278 uint8_t stat_data_blocked;
37279 @@ -821,8 +821,8 @@ struct lpfc_hba {
37280 struct timer_list fabric_block_timer;
37281 unsigned long bit_flags;
37282 #define FABRIC_COMANDS_BLOCKED 0
37283 - atomic_t num_rsrc_err;
37284 - atomic_t num_cmd_success;
37285 + atomic_unchecked_t num_rsrc_err;
37286 + atomic_unchecked_t num_cmd_success;
37287 unsigned long last_rsrc_error_time;
37288 unsigned long last_ramp_down_time;
37289 unsigned long last_ramp_up_time;
37290 @@ -852,7 +852,7 @@ struct lpfc_hba {
37291
37292 struct dentry *debug_slow_ring_trc;
37293 struct lpfc_debugfs_trc *slow_ring_trc;
37294 - atomic_t slow_ring_trc_cnt;
37295 + atomic_unchecked_t slow_ring_trc_cnt;
37296 /* iDiag debugfs sub-directory */
37297 struct dentry *idiag_root;
37298 struct dentry *idiag_pci_cfg;
37299 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
37300 index 3587a3f..d45b81b 100644
37301 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
37302 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
37303 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
37304
37305 #include <linux/debugfs.h>
37306
37307 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37308 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37309 static unsigned long lpfc_debugfs_start_time = 0L;
37310
37311 /* iDiag */
37312 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
37313 lpfc_debugfs_enable = 0;
37314
37315 len = 0;
37316 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
37317 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
37318 (lpfc_debugfs_max_disc_trc - 1);
37319 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37320 dtp = vport->disc_trc + i;
37321 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
37322 lpfc_debugfs_enable = 0;
37323
37324 len = 0;
37325 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37326 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37327 (lpfc_debugfs_max_slow_ring_trc - 1);
37328 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37329 dtp = phba->slow_ring_trc + i;
37330 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
37331 !vport || !vport->disc_trc)
37332 return;
37333
37334 - index = atomic_inc_return(&vport->disc_trc_cnt) &
37335 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37336 (lpfc_debugfs_max_disc_trc - 1);
37337 dtp = vport->disc_trc + index;
37338 dtp->fmt = fmt;
37339 dtp->data1 = data1;
37340 dtp->data2 = data2;
37341 dtp->data3 = data3;
37342 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37343 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37344 dtp->jif = jiffies;
37345 #endif
37346 return;
37347 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
37348 !phba || !phba->slow_ring_trc)
37349 return;
37350
37351 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37352 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37353 (lpfc_debugfs_max_slow_ring_trc - 1);
37354 dtp = phba->slow_ring_trc + index;
37355 dtp->fmt = fmt;
37356 dtp->data1 = data1;
37357 dtp->data2 = data2;
37358 dtp->data3 = data3;
37359 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37360 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37361 dtp->jif = jiffies;
37362 #endif
37363 return;
37364 @@ -4040,7 +4040,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37365 "slow_ring buffer\n");
37366 goto debug_failed;
37367 }
37368 - atomic_set(&phba->slow_ring_trc_cnt, 0);
37369 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37370 memset(phba->slow_ring_trc, 0,
37371 (sizeof(struct lpfc_debugfs_trc) *
37372 lpfc_debugfs_max_slow_ring_trc));
37373 @@ -4086,7 +4086,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
37374 "buffer\n");
37375 goto debug_failed;
37376 }
37377 - atomic_set(&vport->disc_trc_cnt, 0);
37378 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37379
37380 snprintf(name, sizeof(name), "discovery_trace");
37381 vport->debug_disc_trc =
37382 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
37383 index dfea2da..8e17227 100644
37384 --- a/drivers/scsi/lpfc/lpfc_init.c
37385 +++ b/drivers/scsi/lpfc/lpfc_init.c
37386 @@ -10145,8 +10145,10 @@ lpfc_init(void)
37387 printk(LPFC_COPYRIGHT "\n");
37388
37389 if (lpfc_enable_npiv) {
37390 - lpfc_transport_functions.vport_create = lpfc_vport_create;
37391 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37392 + pax_open_kernel();
37393 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37394 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37395 + pax_close_kernel();
37396 }
37397 lpfc_transport_template =
37398 fc_attach_transport(&lpfc_transport_functions);
37399 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
37400 index c60f5d0..751535c 100644
37401 --- a/drivers/scsi/lpfc/lpfc_scsi.c
37402 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
37403 @@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
37404 uint32_t evt_posted;
37405
37406 spin_lock_irqsave(&phba->hbalock, flags);
37407 - atomic_inc(&phba->num_rsrc_err);
37408 + atomic_inc_unchecked(&phba->num_rsrc_err);
37409 phba->last_rsrc_error_time = jiffies;
37410
37411 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37412 @@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
37413 unsigned long flags;
37414 struct lpfc_hba *phba = vport->phba;
37415 uint32_t evt_posted;
37416 - atomic_inc(&phba->num_cmd_success);
37417 + atomic_inc_unchecked(&phba->num_cmd_success);
37418
37419 if (vport->cfg_lun_queue_depth <= queue_depth)
37420 return;
37421 @@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37422 unsigned long num_rsrc_err, num_cmd_success;
37423 int i;
37424
37425 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37426 - num_cmd_success = atomic_read(&phba->num_cmd_success);
37427 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37428 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37429
37430 vports = lpfc_create_vport_work_array(phba);
37431 if (vports != NULL)
37432 @@ -411,8 +411,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
37433 }
37434 }
37435 lpfc_destroy_vport_work_array(phba, vports);
37436 - atomic_set(&phba->num_rsrc_err, 0);
37437 - atomic_set(&phba->num_cmd_success, 0);
37438 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37439 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37440 }
37441
37442 /**
37443 @@ -446,8 +446,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
37444 }
37445 }
37446 lpfc_destroy_vport_work_array(phba, vports);
37447 - atomic_set(&phba->num_rsrc_err, 0);
37448 - atomic_set(&phba->num_cmd_success, 0);
37449 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
37450 + atomic_set_unchecked(&phba->num_cmd_success, 0);
37451 }
37452
37453 /**
37454 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
37455 index ea8a0b4..812a124 100644
37456 --- a/drivers/scsi/pmcraid.c
37457 +++ b/drivers/scsi/pmcraid.c
37458 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
37459 res->scsi_dev = scsi_dev;
37460 scsi_dev->hostdata = res;
37461 res->change_detected = 0;
37462 - atomic_set(&res->read_failures, 0);
37463 - atomic_set(&res->write_failures, 0);
37464 + atomic_set_unchecked(&res->read_failures, 0);
37465 + atomic_set_unchecked(&res->write_failures, 0);
37466 rc = 0;
37467 }
37468 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37469 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
37470
37471 /* If this was a SCSI read/write command keep count of errors */
37472 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37473 - atomic_inc(&res->read_failures);
37474 + atomic_inc_unchecked(&res->read_failures);
37475 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37476 - atomic_inc(&res->write_failures);
37477 + atomic_inc_unchecked(&res->write_failures);
37478
37479 if (!RES_IS_GSCSI(res->cfg_entry) &&
37480 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37481 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
37482 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37483 * hrrq_id assigned here in queuecommand
37484 */
37485 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37486 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37487 pinstance->num_hrrq;
37488 cmd->cmd_done = pmcraid_io_done;
37489
37490 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
37491 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
37492 * hrrq_id assigned here in queuecommand
37493 */
37494 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
37495 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
37496 pinstance->num_hrrq;
37497
37498 if (request_size) {
37499 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
37500
37501 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37502 /* add resources only after host is added into system */
37503 - if (!atomic_read(&pinstance->expose_resources))
37504 + if (!atomic_read_unchecked(&pinstance->expose_resources))
37505 return;
37506
37507 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
37508 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
37509 init_waitqueue_head(&pinstance->reset_wait_q);
37510
37511 atomic_set(&pinstance->outstanding_cmds, 0);
37512 - atomic_set(&pinstance->last_message_id, 0);
37513 - atomic_set(&pinstance->expose_resources, 0);
37514 + atomic_set_unchecked(&pinstance->last_message_id, 0);
37515 + atomic_set_unchecked(&pinstance->expose_resources, 0);
37516
37517 INIT_LIST_HEAD(&pinstance->free_res_q);
37518 INIT_LIST_HEAD(&pinstance->used_res_q);
37519 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
37520 /* Schedule worker thread to handle CCN and take care of adding and
37521 * removing devices to OS
37522 */
37523 - atomic_set(&pinstance->expose_resources, 1);
37524 + atomic_set_unchecked(&pinstance->expose_resources, 1);
37525 schedule_work(&pinstance->worker_q);
37526 return rc;
37527
37528 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
37529 index ca496c7..9c791d5 100644
37530 --- a/drivers/scsi/pmcraid.h
37531 +++ b/drivers/scsi/pmcraid.h
37532 @@ -748,7 +748,7 @@ struct pmcraid_instance {
37533 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37534
37535 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37536 - atomic_t last_message_id;
37537 + atomic_unchecked_t last_message_id;
37538
37539 /* configuration table */
37540 struct pmcraid_config_table *cfg_table;
37541 @@ -777,7 +777,7 @@ struct pmcraid_instance {
37542 atomic_t outstanding_cmds;
37543
37544 /* should add/delete resources to mid-layer now ?*/
37545 - atomic_t expose_resources;
37546 + atomic_unchecked_t expose_resources;
37547
37548
37549
37550 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37551 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37552 };
37553 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37554 - atomic_t read_failures; /* count of failed READ commands */
37555 - atomic_t write_failures; /* count of failed WRITE commands */
37556 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37557 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37558
37559 /* To indicate add/delete/modify during CCN */
37560 u8 change_detected;
37561 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37562 index af1003f..be55a75 100644
37563 --- a/drivers/scsi/qla2xxx/qla_def.h
37564 +++ b/drivers/scsi/qla2xxx/qla_def.h
37565 @@ -2247,7 +2247,7 @@ struct isp_operations {
37566 int (*start_scsi) (srb_t *);
37567 int (*abort_isp) (struct scsi_qla_host *);
37568 int (*iospace_config)(struct qla_hw_data*);
37569 -};
37570 +} __no_const;
37571
37572 /* MSI-X Support *************************************************************/
37573
37574 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37575 index bfe6854..ceac088 100644
37576 --- a/drivers/scsi/qla4xxx/ql4_def.h
37577 +++ b/drivers/scsi/qla4xxx/ql4_def.h
37578 @@ -261,7 +261,7 @@ struct ddb_entry {
37579 * (4000 only) */
37580 atomic_t relogin_timer; /* Max Time to wait for
37581 * relogin to complete */
37582 - atomic_t relogin_retry_count; /* Num of times relogin has been
37583 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37584 * retried */
37585 uint32_t default_time2wait; /* Default Min time between
37586 * relogins (+aens) */
37587 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37588 index ce6d3b7..73fac54 100644
37589 --- a/drivers/scsi/qla4xxx/ql4_os.c
37590 +++ b/drivers/scsi/qla4xxx/ql4_os.c
37591 @@ -2178,12 +2178,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37592 */
37593 if (!iscsi_is_session_online(cls_sess)) {
37594 /* Reset retry relogin timer */
37595 - atomic_inc(&ddb_entry->relogin_retry_count);
37596 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37597 DEBUG2(ql4_printk(KERN_INFO, ha,
37598 "%s: index[%d] relogin timed out-retrying"
37599 " relogin (%d), retry (%d)\n", __func__,
37600 ddb_entry->fw_ddb_index,
37601 - atomic_read(&ddb_entry->relogin_retry_count),
37602 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37603 ddb_entry->default_time2wait + 4));
37604 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37605 atomic_set(&ddb_entry->retry_relogin_timer,
37606 @@ -3953,7 +3953,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37607
37608 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37609 atomic_set(&ddb_entry->relogin_timer, 0);
37610 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37611 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37612 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37613 ddb_entry->default_relogin_timeout =
37614 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37615 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37616 index 2aeb2e9..46e3925 100644
37617 --- a/drivers/scsi/scsi.c
37618 +++ b/drivers/scsi/scsi.c
37619 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37620 unsigned long timeout;
37621 int rtn = 0;
37622
37623 - atomic_inc(&cmd->device->iorequest_cnt);
37624 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37625
37626 /* check if the device is still usable */
37627 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37628 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37629 index b2c95db..227d74e 100644
37630 --- a/drivers/scsi/scsi_lib.c
37631 +++ b/drivers/scsi/scsi_lib.c
37632 @@ -1411,7 +1411,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37633 shost = sdev->host;
37634 scsi_init_cmd_errh(cmd);
37635 cmd->result = DID_NO_CONNECT << 16;
37636 - atomic_inc(&cmd->device->iorequest_cnt);
37637 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37638
37639 /*
37640 * SCSI request completion path will do scsi_device_unbusy(),
37641 @@ -1437,9 +1437,9 @@ static void scsi_softirq_done(struct request *rq)
37642
37643 INIT_LIST_HEAD(&cmd->eh_entry);
37644
37645 - atomic_inc(&cmd->device->iodone_cnt);
37646 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37647 if (cmd->result)
37648 - atomic_inc(&cmd->device->ioerr_cnt);
37649 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37650
37651 disposition = scsi_decide_disposition(cmd);
37652 if (disposition != SUCCESS &&
37653 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37654 index 04c2a27..9d8bd66 100644
37655 --- a/drivers/scsi/scsi_sysfs.c
37656 +++ b/drivers/scsi/scsi_sysfs.c
37657 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37658 char *buf) \
37659 { \
37660 struct scsi_device *sdev = to_scsi_device(dev); \
37661 - unsigned long long count = atomic_read(&sdev->field); \
37662 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37663 return snprintf(buf, 20, "0x%llx\n", count); \
37664 } \
37665 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37666 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37667 index 84a1fdf..693b0d6 100644
37668 --- a/drivers/scsi/scsi_tgt_lib.c
37669 +++ b/drivers/scsi/scsi_tgt_lib.c
37670 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37671 int err;
37672
37673 dprintk("%lx %u\n", uaddr, len);
37674 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37675 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37676 if (err) {
37677 /*
37678 * TODO: need to fixup sg_tablesize, max_segment_size,
37679 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37680 index f59d4a0..1d89407 100644
37681 --- a/drivers/scsi/scsi_transport_fc.c
37682 +++ b/drivers/scsi/scsi_transport_fc.c
37683 @@ -484,7 +484,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37684 * Netlink Infrastructure
37685 */
37686
37687 -static atomic_t fc_event_seq;
37688 +static atomic_unchecked_t fc_event_seq;
37689
37690 /**
37691 * fc_get_event_number - Obtain the next sequential FC event number
37692 @@ -497,7 +497,7 @@ static atomic_t fc_event_seq;
37693 u32
37694 fc_get_event_number(void)
37695 {
37696 - return atomic_add_return(1, &fc_event_seq);
37697 + return atomic_add_return_unchecked(1, &fc_event_seq);
37698 }
37699 EXPORT_SYMBOL(fc_get_event_number);
37700
37701 @@ -645,7 +645,7 @@ static __init int fc_transport_init(void)
37702 {
37703 int error;
37704
37705 - atomic_set(&fc_event_seq, 0);
37706 + atomic_set_unchecked(&fc_event_seq, 0);
37707
37708 error = transport_class_register(&fc_host_class);
37709 if (error)
37710 @@ -835,7 +835,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37711 char *cp;
37712
37713 *val = simple_strtoul(buf, &cp, 0);
37714 - if ((*cp && (*cp != '\n')) || (*val < 0))
37715 + if (*cp && (*cp != '\n'))
37716 return -EINVAL;
37717 /*
37718 * Check for overflow; dev_loss_tmo is u32
37719 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37720 index e3e3c7d..ebdab62 100644
37721 --- a/drivers/scsi/scsi_transport_iscsi.c
37722 +++ b/drivers/scsi/scsi_transport_iscsi.c
37723 @@ -79,7 +79,7 @@ struct iscsi_internal {
37724 struct transport_container session_cont;
37725 };
37726
37727 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37728 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37729 static struct workqueue_struct *iscsi_eh_timer_workq;
37730
37731 static DEFINE_IDA(iscsi_sess_ida);
37732 @@ -1063,7 +1063,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37733 int err;
37734
37735 ihost = shost->shost_data;
37736 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37737 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37738
37739 if (target_id == ISCSI_MAX_TARGET) {
37740 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37741 @@ -2680,7 +2680,7 @@ static __init int iscsi_transport_init(void)
37742 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37743 ISCSI_TRANSPORT_VERSION);
37744
37745 - atomic_set(&iscsi_session_nr, 0);
37746 + atomic_set_unchecked(&iscsi_session_nr, 0);
37747
37748 err = class_register(&iscsi_transport_class);
37749 if (err)
37750 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37751 index 21a045e..ec89e03 100644
37752 --- a/drivers/scsi/scsi_transport_srp.c
37753 +++ b/drivers/scsi/scsi_transport_srp.c
37754 @@ -33,7 +33,7 @@
37755 #include "scsi_transport_srp_internal.h"
37756
37757 struct srp_host_attrs {
37758 - atomic_t next_port_id;
37759 + atomic_unchecked_t next_port_id;
37760 };
37761 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37762
37763 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37764 struct Scsi_Host *shost = dev_to_shost(dev);
37765 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37766
37767 - atomic_set(&srp_host->next_port_id, 0);
37768 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37769 return 0;
37770 }
37771
37772 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37773 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37774 rport->roles = ids->roles;
37775
37776 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37777 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37778 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37779
37780 transport_setup_device(&rport->dev);
37781 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37782 index eacd46b..e3f4d62 100644
37783 --- a/drivers/scsi/sg.c
37784 +++ b/drivers/scsi/sg.c
37785 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37786 sdp->disk->disk_name,
37787 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37788 NULL,
37789 - (char *)arg);
37790 + (char __user *)arg);
37791 case BLKTRACESTART:
37792 return blk_trace_startstop(sdp->device->request_queue, 1);
37793 case BLKTRACESTOP:
37794 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37795 const struct file_operations * fops;
37796 };
37797
37798 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37799 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37800 {"allow_dio", &adio_fops},
37801 {"debug", &debug_fops},
37802 {"def_reserved_size", &dressz_fops},
37803 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
37804 if (!sg_proc_sgp)
37805 return 1;
37806 for (k = 0; k < num_leaves; ++k) {
37807 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37808 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37809 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37810 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37811 }
37812 diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
37813 index f64250e..1ee3049 100644
37814 --- a/drivers/spi/spi-dw-pci.c
37815 +++ b/drivers/spi/spi-dw-pci.c
37816 @@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
37817 #define spi_resume NULL
37818 #endif
37819
37820 -static const struct pci_device_id pci_ids[] __devinitdata = {
37821 +static const struct pci_device_id pci_ids[] __devinitconst = {
37822 /* Intel MID platform SPI controller 0 */
37823 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
37824 {},
37825 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37826 index b2ccdea..84cde75 100644
37827 --- a/drivers/spi/spi.c
37828 +++ b/drivers/spi/spi.c
37829 @@ -1024,7 +1024,7 @@ int spi_bus_unlock(struct spi_master *master)
37830 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37831
37832 /* portable code must never pass more than 32 bytes */
37833 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37834 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37835
37836 static u8 *buf;
37837
37838 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37839 index 400df8c..065d4f4 100644
37840 --- a/drivers/staging/octeon/ethernet-rx.c
37841 +++ b/drivers/staging/octeon/ethernet-rx.c
37842 @@ -420,11 +420,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37843 /* Increment RX stats for virtual ports */
37844 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37845 #ifdef CONFIG_64BIT
37846 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37847 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37848 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37849 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37850 #else
37851 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37852 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37853 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37854 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37855 #endif
37856 }
37857 netif_receive_skb(skb);
37858 @@ -436,9 +436,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37859 dev->name);
37860 */
37861 #ifdef CONFIG_64BIT
37862 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37863 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37864 #else
37865 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37866 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37867 #endif
37868 dev_kfree_skb_irq(skb);
37869 }
37870 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37871 index 9112cd8..92f8d51 100644
37872 --- a/drivers/staging/octeon/ethernet.c
37873 +++ b/drivers/staging/octeon/ethernet.c
37874 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37875 * since the RX tasklet also increments it.
37876 */
37877 #ifdef CONFIG_64BIT
37878 - atomic64_add(rx_status.dropped_packets,
37879 - (atomic64_t *)&priv->stats.rx_dropped);
37880 + atomic64_add_unchecked(rx_status.dropped_packets,
37881 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37882 #else
37883 - atomic_add(rx_status.dropped_packets,
37884 - (atomic_t *)&priv->stats.rx_dropped);
37885 + atomic_add_unchecked(rx_status.dropped_packets,
37886 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37887 #endif
37888 }
37889
37890 diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
37891 index f9dae95..ff48901 100644
37892 --- a/drivers/staging/rtl8192e/rtllib_module.c
37893 +++ b/drivers/staging/rtl8192e/rtllib_module.c
37894 @@ -215,6 +215,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
37895 }
37896
37897 static int store_debug_level(struct file *file, const char __user *buffer,
37898 + unsigned long count, void *data) __size_overflow(3);
37899 +static int store_debug_level(struct file *file, const char __user *buffer,
37900 unsigned long count, void *data)
37901 {
37902 char buf[] = "0x00000000";
37903 diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37904 index e3d47bc..85f4d0d 100644
37905 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37906 +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
37907 @@ -250,6 +250,8 @@ static int show_debug_level(char *page, char **start, off_t offset,
37908 }
37909
37910 static int store_debug_level(struct file *file, const char *buffer,
37911 + unsigned long count, void *data) __size_overflow(3);
37912 +static int store_debug_level(struct file *file, const char *buffer,
37913 unsigned long count, void *data)
37914 {
37915 char buf[] = "0x00000000";
37916 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37917 index 86308a0..feaa925 100644
37918 --- a/drivers/staging/rtl8712/rtl871x_io.h
37919 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37920 @@ -108,7 +108,7 @@ struct _io_ops {
37921 u8 *pmem);
37922 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37923 u8 *pmem);
37924 -};
37925 +} __no_const;
37926
37927 struct io_req {
37928 struct list_head list;
37929 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37930 index c7b5e8b..783d6cb 100644
37931 --- a/drivers/staging/sbe-2t3e3/netdev.c
37932 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37933 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37934 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37935
37936 if (rlen)
37937 - if (copy_to_user(data, &resp, rlen))
37938 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37939 return -EFAULT;
37940
37941 return 0;
37942 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37943 index 42cdafe..2769103 100644
37944 --- a/drivers/staging/speakup/speakup_soft.c
37945 +++ b/drivers/staging/speakup/speakup_soft.c
37946 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37947 break;
37948 } else if (!initialized) {
37949 if (*init) {
37950 - ch = *init;
37951 init++;
37952 } else {
37953 initialized = 1;
37954 }
37955 + ch = *init;
37956 } else {
37957 ch = synth_buffer_getc();
37958 }
37959 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37960 index b8f8c48..1fc5025 100644
37961 --- a/drivers/staging/usbip/usbip_common.h
37962 +++ b/drivers/staging/usbip/usbip_common.h
37963 @@ -289,7 +289,7 @@ struct usbip_device {
37964 void (*shutdown)(struct usbip_device *);
37965 void (*reset)(struct usbip_device *);
37966 void (*unusable)(struct usbip_device *);
37967 - } eh_ops;
37968 + } __no_const eh_ops;
37969 };
37970
37971 /* usbip_common.c */
37972 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37973 index 88b3298..3783eee 100644
37974 --- a/drivers/staging/usbip/vhci.h
37975 +++ b/drivers/staging/usbip/vhci.h
37976 @@ -88,7 +88,7 @@ struct vhci_hcd {
37977 unsigned resuming:1;
37978 unsigned long re_timeout;
37979
37980 - atomic_t seqnum;
37981 + atomic_unchecked_t seqnum;
37982
37983 /*
37984 * NOTE:
37985 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37986 index 2ee97e2..0420b86 100644
37987 --- a/drivers/staging/usbip/vhci_hcd.c
37988 +++ b/drivers/staging/usbip/vhci_hcd.c
37989 @@ -527,7 +527,7 @@ static void vhci_tx_urb(struct urb *urb)
37990 return;
37991 }
37992
37993 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37994 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37995 if (priv->seqnum == 0xffff)
37996 dev_info(&urb->dev->dev, "seqnum max\n");
37997
37998 @@ -779,7 +779,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37999 return -ENOMEM;
38000 }
38001
38002 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38003 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38004 if (unlink->seqnum == 0xffff)
38005 pr_info("seqnum max\n");
38006
38007 @@ -969,7 +969,7 @@ static int vhci_start(struct usb_hcd *hcd)
38008 vdev->rhport = rhport;
38009 }
38010
38011 - atomic_set(&vhci->seqnum, 0);
38012 + atomic_set_unchecked(&vhci->seqnum, 0);
38013 spin_lock_init(&vhci->lock);
38014
38015 hcd->power_budget = 0; /* no limit */
38016 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
38017 index 3f511b4..d3dbc1e 100644
38018 --- a/drivers/staging/usbip/vhci_rx.c
38019 +++ b/drivers/staging/usbip/vhci_rx.c
38020 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
38021 if (!urb) {
38022 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
38023 pr_info("max seqnum %d\n",
38024 - atomic_read(&the_controller->seqnum));
38025 + atomic_read_unchecked(&the_controller->seqnum));
38026 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38027 return;
38028 }
38029 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
38030 index 7735027..30eed13 100644
38031 --- a/drivers/staging/vt6655/hostap.c
38032 +++ b/drivers/staging/vt6655/hostap.c
38033 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
38034 *
38035 */
38036
38037 +static net_device_ops_no_const apdev_netdev_ops;
38038 +
38039 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38040 {
38041 PSDevice apdev_priv;
38042 struct net_device *dev = pDevice->dev;
38043 int ret;
38044 - const struct net_device_ops apdev_netdev_ops = {
38045 - .ndo_start_xmit = pDevice->tx_80211,
38046 - };
38047
38048 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
38049
38050 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38051 *apdev_priv = *pDevice;
38052 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
38053
38054 + /* only half broken now */
38055 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
38056 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
38057
38058 pDevice->apdev->type = ARPHRD_IEEE80211;
38059 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
38060 index 51b5adf..098e320 100644
38061 --- a/drivers/staging/vt6656/hostap.c
38062 +++ b/drivers/staging/vt6656/hostap.c
38063 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
38064 *
38065 */
38066
38067 +static net_device_ops_no_const apdev_netdev_ops;
38068 +
38069 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38070 {
38071 PSDevice apdev_priv;
38072 struct net_device *dev = pDevice->dev;
38073 int ret;
38074 - const struct net_device_ops apdev_netdev_ops = {
38075 - .ndo_start_xmit = pDevice->tx_80211,
38076 - };
38077
38078 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
38079
38080 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
38081 *apdev_priv = *pDevice;
38082 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
38083
38084 + /* only half broken now */
38085 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
38086 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
38087
38088 pDevice->apdev->type = ARPHRD_IEEE80211;
38089 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
38090 index 7843dfd..3db105f 100644
38091 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
38092 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
38093 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
38094
38095 struct usbctlx_completor {
38096 int (*complete) (struct usbctlx_completor *);
38097 -};
38098 +} __no_const;
38099
38100 static int
38101 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
38102 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
38103 index 1ca66ea..76f1343 100644
38104 --- a/drivers/staging/zcache/tmem.c
38105 +++ b/drivers/staging/zcache/tmem.c
38106 @@ -39,7 +39,7 @@
38107 * A tmem host implementation must use this function to register callbacks
38108 * for memory allocation.
38109 */
38110 -static struct tmem_hostops tmem_hostops;
38111 +static tmem_hostops_no_const tmem_hostops;
38112
38113 static void tmem_objnode_tree_init(void);
38114
38115 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
38116 * A tmem host implementation must use this function to register
38117 * callbacks for a page-accessible memory (PAM) implementation
38118 */
38119 -static struct tmem_pamops tmem_pamops;
38120 +static tmem_pamops_no_const tmem_pamops;
38121
38122 void tmem_register_pamops(struct tmem_pamops *m)
38123 {
38124 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
38125 index ed147c4..94fc3c6 100644
38126 --- a/drivers/staging/zcache/tmem.h
38127 +++ b/drivers/staging/zcache/tmem.h
38128 @@ -180,6 +180,7 @@ struct tmem_pamops {
38129 void (*new_obj)(struct tmem_obj *);
38130 int (*replace_in_obj)(void *, struct tmem_obj *);
38131 };
38132 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
38133 extern void tmem_register_pamops(struct tmem_pamops *m);
38134
38135 /* memory allocation methods provided by the host implementation */
38136 @@ -189,6 +190,7 @@ struct tmem_hostops {
38137 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
38138 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
38139 };
38140 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
38141 extern void tmem_register_hostops(struct tmem_hostops *m);
38142
38143 /* core tmem accessor functions */
38144 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
38145 index 97c74ee..7f6d77d 100644
38146 --- a/drivers/target/iscsi/iscsi_target.c
38147 +++ b/drivers/target/iscsi/iscsi_target.c
38148 @@ -1361,7 +1361,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
38149 * outstanding_r2ts reaches zero, go ahead and send the delayed
38150 * TASK_ABORTED status.
38151 */
38152 - if (atomic_read(&se_cmd->t_transport_aborted) != 0) {
38153 + if (atomic_read_unchecked(&se_cmd->t_transport_aborted) != 0) {
38154 if (hdr->flags & ISCSI_FLAG_CMD_FINAL)
38155 if (--cmd->outstanding_r2ts < 1) {
38156 iscsit_stop_dataout_timer(cmd);
38157 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
38158 index dcb0618..97e3d85 100644
38159 --- a/drivers/target/target_core_tmr.c
38160 +++ b/drivers/target/target_core_tmr.c
38161 @@ -260,7 +260,7 @@ static void core_tmr_drain_task_list(
38162 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
38163 cmd->t_task_list_num,
38164 atomic_read(&cmd->t_task_cdbs_left),
38165 - atomic_read(&cmd->t_task_cdbs_sent),
38166 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
38167 atomic_read(&cmd->t_transport_active),
38168 atomic_read(&cmd->t_transport_stop),
38169 atomic_read(&cmd->t_transport_sent));
38170 @@ -291,7 +291,7 @@ static void core_tmr_drain_task_list(
38171 pr_debug("LUN_RESET: got t_transport_active = 1 for"
38172 " task: %p, t_fe_count: %d dev: %p\n", task,
38173 fe_count, dev);
38174 - atomic_set(&cmd->t_transport_aborted, 1);
38175 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
38176 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38177
38178 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
38179 @@ -299,7 +299,7 @@ static void core_tmr_drain_task_list(
38180 }
38181 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
38182 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
38183 - atomic_set(&cmd->t_transport_aborted, 1);
38184 + atomic_set_unchecked(&cmd->t_transport_aborted, 1);
38185 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38186
38187 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
38188 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
38189 index cd5cd95..5249d30 100644
38190 --- a/drivers/target/target_core_transport.c
38191 +++ b/drivers/target/target_core_transport.c
38192 @@ -1330,7 +1330,7 @@ struct se_device *transport_add_device_to_core_hba(
38193 spin_lock_init(&dev->se_port_lock);
38194 spin_lock_init(&dev->se_tmr_lock);
38195 spin_lock_init(&dev->qf_cmd_lock);
38196 - atomic_set(&dev->dev_ordered_id, 0);
38197 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
38198
38199 se_dev_set_default_attribs(dev, dev_limits);
38200
38201 @@ -1517,7 +1517,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
38202 * Used to determine when ORDERED commands should go from
38203 * Dormant to Active status.
38204 */
38205 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
38206 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
38207 smp_mb__after_atomic_inc();
38208 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
38209 cmd->se_ordered_id, cmd->sam_task_attr,
38210 @@ -1862,7 +1862,7 @@ static void transport_generic_request_failure(struct se_cmd *cmd)
38211 " t_transport_active: %d t_transport_stop: %d"
38212 " t_transport_sent: %d\n", cmd->t_task_list_num,
38213 atomic_read(&cmd->t_task_cdbs_left),
38214 - atomic_read(&cmd->t_task_cdbs_sent),
38215 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
38216 atomic_read(&cmd->t_task_cdbs_ex_left),
38217 atomic_read(&cmd->t_transport_active),
38218 atomic_read(&cmd->t_transport_stop),
38219 @@ -2121,9 +2121,9 @@ check_depth:
38220 cmd = task->task_se_cmd;
38221 spin_lock_irqsave(&cmd->t_state_lock, flags);
38222 task->task_flags |= (TF_ACTIVE | TF_SENT);
38223 - atomic_inc(&cmd->t_task_cdbs_sent);
38224 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
38225
38226 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
38227 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
38228 cmd->t_task_list_num)
38229 atomic_set(&cmd->t_transport_sent, 1);
38230
38231 @@ -4348,7 +4348,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
38232 atomic_set(&cmd->transport_lun_stop, 0);
38233 }
38234 if (!atomic_read(&cmd->t_transport_active) ||
38235 - atomic_read(&cmd->t_transport_aborted)) {
38236 + atomic_read_unchecked(&cmd->t_transport_aborted)) {
38237 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
38238 return false;
38239 }
38240 @@ -4597,7 +4597,7 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
38241 {
38242 int ret = 0;
38243
38244 - if (atomic_read(&cmd->t_transport_aborted) != 0) {
38245 + if (atomic_read_unchecked(&cmd->t_transport_aborted) != 0) {
38246 if (!send_status ||
38247 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
38248 return 1;
38249 @@ -4634,7 +4634,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
38250 */
38251 if (cmd->data_direction == DMA_TO_DEVICE) {
38252 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
38253 - atomic_inc(&cmd->t_transport_aborted);
38254 + atomic_inc_unchecked(&cmd->t_transport_aborted);
38255 smp_mb__after_atomic_inc();
38256 }
38257 }
38258 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
38259 index b9040be..e3f5aab 100644
38260 --- a/drivers/tty/hvc/hvcs.c
38261 +++ b/drivers/tty/hvc/hvcs.c
38262 @@ -83,6 +83,7 @@
38263 #include <asm/hvcserver.h>
38264 #include <asm/uaccess.h>
38265 #include <asm/vio.h>
38266 +#include <asm/local.h>
38267
38268 /*
38269 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
38270 @@ -270,7 +271,7 @@ struct hvcs_struct {
38271 unsigned int index;
38272
38273 struct tty_struct *tty;
38274 - int open_count;
38275 + local_t open_count;
38276
38277 /*
38278 * Used to tell the driver kernel_thread what operations need to take
38279 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
38280
38281 spin_lock_irqsave(&hvcsd->lock, flags);
38282
38283 - if (hvcsd->open_count > 0) {
38284 + if (local_read(&hvcsd->open_count) > 0) {
38285 spin_unlock_irqrestore(&hvcsd->lock, flags);
38286 printk(KERN_INFO "HVCS: vterm state unchanged. "
38287 "The hvcs device node is still in use.\n");
38288 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
38289 if ((retval = hvcs_partner_connect(hvcsd)))
38290 goto error_release;
38291
38292 - hvcsd->open_count = 1;
38293 + local_set(&hvcsd->open_count, 1);
38294 hvcsd->tty = tty;
38295 tty->driver_data = hvcsd;
38296
38297 @@ -1179,7 +1180,7 @@ fast_open:
38298
38299 spin_lock_irqsave(&hvcsd->lock, flags);
38300 kref_get(&hvcsd->kref);
38301 - hvcsd->open_count++;
38302 + local_inc(&hvcsd->open_count);
38303 hvcsd->todo_mask |= HVCS_SCHED_READ;
38304 spin_unlock_irqrestore(&hvcsd->lock, flags);
38305
38306 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
38307 hvcsd = tty->driver_data;
38308
38309 spin_lock_irqsave(&hvcsd->lock, flags);
38310 - if (--hvcsd->open_count == 0) {
38311 + if (local_dec_and_test(&hvcsd->open_count)) {
38312
38313 vio_disable_interrupts(hvcsd->vdev);
38314
38315 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
38316 free_irq(irq, hvcsd);
38317 kref_put(&hvcsd->kref, destroy_hvcs_struct);
38318 return;
38319 - } else if (hvcsd->open_count < 0) {
38320 + } else if (local_read(&hvcsd->open_count) < 0) {
38321 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
38322 " is missmanaged.\n",
38323 - hvcsd->vdev->unit_address, hvcsd->open_count);
38324 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
38325 }
38326
38327 spin_unlock_irqrestore(&hvcsd->lock, flags);
38328 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struct * tty)
38329
38330 spin_lock_irqsave(&hvcsd->lock, flags);
38331 /* Preserve this so that we know how many kref refs to put */
38332 - temp_open_count = hvcsd->open_count;
38333 + temp_open_count = local_read(&hvcsd->open_count);
38334
38335 /*
38336 * Don't kref put inside the spinlock because the destruction
38337 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struct * tty)
38338 hvcsd->tty->driver_data = NULL;
38339 hvcsd->tty = NULL;
38340
38341 - hvcsd->open_count = 0;
38342 + local_set(&hvcsd->open_count, 0);
38343
38344 /* This will drop any buffered data on the floor which is OK in a hangup
38345 * scenario. */
38346 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct *tty,
38347 * the middle of a write operation? This is a crummy place to do this
38348 * but we want to keep it all in the spinlock.
38349 */
38350 - if (hvcsd->open_count <= 0) {
38351 + if (local_read(&hvcsd->open_count) <= 0) {
38352 spin_unlock_irqrestore(&hvcsd->lock, flags);
38353 return -ENODEV;
38354 }
38355 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_struct *tty)
38356 {
38357 struct hvcs_struct *hvcsd = tty->driver_data;
38358
38359 - if (!hvcsd || hvcsd->open_count <= 0)
38360 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
38361 return 0;
38362
38363 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
38364 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
38365 index ef92869..f4ebd88 100644
38366 --- a/drivers/tty/ipwireless/tty.c
38367 +++ b/drivers/tty/ipwireless/tty.c
38368 @@ -29,6 +29,7 @@
38369 #include <linux/tty_driver.h>
38370 #include <linux/tty_flip.h>
38371 #include <linux/uaccess.h>
38372 +#include <asm/local.h>
38373
38374 #include "tty.h"
38375 #include "network.h"
38376 @@ -51,7 +52,7 @@ struct ipw_tty {
38377 int tty_type;
38378 struct ipw_network *network;
38379 struct tty_struct *linux_tty;
38380 - int open_count;
38381 + local_t open_count;
38382 unsigned int control_lines;
38383 struct mutex ipw_tty_mutex;
38384 int tx_bytes_queued;
38385 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38386 mutex_unlock(&tty->ipw_tty_mutex);
38387 return -ENODEV;
38388 }
38389 - if (tty->open_count == 0)
38390 + if (local_read(&tty->open_count) == 0)
38391 tty->tx_bytes_queued = 0;
38392
38393 - tty->open_count++;
38394 + local_inc(&tty->open_count);
38395
38396 tty->linux_tty = linux_tty;
38397 linux_tty->driver_data = tty;
38398 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
38399
38400 static void do_ipw_close(struct ipw_tty *tty)
38401 {
38402 - tty->open_count--;
38403 -
38404 - if (tty->open_count == 0) {
38405 + if (local_dec_return(&tty->open_count) == 0) {
38406 struct tty_struct *linux_tty = tty->linux_tty;
38407
38408 if (linux_tty != NULL) {
38409 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
38410 return;
38411
38412 mutex_lock(&tty->ipw_tty_mutex);
38413 - if (tty->open_count == 0) {
38414 + if (local_read(&tty->open_count) == 0) {
38415 mutex_unlock(&tty->ipw_tty_mutex);
38416 return;
38417 }
38418 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
38419 return;
38420 }
38421
38422 - if (!tty->open_count) {
38423 + if (!local_read(&tty->open_count)) {
38424 mutex_unlock(&tty->ipw_tty_mutex);
38425 return;
38426 }
38427 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
38428 return -ENODEV;
38429
38430 mutex_lock(&tty->ipw_tty_mutex);
38431 - if (!tty->open_count) {
38432 + if (!local_read(&tty->open_count)) {
38433 mutex_unlock(&tty->ipw_tty_mutex);
38434 return -EINVAL;
38435 }
38436 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
38437 if (!tty)
38438 return -ENODEV;
38439
38440 - if (!tty->open_count)
38441 + if (!local_read(&tty->open_count))
38442 return -EINVAL;
38443
38444 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
38445 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
38446 if (!tty)
38447 return 0;
38448
38449 - if (!tty->open_count)
38450 + if (!local_read(&tty->open_count))
38451 return 0;
38452
38453 return tty->tx_bytes_queued;
38454 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
38455 if (!tty)
38456 return -ENODEV;
38457
38458 - if (!tty->open_count)
38459 + if (!local_read(&tty->open_count))
38460 return -EINVAL;
38461
38462 return get_control_lines(tty);
38463 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
38464 if (!tty)
38465 return -ENODEV;
38466
38467 - if (!tty->open_count)
38468 + if (!local_read(&tty->open_count))
38469 return -EINVAL;
38470
38471 return set_control_lines(tty, set, clear);
38472 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
38473 if (!tty)
38474 return -ENODEV;
38475
38476 - if (!tty->open_count)
38477 + if (!local_read(&tty->open_count))
38478 return -EINVAL;
38479
38480 /* FIXME: Exactly how is the tty object locked here .. */
38481 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
38482 against a parallel ioctl etc */
38483 mutex_lock(&ttyj->ipw_tty_mutex);
38484 }
38485 - while (ttyj->open_count)
38486 + while (local_read(&ttyj->open_count))
38487 do_ipw_close(ttyj);
38488 ipwireless_disassociate_network_ttys(network,
38489 ttyj->channel_idx);
38490 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
38491 index fc7bbba..9527e93 100644
38492 --- a/drivers/tty/n_gsm.c
38493 +++ b/drivers/tty/n_gsm.c
38494 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
38495 kref_init(&dlci->ref);
38496 mutex_init(&dlci->mutex);
38497 dlci->fifo = &dlci->_fifo;
38498 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
38499 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
38500 kfree(dlci);
38501 return NULL;
38502 }
38503 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
38504 index d2256d0..97476fa 100644
38505 --- a/drivers/tty/n_tty.c
38506 +++ b/drivers/tty/n_tty.c
38507 @@ -2123,6 +2123,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
38508 {
38509 *ops = tty_ldisc_N_TTY;
38510 ops->owner = NULL;
38511 - ops->refcount = ops->flags = 0;
38512 + atomic_set(&ops->refcount, 0);
38513 + ops->flags = 0;
38514 }
38515 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
38516 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
38517 index d8653ab..f8afd9d 100644
38518 --- a/drivers/tty/pty.c
38519 +++ b/drivers/tty/pty.c
38520 @@ -765,8 +765,10 @@ static void __init unix98_pty_init(void)
38521 register_sysctl_table(pty_root_table);
38522
38523 /* Now create the /dev/ptmx special device */
38524 + pax_open_kernel();
38525 tty_default_fops(&ptmx_fops);
38526 - ptmx_fops.open = ptmx_open;
38527 + *(void **)&ptmx_fops.open = ptmx_open;
38528 + pax_close_kernel();
38529
38530 cdev_init(&ptmx_cdev, &ptmx_fops);
38531 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
38532 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
38533 index 2b42a01..32a2ed3 100644
38534 --- a/drivers/tty/serial/kgdboc.c
38535 +++ b/drivers/tty/serial/kgdboc.c
38536 @@ -24,8 +24,9 @@
38537 #define MAX_CONFIG_LEN 40
38538
38539 static struct kgdb_io kgdboc_io_ops;
38540 +static struct kgdb_io kgdboc_io_ops_console;
38541
38542 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
38543 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
38544 static int configured = -1;
38545
38546 static char config[MAX_CONFIG_LEN];
38547 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
38548 kgdboc_unregister_kbd();
38549 if (configured == 1)
38550 kgdb_unregister_io_module(&kgdboc_io_ops);
38551 + else if (configured == 2)
38552 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
38553 }
38554
38555 static int configure_kgdboc(void)
38556 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
38557 int err;
38558 char *cptr = config;
38559 struct console *cons;
38560 + int is_console = 0;
38561
38562 err = kgdboc_option_setup(config);
38563 if (err || !strlen(config) || isspace(config[0]))
38564 goto noconfig;
38565
38566 err = -ENODEV;
38567 - kgdboc_io_ops.is_console = 0;
38568 kgdb_tty_driver = NULL;
38569
38570 kgdboc_use_kms = 0;
38571 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
38572 int idx;
38573 if (cons->device && cons->device(cons, &idx) == p &&
38574 idx == tty_line) {
38575 - kgdboc_io_ops.is_console = 1;
38576 + is_console = 1;
38577 break;
38578 }
38579 cons = cons->next;
38580 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38581 kgdb_tty_line = tty_line;
38582
38583 do_register:
38584 - err = kgdb_register_io_module(&kgdboc_io_ops);
38585 + if (is_console) {
38586 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
38587 + configured = 2;
38588 + } else {
38589 + err = kgdb_register_io_module(&kgdboc_io_ops);
38590 + configured = 1;
38591 + }
38592 if (err)
38593 goto noconfig;
38594
38595 - configured = 1;
38596 -
38597 return 0;
38598
38599 noconfig:
38600 @@ -213,7 +220,7 @@ noconfig:
38601 static int __init init_kgdboc(void)
38602 {
38603 /* Already configured? */
38604 - if (configured == 1)
38605 + if (configured >= 1)
38606 return 0;
38607
38608 return configure_kgdboc();
38609 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38610 if (config[len - 1] == '\n')
38611 config[len - 1] = '\0';
38612
38613 - if (configured == 1)
38614 + if (configured >= 1)
38615 cleanup_kgdboc();
38616
38617 /* Go and configure with the new params. */
38618 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38619 .post_exception = kgdboc_post_exp_handler,
38620 };
38621
38622 +static struct kgdb_io kgdboc_io_ops_console = {
38623 + .name = "kgdboc",
38624 + .read_char = kgdboc_get_char,
38625 + .write_char = kgdboc_put_char,
38626 + .pre_exception = kgdboc_pre_exp_handler,
38627 + .post_exception = kgdboc_post_exp_handler,
38628 + .is_console = 1
38629 +};
38630 +
38631 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38632 /* This is only available if kgdboc is a built in for early debugging */
38633 static int __init kgdboc_early_init(char *opt)
38634 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38635 index 7867b7c..b3c119d 100644
38636 --- a/drivers/tty/sysrq.c
38637 +++ b/drivers/tty/sysrq.c
38638 @@ -862,7 +862,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38639 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38640 size_t count, loff_t *ppos)
38641 {
38642 - if (count) {
38643 + if (count && capable(CAP_SYS_ADMIN)) {
38644 char c;
38645
38646 if (get_user(c, buf))
38647 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38648 index e41b9bb..84002fb 100644
38649 --- a/drivers/tty/tty_io.c
38650 +++ b/drivers/tty/tty_io.c
38651 @@ -3291,7 +3291,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38652
38653 void tty_default_fops(struct file_operations *fops)
38654 {
38655 - *fops = tty_fops;
38656 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38657 }
38658
38659 /*
38660 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38661 index 24b95db..9c078d0 100644
38662 --- a/drivers/tty/tty_ldisc.c
38663 +++ b/drivers/tty/tty_ldisc.c
38664 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38665 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38666 struct tty_ldisc_ops *ldo = ld->ops;
38667
38668 - ldo->refcount--;
38669 + atomic_dec(&ldo->refcount);
38670 module_put(ldo->owner);
38671 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38672
38673 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38674 spin_lock_irqsave(&tty_ldisc_lock, flags);
38675 tty_ldiscs[disc] = new_ldisc;
38676 new_ldisc->num = disc;
38677 - new_ldisc->refcount = 0;
38678 + atomic_set(&new_ldisc->refcount, 0);
38679 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38680
38681 return ret;
38682 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
38683 return -EINVAL;
38684
38685 spin_lock_irqsave(&tty_ldisc_lock, flags);
38686 - if (tty_ldiscs[disc]->refcount)
38687 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38688 ret = -EBUSY;
38689 else
38690 tty_ldiscs[disc] = NULL;
38691 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38692 if (ldops) {
38693 ret = ERR_PTR(-EAGAIN);
38694 if (try_module_get(ldops->owner)) {
38695 - ldops->refcount++;
38696 + atomic_inc(&ldops->refcount);
38697 ret = ldops;
38698 }
38699 }
38700 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38701 unsigned long flags;
38702
38703 spin_lock_irqsave(&tty_ldisc_lock, flags);
38704 - ldops->refcount--;
38705 + atomic_dec(&ldops->refcount);
38706 module_put(ldops->owner);
38707 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38708 }
38709 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38710 index a605549..6bd3c96 100644
38711 --- a/drivers/tty/vt/keyboard.c
38712 +++ b/drivers/tty/vt/keyboard.c
38713 @@ -657,6 +657,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38714 kbd->kbdmode == VC_OFF) &&
38715 value != KVAL(K_SAK))
38716 return; /* SAK is allowed even in raw mode */
38717 +
38718 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38719 + {
38720 + void *func = fn_handler[value];
38721 + if (func == fn_show_state || func == fn_show_ptregs ||
38722 + func == fn_show_mem)
38723 + return;
38724 + }
38725 +#endif
38726 +
38727 fn_handler[value](vc);
38728 }
38729
38730 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
38731 index 65447c5..0526f0a 100644
38732 --- a/drivers/tty/vt/vt_ioctl.c
38733 +++ b/drivers/tty/vt/vt_ioctl.c
38734 @@ -207,9 +207,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38735 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38736 return -EFAULT;
38737
38738 - if (!capable(CAP_SYS_TTY_CONFIG))
38739 - perm = 0;
38740 -
38741 switch (cmd) {
38742 case KDGKBENT:
38743 key_map = key_maps[s];
38744 @@ -221,6 +218,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
38745 val = (i ? K_HOLE : K_NOSUCHMAP);
38746 return put_user(val, &user_kbe->kb_value);
38747 case KDSKBENT:
38748 + if (!capable(CAP_SYS_TTY_CONFIG))
38749 + perm = 0;
38750 +
38751 if (!perm)
38752 return -EPERM;
38753 if (!i && v == K_NOSUCHMAP) {
38754 @@ -322,9 +322,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38755 int i, j, k;
38756 int ret;
38757
38758 - if (!capable(CAP_SYS_TTY_CONFIG))
38759 - perm = 0;
38760 -
38761 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38762 if (!kbs) {
38763 ret = -ENOMEM;
38764 @@ -358,6 +355,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38765 kfree(kbs);
38766 return ((p && *p) ? -EOVERFLOW : 0);
38767 case KDSKBSENT:
38768 + if (!capable(CAP_SYS_TTY_CONFIG))
38769 + perm = 0;
38770 +
38771 if (!perm) {
38772 ret = -EPERM;
38773 goto reterr;
38774 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38775 index a783d53..cb30d94 100644
38776 --- a/drivers/uio/uio.c
38777 +++ b/drivers/uio/uio.c
38778 @@ -25,6 +25,7 @@
38779 #include <linux/kobject.h>
38780 #include <linux/cdev.h>
38781 #include <linux/uio_driver.h>
38782 +#include <asm/local.h>
38783
38784 #define UIO_MAX_DEVICES (1U << MINORBITS)
38785
38786 @@ -32,10 +33,10 @@ struct uio_device {
38787 struct module *owner;
38788 struct device *dev;
38789 int minor;
38790 - atomic_t event;
38791 + atomic_unchecked_t event;
38792 struct fasync_struct *async_queue;
38793 wait_queue_head_t wait;
38794 - int vma_count;
38795 + local_t vma_count;
38796 struct uio_info *info;
38797 struct kobject *map_dir;
38798 struct kobject *portio_dir;
38799 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38800 struct device_attribute *attr, char *buf)
38801 {
38802 struct uio_device *idev = dev_get_drvdata(dev);
38803 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38804 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38805 }
38806
38807 static struct device_attribute uio_class_attributes[] = {
38808 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38809 {
38810 struct uio_device *idev = info->uio_dev;
38811
38812 - atomic_inc(&idev->event);
38813 + atomic_inc_unchecked(&idev->event);
38814 wake_up_interruptible(&idev->wait);
38815 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38816 }
38817 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38818 }
38819
38820 listener->dev = idev;
38821 - listener->event_count = atomic_read(&idev->event);
38822 + listener->event_count = atomic_read_unchecked(&idev->event);
38823 filep->private_data = listener;
38824
38825 if (idev->info->open) {
38826 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38827 return -EIO;
38828
38829 poll_wait(filep, &idev->wait, wait);
38830 - if (listener->event_count != atomic_read(&idev->event))
38831 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38832 return POLLIN | POLLRDNORM;
38833 return 0;
38834 }
38835 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38836 do {
38837 set_current_state(TASK_INTERRUPTIBLE);
38838
38839 - event_count = atomic_read(&idev->event);
38840 + event_count = atomic_read_unchecked(&idev->event);
38841 if (event_count != listener->event_count) {
38842 if (copy_to_user(buf, &event_count, count))
38843 retval = -EFAULT;
38844 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38845 static void uio_vma_open(struct vm_area_struct *vma)
38846 {
38847 struct uio_device *idev = vma->vm_private_data;
38848 - idev->vma_count++;
38849 + local_inc(&idev->vma_count);
38850 }
38851
38852 static void uio_vma_close(struct vm_area_struct *vma)
38853 {
38854 struct uio_device *idev = vma->vm_private_data;
38855 - idev->vma_count--;
38856 + local_dec(&idev->vma_count);
38857 }
38858
38859 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38860 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38861 idev->owner = owner;
38862 idev->info = info;
38863 init_waitqueue_head(&idev->wait);
38864 - atomic_set(&idev->event, 0);
38865 + atomic_set_unchecked(&idev->event, 0);
38866
38867 ret = uio_get_minor(idev);
38868 if (ret)
38869 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38870 index 98b89fe..aff824e 100644
38871 --- a/drivers/usb/atm/cxacru.c
38872 +++ b/drivers/usb/atm/cxacru.c
38873 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38874 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38875 if (ret < 2)
38876 return -EINVAL;
38877 - if (index < 0 || index > 0x7f)
38878 + if (index > 0x7f)
38879 return -EINVAL;
38880 pos += tmp;
38881
38882 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38883 index d3448ca..d2864ca 100644
38884 --- a/drivers/usb/atm/usbatm.c
38885 +++ b/drivers/usb/atm/usbatm.c
38886 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38887 if (printk_ratelimit())
38888 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38889 __func__, vpi, vci);
38890 - atomic_inc(&vcc->stats->rx_err);
38891 + atomic_inc_unchecked(&vcc->stats->rx_err);
38892 return;
38893 }
38894
38895 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38896 if (length > ATM_MAX_AAL5_PDU) {
38897 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38898 __func__, length, vcc);
38899 - atomic_inc(&vcc->stats->rx_err);
38900 + atomic_inc_unchecked(&vcc->stats->rx_err);
38901 goto out;
38902 }
38903
38904 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38905 if (sarb->len < pdu_length) {
38906 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38907 __func__, pdu_length, sarb->len, vcc);
38908 - atomic_inc(&vcc->stats->rx_err);
38909 + atomic_inc_unchecked(&vcc->stats->rx_err);
38910 goto out;
38911 }
38912
38913 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38914 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38915 __func__, vcc);
38916 - atomic_inc(&vcc->stats->rx_err);
38917 + atomic_inc_unchecked(&vcc->stats->rx_err);
38918 goto out;
38919 }
38920
38921 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38922 if (printk_ratelimit())
38923 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38924 __func__, length);
38925 - atomic_inc(&vcc->stats->rx_drop);
38926 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38927 goto out;
38928 }
38929
38930 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38931
38932 vcc->push(vcc, skb);
38933
38934 - atomic_inc(&vcc->stats->rx);
38935 + atomic_inc_unchecked(&vcc->stats->rx);
38936 out:
38937 skb_trim(sarb, 0);
38938 }
38939 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38940 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38941
38942 usbatm_pop(vcc, skb);
38943 - atomic_inc(&vcc->stats->tx);
38944 + atomic_inc_unchecked(&vcc->stats->tx);
38945
38946 skb = skb_dequeue(&instance->sndqueue);
38947 }
38948 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38949 if (!left--)
38950 return sprintf(page,
38951 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38952 - atomic_read(&atm_dev->stats.aal5.tx),
38953 - atomic_read(&atm_dev->stats.aal5.tx_err),
38954 - atomic_read(&atm_dev->stats.aal5.rx),
38955 - atomic_read(&atm_dev->stats.aal5.rx_err),
38956 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38957 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38958 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38959 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38960 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38961 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38962
38963 if (!left--) {
38964 if (instance->disconnected)
38965 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38966 index d956965..4179a77 100644
38967 --- a/drivers/usb/core/devices.c
38968 +++ b/drivers/usb/core/devices.c
38969 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38970 * time it gets called.
38971 */
38972 static struct device_connect_event {
38973 - atomic_t count;
38974 + atomic_unchecked_t count;
38975 wait_queue_head_t wait;
38976 } device_event = {
38977 .count = ATOMIC_INIT(1),
38978 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38979
38980 void usbfs_conn_disc_event(void)
38981 {
38982 - atomic_add(2, &device_event.count);
38983 + atomic_add_unchecked(2, &device_event.count);
38984 wake_up(&device_event.wait);
38985 }
38986
38987 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38988
38989 poll_wait(file, &device_event.wait, wait);
38990
38991 - event_count = atomic_read(&device_event.count);
38992 + event_count = atomic_read_unchecked(&device_event.count);
38993 if (file->f_version != event_count) {
38994 file->f_version = event_count;
38995 return POLLIN | POLLRDNORM;
38996 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38997 index 1fc8f12..20647c1 100644
38998 --- a/drivers/usb/early/ehci-dbgp.c
38999 +++ b/drivers/usb/early/ehci-dbgp.c
39000 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
39001
39002 #ifdef CONFIG_KGDB
39003 static struct kgdb_io kgdbdbgp_io_ops;
39004 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
39005 +static struct kgdb_io kgdbdbgp_io_ops_console;
39006 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
39007 #else
39008 #define dbgp_kgdb_mode (0)
39009 #endif
39010 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
39011 .write_char = kgdbdbgp_write_char,
39012 };
39013
39014 +static struct kgdb_io kgdbdbgp_io_ops_console = {
39015 + .name = "kgdbdbgp",
39016 + .read_char = kgdbdbgp_read_char,
39017 + .write_char = kgdbdbgp_write_char,
39018 + .is_console = 1
39019 +};
39020 +
39021 static int kgdbdbgp_wait_time;
39022
39023 static int __init kgdbdbgp_parse_config(char *str)
39024 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
39025 ptr++;
39026 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
39027 }
39028 - kgdb_register_io_module(&kgdbdbgp_io_ops);
39029 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
39030 + if (early_dbgp_console.index != -1)
39031 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
39032 + else
39033 + kgdb_register_io_module(&kgdbdbgp_io_ops);
39034
39035 return 0;
39036 }
39037 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
39038 index d6bea3e..60b250e 100644
39039 --- a/drivers/usb/wusbcore/wa-hc.h
39040 +++ b/drivers/usb/wusbcore/wa-hc.h
39041 @@ -192,7 +192,7 @@ struct wahc {
39042 struct list_head xfer_delayed_list;
39043 spinlock_t xfer_list_lock;
39044 struct work_struct xfer_work;
39045 - atomic_t xfer_id_count;
39046 + atomic_unchecked_t xfer_id_count;
39047 };
39048
39049
39050 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
39051 INIT_LIST_HEAD(&wa->xfer_delayed_list);
39052 spin_lock_init(&wa->xfer_list_lock);
39053 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
39054 - atomic_set(&wa->xfer_id_count, 1);
39055 + atomic_set_unchecked(&wa->xfer_id_count, 1);
39056 }
39057
39058 /**
39059 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
39060 index 57c01ab..8a05959 100644
39061 --- a/drivers/usb/wusbcore/wa-xfer.c
39062 +++ b/drivers/usb/wusbcore/wa-xfer.c
39063 @@ -296,7 +296,7 @@ out:
39064 */
39065 static void wa_xfer_id_init(struct wa_xfer *xfer)
39066 {
39067 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
39068 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
39069 }
39070
39071 /*
39072 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
39073 index c14c42b..f955cc2 100644
39074 --- a/drivers/vhost/vhost.c
39075 +++ b/drivers/vhost/vhost.c
39076 @@ -629,7 +629,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
39077 return 0;
39078 }
39079
39080 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
39081 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
39082 {
39083 struct file *eventfp, *filep = NULL,
39084 *pollstart = NULL, *pollstop = NULL;
39085 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
39086 index b0b2ac3..89a4399 100644
39087 --- a/drivers/video/aty/aty128fb.c
39088 +++ b/drivers/video/aty/aty128fb.c
39089 @@ -148,7 +148,7 @@ enum {
39090 };
39091
39092 /* Must match above enum */
39093 -static const char *r128_family[] __devinitdata = {
39094 +static const char *r128_family[] __devinitconst = {
39095 "AGP",
39096 "PCI",
39097 "PRO AGP",
39098 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
39099 index 5c3960d..15cf8fc 100644
39100 --- a/drivers/video/fbcmap.c
39101 +++ b/drivers/video/fbcmap.c
39102 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
39103 rc = -ENODEV;
39104 goto out;
39105 }
39106 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39107 - !info->fbops->fb_setcmap)) {
39108 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39109 rc = -EINVAL;
39110 goto out1;
39111 }
39112 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
39113 index c6ce416..3b9b642 100644
39114 --- a/drivers/video/fbmem.c
39115 +++ b/drivers/video/fbmem.c
39116 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39117 image->dx += image->width + 8;
39118 }
39119 } else if (rotate == FB_ROTATE_UD) {
39120 - for (x = 0; x < num && image->dx >= 0; x++) {
39121 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39122 info->fbops->fb_imageblit(info, image);
39123 image->dx -= image->width + 8;
39124 }
39125 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
39126 image->dy += image->height + 8;
39127 }
39128 } else if (rotate == FB_ROTATE_CCW) {
39129 - for (x = 0; x < num && image->dy >= 0; x++) {
39130 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39131 info->fbops->fb_imageblit(info, image);
39132 image->dy -= image->height + 8;
39133 }
39134 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
39135 return -EFAULT;
39136 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39137 return -EINVAL;
39138 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39139 + if (con2fb.framebuffer >= FB_MAX)
39140 return -EINVAL;
39141 if (!registered_fb[con2fb.framebuffer])
39142 request_module("fb%d", con2fb.framebuffer);
39143 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
39144 index 5a5d092..265c5ed 100644
39145 --- a/drivers/video/geode/gx1fb_core.c
39146 +++ b/drivers/video/geode/gx1fb_core.c
39147 @@ -29,7 +29,7 @@ static int crt_option = 1;
39148 static char panel_option[32] = "";
39149
39150 /* Modes relevant to the GX1 (taken from modedb.c) */
39151 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
39152 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
39153 /* 640x480-60 VESA */
39154 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
39155 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
39156 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
39157 index 0fad23f..0e9afa4 100644
39158 --- a/drivers/video/gxt4500.c
39159 +++ b/drivers/video/gxt4500.c
39160 @@ -156,7 +156,7 @@ struct gxt4500_par {
39161 static char *mode_option;
39162
39163 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
39164 -static const struct fb_videomode defaultmode __devinitdata = {
39165 +static const struct fb_videomode defaultmode __devinitconst = {
39166 .refresh = 60,
39167 .xres = 1280,
39168 .yres = 1024,
39169 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
39170 return 0;
39171 }
39172
39173 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
39174 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
39175 .id = "IBM GXT4500P",
39176 .type = FB_TYPE_PACKED_PIXELS,
39177 .visual = FB_VISUAL_PSEUDOCOLOR,
39178 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
39179 index 7672d2e..b56437f 100644
39180 --- a/drivers/video/i810/i810_accel.c
39181 +++ b/drivers/video/i810/i810_accel.c
39182 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
39183 }
39184 }
39185 printk("ringbuffer lockup!!!\n");
39186 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39187 i810_report_error(mmio);
39188 par->dev_flags |= LOCKUP;
39189 info->pixmap.scan_align = 1;
39190 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
39191 index b83f361..2b05a91 100644
39192 --- a/drivers/video/i810/i810_main.c
39193 +++ b/drivers/video/i810/i810_main.c
39194 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
39195 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
39196
39197 /* PCI */
39198 -static const char *i810_pci_list[] __devinitdata = {
39199 +static const char *i810_pci_list[] __devinitconst = {
39200 "Intel(R) 810 Framebuffer Device" ,
39201 "Intel(R) 810-DC100 Framebuffer Device" ,
39202 "Intel(R) 810E Framebuffer Device" ,
39203 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
39204 index de36693..3c63fc2 100644
39205 --- a/drivers/video/jz4740_fb.c
39206 +++ b/drivers/video/jz4740_fb.c
39207 @@ -136,7 +136,7 @@ struct jzfb {
39208 uint32_t pseudo_palette[16];
39209 };
39210
39211 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
39212 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
39213 .id = "JZ4740 FB",
39214 .type = FB_TYPE_PACKED_PIXELS,
39215 .visual = FB_VISUAL_TRUECOLOR,
39216 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
39217 index 3c14e43..eafa544 100644
39218 --- a/drivers/video/logo/logo_linux_clut224.ppm
39219 +++ b/drivers/video/logo/logo_linux_clut224.ppm
39220 @@ -1,1604 +1,1123 @@
39221 P3
39222 -# Standard 224-color Linux logo
39223 80 80
39224 255
39225 - 0 0 0 0 0 0 0 0 0 0 0 0
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 0 0 0
39231 - 0 0 0 0 0 0 0 0 0 0 0 0
39232 - 0 0 0 0 0 0 0 0 0 0 0 0
39233 - 0 0 0 0 0 0 0 0 0 0 0 0
39234 - 6 6 6 6 6 6 10 10 10 10 10 10
39235 - 10 10 10 6 6 6 6 6 6 6 6 6
39236 - 0 0 0 0 0 0 0 0 0 0 0 0
39237 - 0 0 0 0 0 0 0 0 0 0 0 0
39238 - 0 0 0 0 0 0 0 0 0 0 0 0
39239 - 0 0 0 0 0 0 0 0 0 0 0 0
39240 - 0 0 0 0 0 0 0 0 0 0 0 0
39241 - 0 0 0 0 0 0 0 0 0 0 0 0
39242 - 0 0 0 0 0 0 0 0 0 0 0 0
39243 - 0 0 0 0 0 0 0 0 0 0 0 0
39244 - 0 0 0 0 0 0 0 0 0 0 0 0
39245 - 0 0 0 0 0 0 0 0 0 0 0 0
39246 - 0 0 0 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 0 0 0 0 0 0
39251 - 0 0 0 0 0 0 0 0 0 0 0 0
39252 - 0 0 0 0 0 0 0 0 0 0 0 0
39253 - 0 0 0 6 6 6 10 10 10 14 14 14
39254 - 22 22 22 26 26 26 30 30 30 34 34 34
39255 - 30 30 30 30 30 30 26 26 26 18 18 18
39256 - 14 14 14 10 10 10 6 6 6 0 0 0
39257 - 0 0 0 0 0 0 0 0 0 0 0 0
39258 - 0 0 0 0 0 0 0 0 0 0 0 0
39259 - 0 0 0 0 0 0 0 0 0 0 0 0
39260 - 0 0 0 0 0 0 0 0 0 0 0 0
39261 - 0 0 0 0 0 0 0 0 0 0 0 0
39262 - 0 0 0 0 0 0 0 0 0 0 0 0
39263 - 0 0 0 0 0 0 0 0 0 0 0 0
39264 - 0 0 0 0 0 0 0 0 0 0 0 0
39265 - 0 0 0 0 0 0 0 0 0 0 0 0
39266 - 0 0 0 0 0 1 0 0 1 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 0 0 0
39270 - 0 0 0 0 0 0 0 0 0 0 0 0
39271 - 0 0 0 0 0 0 0 0 0 0 0 0
39272 - 0 0 0 0 0 0 0 0 0 0 0 0
39273 - 6 6 6 14 14 14 26 26 26 42 42 42
39274 - 54 54 54 66 66 66 78 78 78 78 78 78
39275 - 78 78 78 74 74 74 66 66 66 54 54 54
39276 - 42 42 42 26 26 26 18 18 18 10 10 10
39277 - 6 6 6 0 0 0 0 0 0 0 0 0
39278 - 0 0 0 0 0 0 0 0 0 0 0 0
39279 - 0 0 0 0 0 0 0 0 0 0 0 0
39280 - 0 0 0 0 0 0 0 0 0 0 0 0
39281 - 0 0 0 0 0 0 0 0 0 0 0 0
39282 - 0 0 0 0 0 0 0 0 0 0 0 0
39283 - 0 0 0 0 0 0 0 0 0 0 0 0
39284 - 0 0 0 0 0 0 0 0 0 0 0 0
39285 - 0 0 0 0 0 0 0 0 0 0 0 0
39286 - 0 0 1 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 0 0 0
39290 - 0 0 0 0 0 0 0 0 0 0 0 0
39291 - 0 0 0 0 0 0 0 0 0 0 0 0
39292 - 0 0 0 0 0 0 0 0 0 10 10 10
39293 - 22 22 22 42 42 42 66 66 66 86 86 86
39294 - 66 66 66 38 38 38 38 38 38 22 22 22
39295 - 26 26 26 34 34 34 54 54 54 66 66 66
39296 - 86 86 86 70 70 70 46 46 46 26 26 26
39297 - 14 14 14 6 6 6 0 0 0 0 0 0
39298 - 0 0 0 0 0 0 0 0 0 0 0 0
39299 - 0 0 0 0 0 0 0 0 0 0 0 0
39300 - 0 0 0 0 0 0 0 0 0 0 0 0
39301 - 0 0 0 0 0 0 0 0 0 0 0 0
39302 - 0 0 0 0 0 0 0 0 0 0 0 0
39303 - 0 0 0 0 0 0 0 0 0 0 0 0
39304 - 0 0 0 0 0 0 0 0 0 0 0 0
39305 - 0 0 0 0 0 0 0 0 0 0 0 0
39306 - 0 0 1 0 0 1 0 0 1 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 0 0 0 0 0 0 0 0 0 0 0 0
39311 - 0 0 0 0 0 0 0 0 0 0 0 0
39312 - 0 0 0 0 0 0 10 10 10 26 26 26
39313 - 50 50 50 82 82 82 58 58 58 6 6 6
39314 - 2 2 6 2 2 6 2 2 6 2 2 6
39315 - 2 2 6 2 2 6 2 2 6 2 2 6
39316 - 6 6 6 54 54 54 86 86 86 66 66 66
39317 - 38 38 38 18 18 18 6 6 6 0 0 0
39318 - 0 0 0 0 0 0 0 0 0 0 0 0
39319 - 0 0 0 0 0 0 0 0 0 0 0 0
39320 - 0 0 0 0 0 0 0 0 0 0 0 0
39321 - 0 0 0 0 0 0 0 0 0 0 0 0
39322 - 0 0 0 0 0 0 0 0 0 0 0 0
39323 - 0 0 0 0 0 0 0 0 0 0 0 0
39324 - 0 0 0 0 0 0 0 0 0 0 0 0
39325 - 0 0 0 0 0 0 0 0 0 0 0 0
39326 - 0 0 0 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 0 0 0 0 0 0
39331 - 0 0 0 0 0 0 0 0 0 0 0 0
39332 - 0 0 0 6 6 6 22 22 22 50 50 50
39333 - 78 78 78 34 34 34 2 2 6 2 2 6
39334 - 2 2 6 2 2 6 2 2 6 2 2 6
39335 - 2 2 6 2 2 6 2 2 6 2 2 6
39336 - 2 2 6 2 2 6 6 6 6 70 70 70
39337 - 78 78 78 46 46 46 22 22 22 6 6 6
39338 - 0 0 0 0 0 0 0 0 0 0 0 0
39339 - 0 0 0 0 0 0 0 0 0 0 0 0
39340 - 0 0 0 0 0 0 0 0 0 0 0 0
39341 - 0 0 0 0 0 0 0 0 0 0 0 0
39342 - 0 0 0 0 0 0 0 0 0 0 0 0
39343 - 0 0 0 0 0 0 0 0 0 0 0 0
39344 - 0 0 0 0 0 0 0 0 0 0 0 0
39345 - 0 0 0 0 0 0 0 0 0 0 0 0
39346 - 0 0 1 0 0 1 0 0 1 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 0 0 0 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 0 0 0 0 0 0 0 0 0
39352 - 6 6 6 18 18 18 42 42 42 82 82 82
39353 - 26 26 26 2 2 6 2 2 6 2 2 6
39354 - 2 2 6 2 2 6 2 2 6 2 2 6
39355 - 2 2 6 2 2 6 2 2 6 14 14 14
39356 - 46 46 46 34 34 34 6 6 6 2 2 6
39357 - 42 42 42 78 78 78 42 42 42 18 18 18
39358 - 6 6 6 0 0 0 0 0 0 0 0 0
39359 - 0 0 0 0 0 0 0 0 0 0 0 0
39360 - 0 0 0 0 0 0 0 0 0 0 0 0
39361 - 0 0 0 0 0 0 0 0 0 0 0 0
39362 - 0 0 0 0 0 0 0 0 0 0 0 0
39363 - 0 0 0 0 0 0 0 0 0 0 0 0
39364 - 0 0 0 0 0 0 0 0 0 0 0 0
39365 - 0 0 0 0 0 0 0 0 0 0 0 0
39366 - 0 0 1 0 0 0 0 0 1 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 0 0 0 0 0 0
39372 - 10 10 10 30 30 30 66 66 66 58 58 58
39373 - 2 2 6 2 2 6 2 2 6 2 2 6
39374 - 2 2 6 2 2 6 2 2 6 2 2 6
39375 - 2 2 6 2 2 6 2 2 6 26 26 26
39376 - 86 86 86 101 101 101 46 46 46 10 10 10
39377 - 2 2 6 58 58 58 70 70 70 34 34 34
39378 - 10 10 10 0 0 0 0 0 0 0 0 0
39379 - 0 0 0 0 0 0 0 0 0 0 0 0
39380 - 0 0 0 0 0 0 0 0 0 0 0 0
39381 - 0 0 0 0 0 0 0 0 0 0 0 0
39382 - 0 0 0 0 0 0 0 0 0 0 0 0
39383 - 0 0 0 0 0 0 0 0 0 0 0 0
39384 - 0 0 0 0 0 0 0 0 0 0 0 0
39385 - 0 0 0 0 0 0 0 0 0 0 0 0
39386 - 0 0 1 0 0 1 0 0 1 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 0 0 0 0 0 0 0 0 0
39392 - 14 14 14 42 42 42 86 86 86 10 10 10
39393 - 2 2 6 2 2 6 2 2 6 2 2 6
39394 - 2 2 6 2 2 6 2 2 6 2 2 6
39395 - 2 2 6 2 2 6 2 2 6 30 30 30
39396 - 94 94 94 94 94 94 58 58 58 26 26 26
39397 - 2 2 6 6 6 6 78 78 78 54 54 54
39398 - 22 22 22 6 6 6 0 0 0 0 0 0
39399 - 0 0 0 0 0 0 0 0 0 0 0 0
39400 - 0 0 0 0 0 0 0 0 0 0 0 0
39401 - 0 0 0 0 0 0 0 0 0 0 0 0
39402 - 0 0 0 0 0 0 0 0 0 0 0 0
39403 - 0 0 0 0 0 0 0 0 0 0 0 0
39404 - 0 0 0 0 0 0 0 0 0 0 0 0
39405 - 0 0 0 0 0 0 0 0 0 0 0 0
39406 - 0 0 0 0 0 0 0 0 0 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 0 0 0 0 0 0 0 0 0 6 6 6
39412 - 22 22 22 62 62 62 62 62 62 2 2 6
39413 - 2 2 6 2 2 6 2 2 6 2 2 6
39414 - 2 2 6 2 2 6 2 2 6 2 2 6
39415 - 2 2 6 2 2 6 2 2 6 26 26 26
39416 - 54 54 54 38 38 38 18 18 18 10 10 10
39417 - 2 2 6 2 2 6 34 34 34 82 82 82
39418 - 38 38 38 14 14 14 0 0 0 0 0 0
39419 - 0 0 0 0 0 0 0 0 0 0 0 0
39420 - 0 0 0 0 0 0 0 0 0 0 0 0
39421 - 0 0 0 0 0 0 0 0 0 0 0 0
39422 - 0 0 0 0 0 0 0 0 0 0 0 0
39423 - 0 0 0 0 0 0 0 0 0 0 0 0
39424 - 0 0 0 0 0 0 0 0 0 0 0 0
39425 - 0 0 0 0 0 0 0 0 0 0 0 0
39426 - 0 0 0 0 0 1 0 0 1 0 0 0
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 0 0 0 0 0 0 0 0 0 6 6 6
39432 - 30 30 30 78 78 78 30 30 30 2 2 6
39433 - 2 2 6 2 2 6 2 2 6 2 2 6
39434 - 2 2 6 2 2 6 2 2 6 2 2 6
39435 - 2 2 6 2 2 6 2 2 6 10 10 10
39436 - 10 10 10 2 2 6 2 2 6 2 2 6
39437 - 2 2 6 2 2 6 2 2 6 78 78 78
39438 - 50 50 50 18 18 18 6 6 6 0 0 0
39439 - 0 0 0 0 0 0 0 0 0 0 0 0
39440 - 0 0 0 0 0 0 0 0 0 0 0 0
39441 - 0 0 0 0 0 0 0 0 0 0 0 0
39442 - 0 0 0 0 0 0 0 0 0 0 0 0
39443 - 0 0 0 0 0 0 0 0 0 0 0 0
39444 - 0 0 0 0 0 0 0 0 0 0 0 0
39445 - 0 0 0 0 0 0 0 0 0 0 0 0
39446 - 0 0 1 0 0 0 0 0 0 0 0 0
39447 - 0 0 0 0 0 0 0 0 0 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 0 0 0
39449 - 0 0 0 0 0 0 0 0 0 0 0 0
39450 - 0 0 0 0 0 0 0 0 0 0 0 0
39451 - 0 0 0 0 0 0 0 0 0 10 10 10
39452 - 38 38 38 86 86 86 14 14 14 2 2 6
39453 - 2 2 6 2 2 6 2 2 6 2 2 6
39454 - 2 2 6 2 2 6 2 2 6 2 2 6
39455 - 2 2 6 2 2 6 2 2 6 2 2 6
39456 - 2 2 6 2 2 6 2 2 6 2 2 6
39457 - 2 2 6 2 2 6 2 2 6 54 54 54
39458 - 66 66 66 26 26 26 6 6 6 0 0 0
39459 - 0 0 0 0 0 0 0 0 0 0 0 0
39460 - 0 0 0 0 0 0 0 0 0 0 0 0
39461 - 0 0 0 0 0 0 0 0 0 0 0 0
39462 - 0 0 0 0 0 0 0 0 0 0 0 0
39463 - 0 0 0 0 0 0 0 0 0 0 0 0
39464 - 0 0 0 0 0 0 0 0 0 0 0 0
39465 - 0 0 0 0 0 0 0 0 0 0 0 0
39466 - 0 0 0 0 0 1 0 0 1 0 0 0
39467 - 0 0 0 0 0 0 0 0 0 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 0 0 0
39469 - 0 0 0 0 0 0 0 0 0 0 0 0
39470 - 0 0 0 0 0 0 0 0 0 0 0 0
39471 - 0 0 0 0 0 0 0 0 0 14 14 14
39472 - 42 42 42 82 82 82 2 2 6 2 2 6
39473 - 2 2 6 6 6 6 10 10 10 2 2 6
39474 - 2 2 6 2 2 6 2 2 6 2 2 6
39475 - 2 2 6 2 2 6 2 2 6 6 6 6
39476 - 14 14 14 10 10 10 2 2 6 2 2 6
39477 - 2 2 6 2 2 6 2 2 6 18 18 18
39478 - 82 82 82 34 34 34 10 10 10 0 0 0
39479 - 0 0 0 0 0 0 0 0 0 0 0 0
39480 - 0 0 0 0 0 0 0 0 0 0 0 0
39481 - 0 0 0 0 0 0 0 0 0 0 0 0
39482 - 0 0 0 0 0 0 0 0 0 0 0 0
39483 - 0 0 0 0 0 0 0 0 0 0 0 0
39484 - 0 0 0 0 0 0 0 0 0 0 0 0
39485 - 0 0 0 0 0 0 0 0 0 0 0 0
39486 - 0 0 1 0 0 0 0 0 0 0 0 0
39487 - 0 0 0 0 0 0 0 0 0 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 0 0 0
39489 - 0 0 0 0 0 0 0 0 0 0 0 0
39490 - 0 0 0 0 0 0 0 0 0 0 0 0
39491 - 0 0 0 0 0 0 0 0 0 14 14 14
39492 - 46 46 46 86 86 86 2 2 6 2 2 6
39493 - 6 6 6 6 6 6 22 22 22 34 34 34
39494 - 6 6 6 2 2 6 2 2 6 2 2 6
39495 - 2 2 6 2 2 6 18 18 18 34 34 34
39496 - 10 10 10 50 50 50 22 22 22 2 2 6
39497 - 2 2 6 2 2 6 2 2 6 10 10 10
39498 - 86 86 86 42 42 42 14 14 14 0 0 0
39499 - 0 0 0 0 0 0 0 0 0 0 0 0
39500 - 0 0 0 0 0 0 0 0 0 0 0 0
39501 - 0 0 0 0 0 0 0 0 0 0 0 0
39502 - 0 0 0 0 0 0 0 0 0 0 0 0
39503 - 0 0 0 0 0 0 0 0 0 0 0 0
39504 - 0 0 0 0 0 0 0 0 0 0 0 0
39505 - 0 0 0 0 0 0 0 0 0 0 0 0
39506 - 0 0 1 0 0 1 0 0 1 0 0 0
39507 - 0 0 0 0 0 0 0 0 0 0 0 0
39508 - 0 0 0 0 0 0 0 0 0 0 0 0
39509 - 0 0 0 0 0 0 0 0 0 0 0 0
39510 - 0 0 0 0 0 0 0 0 0 0 0 0
39511 - 0 0 0 0 0 0 0 0 0 14 14 14
39512 - 46 46 46 86 86 86 2 2 6 2 2 6
39513 - 38 38 38 116 116 116 94 94 94 22 22 22
39514 - 22 22 22 2 2 6 2 2 6 2 2 6
39515 - 14 14 14 86 86 86 138 138 138 162 162 162
39516 -154 154 154 38 38 38 26 26 26 6 6 6
39517 - 2 2 6 2 2 6 2 2 6 2 2 6
39518 - 86 86 86 46 46 46 14 14 14 0 0 0
39519 - 0 0 0 0 0 0 0 0 0 0 0 0
39520 - 0 0 0 0 0 0 0 0 0 0 0 0
39521 - 0 0 0 0 0 0 0 0 0 0 0 0
39522 - 0 0 0 0 0 0 0 0 0 0 0 0
39523 - 0 0 0 0 0 0 0 0 0 0 0 0
39524 - 0 0 0 0 0 0 0 0 0 0 0 0
39525 - 0 0 0 0 0 0 0 0 0 0 0 0
39526 - 0 0 0 0 0 0 0 0 0 0 0 0
39527 - 0 0 0 0 0 0 0 0 0 0 0 0
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 0 0 0 0 0 0
39531 - 0 0 0 0 0 0 0 0 0 14 14 14
39532 - 46 46 46 86 86 86 2 2 6 14 14 14
39533 -134 134 134 198 198 198 195 195 195 116 116 116
39534 - 10 10 10 2 2 6 2 2 6 6 6 6
39535 -101 98 89 187 187 187 210 210 210 218 218 218
39536 -214 214 214 134 134 134 14 14 14 6 6 6
39537 - 2 2 6 2 2 6 2 2 6 2 2 6
39538 - 86 86 86 50 50 50 18 18 18 6 6 6
39539 - 0 0 0 0 0 0 0 0 0 0 0 0
39540 - 0 0 0 0 0 0 0 0 0 0 0 0
39541 - 0 0 0 0 0 0 0 0 0 0 0 0
39542 - 0 0 0 0 0 0 0 0 0 0 0 0
39543 - 0 0 0 0 0 0 0 0 0 0 0 0
39544 - 0 0 0 0 0 0 0 0 0 0 0 0
39545 - 0 0 0 0 0 0 0 0 1 0 0 0
39546 - 0 0 1 0 0 1 0 0 1 0 0 0
39547 - 0 0 0 0 0 0 0 0 0 0 0 0
39548 - 0 0 0 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 0 0 0 0 0 0
39551 - 0 0 0 0 0 0 0 0 0 14 14 14
39552 - 46 46 46 86 86 86 2 2 6 54 54 54
39553 -218 218 218 195 195 195 226 226 226 246 246 246
39554 - 58 58 58 2 2 6 2 2 6 30 30 30
39555 -210 210 210 253 253 253 174 174 174 123 123 123
39556 -221 221 221 234 234 234 74 74 74 2 2 6
39557 - 2 2 6 2 2 6 2 2 6 2 2 6
39558 - 70 70 70 58 58 58 22 22 22 6 6 6
39559 - 0 0 0 0 0 0 0 0 0 0 0 0
39560 - 0 0 0 0 0 0 0 0 0 0 0 0
39561 - 0 0 0 0 0 0 0 0 0 0 0 0
39562 - 0 0 0 0 0 0 0 0 0 0 0 0
39563 - 0 0 0 0 0 0 0 0 0 0 0 0
39564 - 0 0 0 0 0 0 0 0 0 0 0 0
39565 - 0 0 0 0 0 0 0 0 0 0 0 0
39566 - 0 0 0 0 0 0 0 0 0 0 0 0
39567 - 0 0 0 0 0 0 0 0 0 0 0 0
39568 - 0 0 0 0 0 0 0 0 0 0 0 0
39569 - 0 0 0 0 0 0 0 0 0 0 0 0
39570 - 0 0 0 0 0 0 0 0 0 0 0 0
39571 - 0 0 0 0 0 0 0 0 0 14 14 14
39572 - 46 46 46 82 82 82 2 2 6 106 106 106
39573 -170 170 170 26 26 26 86 86 86 226 226 226
39574 -123 123 123 10 10 10 14 14 14 46 46 46
39575 -231 231 231 190 190 190 6 6 6 70 70 70
39576 - 90 90 90 238 238 238 158 158 158 2 2 6
39577 - 2 2 6 2 2 6 2 2 6 2 2 6
39578 - 70 70 70 58 58 58 22 22 22 6 6 6
39579 - 0 0 0 0 0 0 0 0 0 0 0 0
39580 - 0 0 0 0 0 0 0 0 0 0 0 0
39581 - 0 0 0 0 0 0 0 0 0 0 0 0
39582 - 0 0 0 0 0 0 0 0 0 0 0 0
39583 - 0 0 0 0 0 0 0 0 0 0 0 0
39584 - 0 0 0 0 0 0 0 0 0 0 0 0
39585 - 0 0 0 0 0 0 0 0 1 0 0 0
39586 - 0 0 1 0 0 1 0 0 1 0 0 0
39587 - 0 0 0 0 0 0 0 0 0 0 0 0
39588 - 0 0 0 0 0 0 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 0 0 0 0 0 0 0 0 0 0 0 0
39591 - 0 0 0 0 0 0 0 0 0 14 14 14
39592 - 42 42 42 86 86 86 6 6 6 116 116 116
39593 -106 106 106 6 6 6 70 70 70 149 149 149
39594 -128 128 128 18 18 18 38 38 38 54 54 54
39595 -221 221 221 106 106 106 2 2 6 14 14 14
39596 - 46 46 46 190 190 190 198 198 198 2 2 6
39597 - 2 2 6 2 2 6 2 2 6 2 2 6
39598 - 74 74 74 62 62 62 22 22 22 6 6 6
39599 - 0 0 0 0 0 0 0 0 0 0 0 0
39600 - 0 0 0 0 0 0 0 0 0 0 0 0
39601 - 0 0 0 0 0 0 0 0 0 0 0 0
39602 - 0 0 0 0 0 0 0 0 0 0 0 0
39603 - 0 0 0 0 0 0 0 0 0 0 0 0
39604 - 0 0 0 0 0 0 0 0 0 0 0 0
39605 - 0 0 0 0 0 0 0 0 1 0 0 0
39606 - 0 0 1 0 0 0 0 0 1 0 0 0
39607 - 0 0 0 0 0 0 0 0 0 0 0 0
39608 - 0 0 0 0 0 0 0 0 0 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 0 0 0 0 0 0 0 0 0 0 0 0
39611 - 0 0 0 0 0 0 0 0 0 14 14 14
39612 - 42 42 42 94 94 94 14 14 14 101 101 101
39613 -128 128 128 2 2 6 18 18 18 116 116 116
39614 -118 98 46 121 92 8 121 92 8 98 78 10
39615 -162 162 162 106 106 106 2 2 6 2 2 6
39616 - 2 2 6 195 195 195 195 195 195 6 6 6
39617 - 2 2 6 2 2 6 2 2 6 2 2 6
39618 - 74 74 74 62 62 62 22 22 22 6 6 6
39619 - 0 0 0 0 0 0 0 0 0 0 0 0
39620 - 0 0 0 0 0 0 0 0 0 0 0 0
39621 - 0 0 0 0 0 0 0 0 0 0 0 0
39622 - 0 0 0 0 0 0 0 0 0 0 0 0
39623 - 0 0 0 0 0 0 0 0 0 0 0 0
39624 - 0 0 0 0 0 0 0 0 0 0 0 0
39625 - 0 0 0 0 0 0 0 0 1 0 0 1
39626 - 0 0 1 0 0 0 0 0 1 0 0 0
39627 - 0 0 0 0 0 0 0 0 0 0 0 0
39628 - 0 0 0 0 0 0 0 0 0 0 0 0
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 0 0 0 0 0 0 0 0 0 0 0 0
39631 - 0 0 0 0 0 0 0 0 0 10 10 10
39632 - 38 38 38 90 90 90 14 14 14 58 58 58
39633 -210 210 210 26 26 26 54 38 6 154 114 10
39634 -226 170 11 236 186 11 225 175 15 184 144 12
39635 -215 174 15 175 146 61 37 26 9 2 2 6
39636 - 70 70 70 246 246 246 138 138 138 2 2 6
39637 - 2 2 6 2 2 6 2 2 6 2 2 6
39638 - 70 70 70 66 66 66 26 26 26 6 6 6
39639 - 0 0 0 0 0 0 0 0 0 0 0 0
39640 - 0 0 0 0 0 0 0 0 0 0 0 0
39641 - 0 0 0 0 0 0 0 0 0 0 0 0
39642 - 0 0 0 0 0 0 0 0 0 0 0 0
39643 - 0 0 0 0 0 0 0 0 0 0 0 0
39644 - 0 0 0 0 0 0 0 0 0 0 0 0
39645 - 0 0 0 0 0 0 0 0 0 0 0 0
39646 - 0 0 0 0 0 0 0 0 0 0 0 0
39647 - 0 0 0 0 0 0 0 0 0 0 0 0
39648 - 0 0 0 0 0 0 0 0 0 0 0 0
39649 - 0 0 0 0 0 0 0 0 0 0 0 0
39650 - 0 0 0 0 0 0 0 0 0 0 0 0
39651 - 0 0 0 0 0 0 0 0 0 10 10 10
39652 - 38 38 38 86 86 86 14 14 14 10 10 10
39653 -195 195 195 188 164 115 192 133 9 225 175 15
39654 -239 182 13 234 190 10 232 195 16 232 200 30
39655 -245 207 45 241 208 19 232 195 16 184 144 12
39656 -218 194 134 211 206 186 42 42 42 2 2 6
39657 - 2 2 6 2 2 6 2 2 6 2 2 6
39658 - 50 50 50 74 74 74 30 30 30 6 6 6
39659 - 0 0 0 0 0 0 0 0 0 0 0 0
39660 - 0 0 0 0 0 0 0 0 0 0 0 0
39661 - 0 0 0 0 0 0 0 0 0 0 0 0
39662 - 0 0 0 0 0 0 0 0 0 0 0 0
39663 - 0 0 0 0 0 0 0 0 0 0 0 0
39664 - 0 0 0 0 0 0 0 0 0 0 0 0
39665 - 0 0 0 0 0 0 0 0 0 0 0 0
39666 - 0 0 0 0 0 0 0 0 0 0 0 0
39667 - 0 0 0 0 0 0 0 0 0 0 0 0
39668 - 0 0 0 0 0 0 0 0 0 0 0 0
39669 - 0 0 0 0 0 0 0 0 0 0 0 0
39670 - 0 0 0 0 0 0 0 0 0 0 0 0
39671 - 0 0 0 0 0 0 0 0 0 10 10 10
39672 - 34 34 34 86 86 86 14 14 14 2 2 6
39673 -121 87 25 192 133 9 219 162 10 239 182 13
39674 -236 186 11 232 195 16 241 208 19 244 214 54
39675 -246 218 60 246 218 38 246 215 20 241 208 19
39676 -241 208 19 226 184 13 121 87 25 2 2 6
39677 - 2 2 6 2 2 6 2 2 6 2 2 6
39678 - 50 50 50 82 82 82 34 34 34 10 10 10
39679 - 0 0 0 0 0 0 0 0 0 0 0 0
39680 - 0 0 0 0 0 0 0 0 0 0 0 0
39681 - 0 0 0 0 0 0 0 0 0 0 0 0
39682 - 0 0 0 0 0 0 0 0 0 0 0 0
39683 - 0 0 0 0 0 0 0 0 0 0 0 0
39684 - 0 0 0 0 0 0 0 0 0 0 0 0
39685 - 0 0 0 0 0 0 0 0 0 0 0 0
39686 - 0 0 0 0 0 0 0 0 0 0 0 0
39687 - 0 0 0 0 0 0 0 0 0 0 0 0
39688 - 0 0 0 0 0 0 0 0 0 0 0 0
39689 - 0 0 0 0 0 0 0 0 0 0 0 0
39690 - 0 0 0 0 0 0 0 0 0 0 0 0
39691 - 0 0 0 0 0 0 0 0 0 10 10 10
39692 - 34 34 34 82 82 82 30 30 30 61 42 6
39693 -180 123 7 206 145 10 230 174 11 239 182 13
39694 -234 190 10 238 202 15 241 208 19 246 218 74
39695 -246 218 38 246 215 20 246 215 20 246 215 20
39696 -226 184 13 215 174 15 184 144 12 6 6 6
39697 - 2 2 6 2 2 6 2 2 6 2 2 6
39698 - 26 26 26 94 94 94 42 42 42 14 14 14
39699 - 0 0 0 0 0 0 0 0 0 0 0 0
39700 - 0 0 0 0 0 0 0 0 0 0 0 0
39701 - 0 0 0 0 0 0 0 0 0 0 0 0
39702 - 0 0 0 0 0 0 0 0 0 0 0 0
39703 - 0 0 0 0 0 0 0 0 0 0 0 0
39704 - 0 0 0 0 0 0 0 0 0 0 0 0
39705 - 0 0 0 0 0 0 0 0 0 0 0 0
39706 - 0 0 0 0 0 0 0 0 0 0 0 0
39707 - 0 0 0 0 0 0 0 0 0 0 0 0
39708 - 0 0 0 0 0 0 0 0 0 0 0 0
39709 - 0 0 0 0 0 0 0 0 0 0 0 0
39710 - 0 0 0 0 0 0 0 0 0 0 0 0
39711 - 0 0 0 0 0 0 0 0 0 10 10 10
39712 - 30 30 30 78 78 78 50 50 50 104 69 6
39713 -192 133 9 216 158 10 236 178 12 236 186 11
39714 -232 195 16 241 208 19 244 214 54 245 215 43
39715 -246 215 20 246 215 20 241 208 19 198 155 10
39716 -200 144 11 216 158 10 156 118 10 2 2 6
39717 - 2 2 6 2 2 6 2 2 6 2 2 6
39718 - 6 6 6 90 90 90 54 54 54 18 18 18
39719 - 6 6 6 0 0 0 0 0 0 0 0 0
39720 - 0 0 0 0 0 0 0 0 0 0 0 0
39721 - 0 0 0 0 0 0 0 0 0 0 0 0
39722 - 0 0 0 0 0 0 0 0 0 0 0 0
39723 - 0 0 0 0 0 0 0 0 0 0 0 0
39724 - 0 0 0 0 0 0 0 0 0 0 0 0
39725 - 0 0 0 0 0 0 0 0 0 0 0 0
39726 - 0 0 0 0 0 0 0 0 0 0 0 0
39727 - 0 0 0 0 0 0 0 0 0 0 0 0
39728 - 0 0 0 0 0 0 0 0 0 0 0 0
39729 - 0 0 0 0 0 0 0 0 0 0 0 0
39730 - 0 0 0 0 0 0 0 0 0 0 0 0
39731 - 0 0 0 0 0 0 0 0 0 10 10 10
39732 - 30 30 30 78 78 78 46 46 46 22 22 22
39733 -137 92 6 210 162 10 239 182 13 238 190 10
39734 -238 202 15 241 208 19 246 215 20 246 215 20
39735 -241 208 19 203 166 17 185 133 11 210 150 10
39736 -216 158 10 210 150 10 102 78 10 2 2 6
39737 - 6 6 6 54 54 54 14 14 14 2 2 6
39738 - 2 2 6 62 62 62 74 74 74 30 30 30
39739 - 10 10 10 0 0 0 0 0 0 0 0 0
39740 - 0 0 0 0 0 0 0 0 0 0 0 0
39741 - 0 0 0 0 0 0 0 0 0 0 0 0
39742 - 0 0 0 0 0 0 0 0 0 0 0 0
39743 - 0 0 0 0 0 0 0 0 0 0 0 0
39744 - 0 0 0 0 0 0 0 0 0 0 0 0
39745 - 0 0 0 0 0 0 0 0 0 0 0 0
39746 - 0 0 0 0 0 0 0 0 0 0 0 0
39747 - 0 0 0 0 0 0 0 0 0 0 0 0
39748 - 0 0 0 0 0 0 0 0 0 0 0 0
39749 - 0 0 0 0 0 0 0 0 0 0 0 0
39750 - 0 0 0 0 0 0 0 0 0 0 0 0
39751 - 0 0 0 0 0 0 0 0 0 10 10 10
39752 - 34 34 34 78 78 78 50 50 50 6 6 6
39753 - 94 70 30 139 102 15 190 146 13 226 184 13
39754 -232 200 30 232 195 16 215 174 15 190 146 13
39755 -168 122 10 192 133 9 210 150 10 213 154 11
39756 -202 150 34 182 157 106 101 98 89 2 2 6
39757 - 2 2 6 78 78 78 116 116 116 58 58 58
39758 - 2 2 6 22 22 22 90 90 90 46 46 46
39759 - 18 18 18 6 6 6 0 0 0 0 0 0
39760 - 0 0 0 0 0 0 0 0 0 0 0 0
39761 - 0 0 0 0 0 0 0 0 0 0 0 0
39762 - 0 0 0 0 0 0 0 0 0 0 0 0
39763 - 0 0 0 0 0 0 0 0 0 0 0 0
39764 - 0 0 0 0 0 0 0 0 0 0 0 0
39765 - 0 0 0 0 0 0 0 0 0 0 0 0
39766 - 0 0 0 0 0 0 0 0 0 0 0 0
39767 - 0 0 0 0 0 0 0 0 0 0 0 0
39768 - 0 0 0 0 0 0 0 0 0 0 0 0
39769 - 0 0 0 0 0 0 0 0 0 0 0 0
39770 - 0 0 0 0 0 0 0 0 0 0 0 0
39771 - 0 0 0 0 0 0 0 0 0 10 10 10
39772 - 38 38 38 86 86 86 50 50 50 6 6 6
39773 -128 128 128 174 154 114 156 107 11 168 122 10
39774 -198 155 10 184 144 12 197 138 11 200 144 11
39775 -206 145 10 206 145 10 197 138 11 188 164 115
39776 -195 195 195 198 198 198 174 174 174 14 14 14
39777 - 2 2 6 22 22 22 116 116 116 116 116 116
39778 - 22 22 22 2 2 6 74 74 74 70 70 70
39779 - 30 30 30 10 10 10 0 0 0 0 0 0
39780 - 0 0 0 0 0 0 0 0 0 0 0 0
39781 - 0 0 0 0 0 0 0 0 0 0 0 0
39782 - 0 0 0 0 0 0 0 0 0 0 0 0
39783 - 0 0 0 0 0 0 0 0 0 0 0 0
39784 - 0 0 0 0 0 0 0 0 0 0 0 0
39785 - 0 0 0 0 0 0 0 0 0 0 0 0
39786 - 0 0 0 0 0 0 0 0 0 0 0 0
39787 - 0 0 0 0 0 0 0 0 0 0 0 0
39788 - 0 0 0 0 0 0 0 0 0 0 0 0
39789 - 0 0 0 0 0 0 0 0 0 0 0 0
39790 - 0 0 0 0 0 0 0 0 0 0 0 0
39791 - 0 0 0 0 0 0 6 6 6 18 18 18
39792 - 50 50 50 101 101 101 26 26 26 10 10 10
39793 -138 138 138 190 190 190 174 154 114 156 107 11
39794 -197 138 11 200 144 11 197 138 11 192 133 9
39795 -180 123 7 190 142 34 190 178 144 187 187 187
39796 -202 202 202 221 221 221 214 214 214 66 66 66
39797 - 2 2 6 2 2 6 50 50 50 62 62 62
39798 - 6 6 6 2 2 6 10 10 10 90 90 90
39799 - 50 50 50 18 18 18 6 6 6 0 0 0
39800 - 0 0 0 0 0 0 0 0 0 0 0 0
39801 - 0 0 0 0 0 0 0 0 0 0 0 0
39802 - 0 0 0 0 0 0 0 0 0 0 0 0
39803 - 0 0 0 0 0 0 0 0 0 0 0 0
39804 - 0 0 0 0 0 0 0 0 0 0 0 0
39805 - 0 0 0 0 0 0 0 0 0 0 0 0
39806 - 0 0 0 0 0 0 0 0 0 0 0 0
39807 - 0 0 0 0 0 0 0 0 0 0 0 0
39808 - 0 0 0 0 0 0 0 0 0 0 0 0
39809 - 0 0 0 0 0 0 0 0 0 0 0 0
39810 - 0 0 0 0 0 0 0 0 0 0 0 0
39811 - 0 0 0 0 0 0 10 10 10 34 34 34
39812 - 74 74 74 74 74 74 2 2 6 6 6 6
39813 -144 144 144 198 198 198 190 190 190 178 166 146
39814 -154 121 60 156 107 11 156 107 11 168 124 44
39815 -174 154 114 187 187 187 190 190 190 210 210 210
39816 -246 246 246 253 253 253 253 253 253 182 182 182
39817 - 6 6 6 2 2 6 2 2 6 2 2 6
39818 - 2 2 6 2 2 6 2 2 6 62 62 62
39819 - 74 74 74 34 34 34 14 14 14 0 0 0
39820 - 0 0 0 0 0 0 0 0 0 0 0 0
39821 - 0 0 0 0 0 0 0 0 0 0 0 0
39822 - 0 0 0 0 0 0 0 0 0 0 0 0
39823 - 0 0 0 0 0 0 0 0 0 0 0 0
39824 - 0 0 0 0 0 0 0 0 0 0 0 0
39825 - 0 0 0 0 0 0 0 0 0 0 0 0
39826 - 0 0 0 0 0 0 0 0 0 0 0 0
39827 - 0 0 0 0 0 0 0 0 0 0 0 0
39828 - 0 0 0 0 0 0 0 0 0 0 0 0
39829 - 0 0 0 0 0 0 0 0 0 0 0 0
39830 - 0 0 0 0 0 0 0 0 0 0 0 0
39831 - 0 0 0 10 10 10 22 22 22 54 54 54
39832 - 94 94 94 18 18 18 2 2 6 46 46 46
39833 -234 234 234 221 221 221 190 190 190 190 190 190
39834 -190 190 190 187 187 187 187 187 187 190 190 190
39835 -190 190 190 195 195 195 214 214 214 242 242 242
39836 -253 253 253 253 253 253 253 253 253 253 253 253
39837 - 82 82 82 2 2 6 2 2 6 2 2 6
39838 - 2 2 6 2 2 6 2 2 6 14 14 14
39839 - 86 86 86 54 54 54 22 22 22 6 6 6
39840 - 0 0 0 0 0 0 0 0 0 0 0 0
39841 - 0 0 0 0 0 0 0 0 0 0 0 0
39842 - 0 0 0 0 0 0 0 0 0 0 0 0
39843 - 0 0 0 0 0 0 0 0 0 0 0 0
39844 - 0 0 0 0 0 0 0 0 0 0 0 0
39845 - 0 0 0 0 0 0 0 0 0 0 0 0
39846 - 0 0 0 0 0 0 0 0 0 0 0 0
39847 - 0 0 0 0 0 0 0 0 0 0 0 0
39848 - 0 0 0 0 0 0 0 0 0 0 0 0
39849 - 0 0 0 0 0 0 0 0 0 0 0 0
39850 - 0 0 0 0 0 0 0 0 0 0 0 0
39851 - 6 6 6 18 18 18 46 46 46 90 90 90
39852 - 46 46 46 18 18 18 6 6 6 182 182 182
39853 -253 253 253 246 246 246 206 206 206 190 190 190
39854 -190 190 190 190 190 190 190 190 190 190 190 190
39855 -206 206 206 231 231 231 250 250 250 253 253 253
39856 -253 253 253 253 253 253 253 253 253 253 253 253
39857 -202 202 202 14 14 14 2 2 6 2 2 6
39858 - 2 2 6 2 2 6 2 2 6 2 2 6
39859 - 42 42 42 86 86 86 42 42 42 18 18 18
39860 - 6 6 6 0 0 0 0 0 0 0 0 0
39861 - 0 0 0 0 0 0 0 0 0 0 0 0
39862 - 0 0 0 0 0 0 0 0 0 0 0 0
39863 - 0 0 0 0 0 0 0 0 0 0 0 0
39864 - 0 0 0 0 0 0 0 0 0 0 0 0
39865 - 0 0 0 0 0 0 0 0 0 0 0 0
39866 - 0 0 0 0 0 0 0 0 0 0 0 0
39867 - 0 0 0 0 0 0 0 0 0 0 0 0
39868 - 0 0 0 0 0 0 0 0 0 0 0 0
39869 - 0 0 0 0 0 0 0 0 0 0 0 0
39870 - 0 0 0 0 0 0 0 0 0 6 6 6
39871 - 14 14 14 38 38 38 74 74 74 66 66 66
39872 - 2 2 6 6 6 6 90 90 90 250 250 250
39873 -253 253 253 253 253 253 238 238 238 198 198 198
39874 -190 190 190 190 190 190 195 195 195 221 221 221
39875 -246 246 246 253 253 253 253 253 253 253 253 253
39876 -253 253 253 253 253 253 253 253 253 253 253 253
39877 -253 253 253 82 82 82 2 2 6 2 2 6
39878 - 2 2 6 2 2 6 2 2 6 2 2 6
39879 - 2 2 6 78 78 78 70 70 70 34 34 34
39880 - 14 14 14 6 6 6 0 0 0 0 0 0
39881 - 0 0 0 0 0 0 0 0 0 0 0 0
39882 - 0 0 0 0 0 0 0 0 0 0 0 0
39883 - 0 0 0 0 0 0 0 0 0 0 0 0
39884 - 0 0 0 0 0 0 0 0 0 0 0 0
39885 - 0 0 0 0 0 0 0 0 0 0 0 0
39886 - 0 0 0 0 0 0 0 0 0 0 0 0
39887 - 0 0 0 0 0 0 0 0 0 0 0 0
39888 - 0 0 0 0 0 0 0 0 0 0 0 0
39889 - 0 0 0 0 0 0 0 0 0 0 0 0
39890 - 0 0 0 0 0 0 0 0 0 14 14 14
39891 - 34 34 34 66 66 66 78 78 78 6 6 6
39892 - 2 2 6 18 18 18 218 218 218 253 253 253
39893 -253 253 253 253 253 253 253 253 253 246 246 246
39894 -226 226 226 231 231 231 246 246 246 253 253 253
39895 -253 253 253 253 253 253 253 253 253 253 253 253
39896 -253 253 253 253 253 253 253 253 253 253 253 253
39897 -253 253 253 178 178 178 2 2 6 2 2 6
39898 - 2 2 6 2 2 6 2 2 6 2 2 6
39899 - 2 2 6 18 18 18 90 90 90 62 62 62
39900 - 30 30 30 10 10 10 0 0 0 0 0 0
39901 - 0 0 0 0 0 0 0 0 0 0 0 0
39902 - 0 0 0 0 0 0 0 0 0 0 0 0
39903 - 0 0 0 0 0 0 0 0 0 0 0 0
39904 - 0 0 0 0 0 0 0 0 0 0 0 0
39905 - 0 0 0 0 0 0 0 0 0 0 0 0
39906 - 0 0 0 0 0 0 0 0 0 0 0 0
39907 - 0 0 0 0 0 0 0 0 0 0 0 0
39908 - 0 0 0 0 0 0 0 0 0 0 0 0
39909 - 0 0 0 0 0 0 0 0 0 0 0 0
39910 - 0 0 0 0 0 0 10 10 10 26 26 26
39911 - 58 58 58 90 90 90 18 18 18 2 2 6
39912 - 2 2 6 110 110 110 253 253 253 253 253 253
39913 -253 253 253 253 253 253 253 253 253 253 253 253
39914 -250 250 250 253 253 253 253 253 253 253 253 253
39915 -253 253 253 253 253 253 253 253 253 253 253 253
39916 -253 253 253 253 253 253 253 253 253 253 253 253
39917 -253 253 253 231 231 231 18 18 18 2 2 6
39918 - 2 2 6 2 2 6 2 2 6 2 2 6
39919 - 2 2 6 2 2 6 18 18 18 94 94 94
39920 - 54 54 54 26 26 26 10 10 10 0 0 0
39921 - 0 0 0 0 0 0 0 0 0 0 0 0
39922 - 0 0 0 0 0 0 0 0 0 0 0 0
39923 - 0 0 0 0 0 0 0 0 0 0 0 0
39924 - 0 0 0 0 0 0 0 0 0 0 0 0
39925 - 0 0 0 0 0 0 0 0 0 0 0 0
39926 - 0 0 0 0 0 0 0 0 0 0 0 0
39927 - 0 0 0 0 0 0 0 0 0 0 0 0
39928 - 0 0 0 0 0 0 0 0 0 0 0 0
39929 - 0 0 0 0 0 0 0 0 0 0 0 0
39930 - 0 0 0 6 6 6 22 22 22 50 50 50
39931 - 90 90 90 26 26 26 2 2 6 2 2 6
39932 - 14 14 14 195 195 195 250 250 250 253 253 253
39933 -253 253 253 253 253 253 253 253 253 253 253 253
39934 -253 253 253 253 253 253 253 253 253 253 253 253
39935 -253 253 253 253 253 253 253 253 253 253 253 253
39936 -253 253 253 253 253 253 253 253 253 253 253 253
39937 -250 250 250 242 242 242 54 54 54 2 2 6
39938 - 2 2 6 2 2 6 2 2 6 2 2 6
39939 - 2 2 6 2 2 6 2 2 6 38 38 38
39940 - 86 86 86 50 50 50 22 22 22 6 6 6
39941 - 0 0 0 0 0 0 0 0 0 0 0 0
39942 - 0 0 0 0 0 0 0 0 0 0 0 0
39943 - 0 0 0 0 0 0 0 0 0 0 0 0
39944 - 0 0 0 0 0 0 0 0 0 0 0 0
39945 - 0 0 0 0 0 0 0 0 0 0 0 0
39946 - 0 0 0 0 0 0 0 0 0 0 0 0
39947 - 0 0 0 0 0 0 0 0 0 0 0 0
39948 - 0 0 0 0 0 0 0 0 0 0 0 0
39949 - 0 0 0 0 0 0 0 0 0 0 0 0
39950 - 6 6 6 14 14 14 38 38 38 82 82 82
39951 - 34 34 34 2 2 6 2 2 6 2 2 6
39952 - 42 42 42 195 195 195 246 246 246 253 253 253
39953 -253 253 253 253 253 253 253 253 253 250 250 250
39954 -242 242 242 242 242 242 250 250 250 253 253 253
39955 -253 253 253 253 253 253 253 253 253 253 253 253
39956 -253 253 253 250 250 250 246 246 246 238 238 238
39957 -226 226 226 231 231 231 101 101 101 6 6 6
39958 - 2 2 6 2 2 6 2 2 6 2 2 6
39959 - 2 2 6 2 2 6 2 2 6 2 2 6
39960 - 38 38 38 82 82 82 42 42 42 14 14 14
39961 - 6 6 6 0 0 0 0 0 0 0 0 0
39962 - 0 0 0 0 0 0 0 0 0 0 0 0
39963 - 0 0 0 0 0 0 0 0 0 0 0 0
39964 - 0 0 0 0 0 0 0 0 0 0 0 0
39965 - 0 0 0 0 0 0 0 0 0 0 0 0
39966 - 0 0 0 0 0 0 0 0 0 0 0 0
39967 - 0 0 0 0 0 0 0 0 0 0 0 0
39968 - 0 0 0 0 0 0 0 0 0 0 0 0
39969 - 0 0 0 0 0 0 0 0 0 0 0 0
39970 - 10 10 10 26 26 26 62 62 62 66 66 66
39971 - 2 2 6 2 2 6 2 2 6 6 6 6
39972 - 70 70 70 170 170 170 206 206 206 234 234 234
39973 -246 246 246 250 250 250 250 250 250 238 238 238
39974 -226 226 226 231 231 231 238 238 238 250 250 250
39975 -250 250 250 250 250 250 246 246 246 231 231 231
39976 -214 214 214 206 206 206 202 202 202 202 202 202
39977 -198 198 198 202 202 202 182 182 182 18 18 18
39978 - 2 2 6 2 2 6 2 2 6 2 2 6
39979 - 2 2 6 2 2 6 2 2 6 2 2 6
39980 - 2 2 6 62 62 62 66 66 66 30 30 30
39981 - 10 10 10 0 0 0 0 0 0 0 0 0
39982 - 0 0 0 0 0 0 0 0 0 0 0 0
39983 - 0 0 0 0 0 0 0 0 0 0 0 0
39984 - 0 0 0 0 0 0 0 0 0 0 0 0
39985 - 0 0 0 0 0 0 0 0 0 0 0 0
39986 - 0 0 0 0 0 0 0 0 0 0 0 0
39987 - 0 0 0 0 0 0 0 0 0 0 0 0
39988 - 0 0 0 0 0 0 0 0 0 0 0 0
39989 - 0 0 0 0 0 0 0 0 0 0 0 0
39990 - 14 14 14 42 42 42 82 82 82 18 18 18
39991 - 2 2 6 2 2 6 2 2 6 10 10 10
39992 - 94 94 94 182 182 182 218 218 218 242 242 242
39993 -250 250 250 253 253 253 253 253 253 250 250 250
39994 -234 234 234 253 253 253 253 253 253 253 253 253
39995 -253 253 253 253 253 253 253 253 253 246 246 246
39996 -238 238 238 226 226 226 210 210 210 202 202 202
39997 -195 195 195 195 195 195 210 210 210 158 158 158
39998 - 6 6 6 14 14 14 50 50 50 14 14 14
39999 - 2 2 6 2 2 6 2 2 6 2 2 6
40000 - 2 2 6 6 6 6 86 86 86 46 46 46
40001 - 18 18 18 6 6 6 0 0 0 0 0 0
40002 - 0 0 0 0 0 0 0 0 0 0 0 0
40003 - 0 0 0 0 0 0 0 0 0 0 0 0
40004 - 0 0 0 0 0 0 0 0 0 0 0 0
40005 - 0 0 0 0 0 0 0 0 0 0 0 0
40006 - 0 0 0 0 0 0 0 0 0 0 0 0
40007 - 0 0 0 0 0 0 0 0 0 0 0 0
40008 - 0 0 0 0 0 0 0 0 0 0 0 0
40009 - 0 0 0 0 0 0 0 0 0 6 6 6
40010 - 22 22 22 54 54 54 70 70 70 2 2 6
40011 - 2 2 6 10 10 10 2 2 6 22 22 22
40012 -166 166 166 231 231 231 250 250 250 253 253 253
40013 -253 253 253 253 253 253 253 253 253 250 250 250
40014 -242 242 242 253 253 253 253 253 253 253 253 253
40015 -253 253 253 253 253 253 253 253 253 253 253 253
40016 -253 253 253 253 253 253 253 253 253 246 246 246
40017 -231 231 231 206 206 206 198 198 198 226 226 226
40018 - 94 94 94 2 2 6 6 6 6 38 38 38
40019 - 30 30 30 2 2 6 2 2 6 2 2 6
40020 - 2 2 6 2 2 6 62 62 62 66 66 66
40021 - 26 26 26 10 10 10 0 0 0 0 0 0
40022 - 0 0 0 0 0 0 0 0 0 0 0 0
40023 - 0 0 0 0 0 0 0 0 0 0 0 0
40024 - 0 0 0 0 0 0 0 0 0 0 0 0
40025 - 0 0 0 0 0 0 0 0 0 0 0 0
40026 - 0 0 0 0 0 0 0 0 0 0 0 0
40027 - 0 0 0 0 0 0 0 0 0 0 0 0
40028 - 0 0 0 0 0 0 0 0 0 0 0 0
40029 - 0 0 0 0 0 0 0 0 0 10 10 10
40030 - 30 30 30 74 74 74 50 50 50 2 2 6
40031 - 26 26 26 26 26 26 2 2 6 106 106 106
40032 -238 238 238 253 253 253 253 253 253 253 253 253
40033 -253 253 253 253 253 253 253 253 253 253 253 253
40034 -253 253 253 253 253 253 253 253 253 253 253 253
40035 -253 253 253 253 253 253 253 253 253 253 253 253
40036 -253 253 253 253 253 253 253 253 253 253 253 253
40037 -253 253 253 246 246 246 218 218 218 202 202 202
40038 -210 210 210 14 14 14 2 2 6 2 2 6
40039 - 30 30 30 22 22 22 2 2 6 2 2 6
40040 - 2 2 6 2 2 6 18 18 18 86 86 86
40041 - 42 42 42 14 14 14 0 0 0 0 0 0
40042 - 0 0 0 0 0 0 0 0 0 0 0 0
40043 - 0 0 0 0 0 0 0 0 0 0 0 0
40044 - 0 0 0 0 0 0 0 0 0 0 0 0
40045 - 0 0 0 0 0 0 0 0 0 0 0 0
40046 - 0 0 0 0 0 0 0 0 0 0 0 0
40047 - 0 0 0 0 0 0 0 0 0 0 0 0
40048 - 0 0 0 0 0 0 0 0 0 0 0 0
40049 - 0 0 0 0 0 0 0 0 0 14 14 14
40050 - 42 42 42 90 90 90 22 22 22 2 2 6
40051 - 42 42 42 2 2 6 18 18 18 218 218 218
40052 -253 253 253 253 253 253 253 253 253 253 253 253
40053 -253 253 253 253 253 253 253 253 253 253 253 253
40054 -253 253 253 253 253 253 253 253 253 253 253 253
40055 -253 253 253 253 253 253 253 253 253 253 253 253
40056 -253 253 253 253 253 253 253 253 253 253 253 253
40057 -253 253 253 253 253 253 250 250 250 221 221 221
40058 -218 218 218 101 101 101 2 2 6 14 14 14
40059 - 18 18 18 38 38 38 10 10 10 2 2 6
40060 - 2 2 6 2 2 6 2 2 6 78 78 78
40061 - 58 58 58 22 22 22 6 6 6 0 0 0
40062 - 0 0 0 0 0 0 0 0 0 0 0 0
40063 - 0 0 0 0 0 0 0 0 0 0 0 0
40064 - 0 0 0 0 0 0 0 0 0 0 0 0
40065 - 0 0 0 0 0 0 0 0 0 0 0 0
40066 - 0 0 0 0 0 0 0 0 0 0 0 0
40067 - 0 0 0 0 0 0 0 0 0 0 0 0
40068 - 0 0 0 0 0 0 0 0 0 0 0 0
40069 - 0 0 0 0 0 0 6 6 6 18 18 18
40070 - 54 54 54 82 82 82 2 2 6 26 26 26
40071 - 22 22 22 2 2 6 123 123 123 253 253 253
40072 -253 253 253 253 253 253 253 253 253 253 253 253
40073 -253 253 253 253 253 253 253 253 253 253 253 253
40074 -253 253 253 253 253 253 253 253 253 253 253 253
40075 -253 253 253 253 253 253 253 253 253 253 253 253
40076 -253 253 253 253 253 253 253 253 253 253 253 253
40077 -253 253 253 253 253 253 253 253 253 250 250 250
40078 -238 238 238 198 198 198 6 6 6 38 38 38
40079 - 58 58 58 26 26 26 38 38 38 2 2 6
40080 - 2 2 6 2 2 6 2 2 6 46 46 46
40081 - 78 78 78 30 30 30 10 10 10 0 0 0
40082 - 0 0 0 0 0 0 0 0 0 0 0 0
40083 - 0 0 0 0 0 0 0 0 0 0 0 0
40084 - 0 0 0 0 0 0 0 0 0 0 0 0
40085 - 0 0 0 0 0 0 0 0 0 0 0 0
40086 - 0 0 0 0 0 0 0 0 0 0 0 0
40087 - 0 0 0 0 0 0 0 0 0 0 0 0
40088 - 0 0 0 0 0 0 0 0 0 0 0 0
40089 - 0 0 0 0 0 0 10 10 10 30 30 30
40090 - 74 74 74 58 58 58 2 2 6 42 42 42
40091 - 2 2 6 22 22 22 231 231 231 253 253 253
40092 -253 253 253 253 253 253 253 253 253 253 253 253
40093 -253 253 253 253 253 253 253 253 253 250 250 250
40094 -253 253 253 253 253 253 253 253 253 253 253 253
40095 -253 253 253 253 253 253 253 253 253 253 253 253
40096 -253 253 253 253 253 253 253 253 253 253 253 253
40097 -253 253 253 253 253 253 253 253 253 253 253 253
40098 -253 253 253 246 246 246 46 46 46 38 38 38
40099 - 42 42 42 14 14 14 38 38 38 14 14 14
40100 - 2 2 6 2 2 6 2 2 6 6 6 6
40101 - 86 86 86 46 46 46 14 14 14 0 0 0
40102 - 0 0 0 0 0 0 0 0 0 0 0 0
40103 - 0 0 0 0 0 0 0 0 0 0 0 0
40104 - 0 0 0 0 0 0 0 0 0 0 0 0
40105 - 0 0 0 0 0 0 0 0 0 0 0 0
40106 - 0 0 0 0 0 0 0 0 0 0 0 0
40107 - 0 0 0 0 0 0 0 0 0 0 0 0
40108 - 0 0 0 0 0 0 0 0 0 0 0 0
40109 - 0 0 0 6 6 6 14 14 14 42 42 42
40110 - 90 90 90 18 18 18 18 18 18 26 26 26
40111 - 2 2 6 116 116 116 253 253 253 253 253 253
40112 -253 253 253 253 253 253 253 253 253 253 253 253
40113 -253 253 253 253 253 253 250 250 250 238 238 238
40114 -253 253 253 253 253 253 253 253 253 253 253 253
40115 -253 253 253 253 253 253 253 253 253 253 253 253
40116 -253 253 253 253 253 253 253 253 253 253 253 253
40117 -253 253 253 253 253 253 253 253 253 253 253 253
40118 -253 253 253 253 253 253 94 94 94 6 6 6
40119 - 2 2 6 2 2 6 10 10 10 34 34 34
40120 - 2 2 6 2 2 6 2 2 6 2 2 6
40121 - 74 74 74 58 58 58 22 22 22 6 6 6
40122 - 0 0 0 0 0 0 0 0 0 0 0 0
40123 - 0 0 0 0 0 0 0 0 0 0 0 0
40124 - 0 0 0 0 0 0 0 0 0 0 0 0
40125 - 0 0 0 0 0 0 0 0 0 0 0 0
40126 - 0 0 0 0 0 0 0 0 0 0 0 0
40127 - 0 0 0 0 0 0 0 0 0 0 0 0
40128 - 0 0 0 0 0 0 0 0 0 0 0 0
40129 - 0 0 0 10 10 10 26 26 26 66 66 66
40130 - 82 82 82 2 2 6 38 38 38 6 6 6
40131 - 14 14 14 210 210 210 253 253 253 253 253 253
40132 -253 253 253 253 253 253 253 253 253 253 253 253
40133 -253 253 253 253 253 253 246 246 246 242 242 242
40134 -253 253 253 253 253 253 253 253 253 253 253 253
40135 -253 253 253 253 253 253 253 253 253 253 253 253
40136 -253 253 253 253 253 253 253 253 253 253 253 253
40137 -253 253 253 253 253 253 253 253 253 253 253 253
40138 -253 253 253 253 253 253 144 144 144 2 2 6
40139 - 2 2 6 2 2 6 2 2 6 46 46 46
40140 - 2 2 6 2 2 6 2 2 6 2 2 6
40141 - 42 42 42 74 74 74 30 30 30 10 10 10
40142 - 0 0 0 0 0 0 0 0 0 0 0 0
40143 - 0 0 0 0 0 0 0 0 0 0 0 0
40144 - 0 0 0 0 0 0 0 0 0 0 0 0
40145 - 0 0 0 0 0 0 0 0 0 0 0 0
40146 - 0 0 0 0 0 0 0 0 0 0 0 0
40147 - 0 0 0 0 0 0 0 0 0 0 0 0
40148 - 0 0 0 0 0 0 0 0 0 0 0 0
40149 - 6 6 6 14 14 14 42 42 42 90 90 90
40150 - 26 26 26 6 6 6 42 42 42 2 2 6
40151 - 74 74 74 250 250 250 253 253 253 253 253 253
40152 -253 253 253 253 253 253 253 253 253 253 253 253
40153 -253 253 253 253 253 253 242 242 242 242 242 242
40154 -253 253 253 253 253 253 253 253 253 253 253 253
40155 -253 253 253 253 253 253 253 253 253 253 253 253
40156 -253 253 253 253 253 253 253 253 253 253 253 253
40157 -253 253 253 253 253 253 253 253 253 253 253 253
40158 -253 253 253 253 253 253 182 182 182 2 2 6
40159 - 2 2 6 2 2 6 2 2 6 46 46 46
40160 - 2 2 6 2 2 6 2 2 6 2 2 6
40161 - 10 10 10 86 86 86 38 38 38 10 10 10
40162 - 0 0 0 0 0 0 0 0 0 0 0 0
40163 - 0 0 0 0 0 0 0 0 0 0 0 0
40164 - 0 0 0 0 0 0 0 0 0 0 0 0
40165 - 0 0 0 0 0 0 0 0 0 0 0 0
40166 - 0 0 0 0 0 0 0 0 0 0 0 0
40167 - 0 0 0 0 0 0 0 0 0 0 0 0
40168 - 0 0 0 0 0 0 0 0 0 0 0 0
40169 - 10 10 10 26 26 26 66 66 66 82 82 82
40170 - 2 2 6 22 22 22 18 18 18 2 2 6
40171 -149 149 149 253 253 253 253 253 253 253 253 253
40172 -253 253 253 253 253 253 253 253 253 253 253 253
40173 -253 253 253 253 253 253 234 234 234 242 242 242
40174 -253 253 253 253 253 253 253 253 253 253 253 253
40175 -253 253 253 253 253 253 253 253 253 253 253 253
40176 -253 253 253 253 253 253 253 253 253 253 253 253
40177 -253 253 253 253 253 253 253 253 253 253 253 253
40178 -253 253 253 253 253 253 206 206 206 2 2 6
40179 - 2 2 6 2 2 6 2 2 6 38 38 38
40180 - 2 2 6 2 2 6 2 2 6 2 2 6
40181 - 6 6 6 86 86 86 46 46 46 14 14 14
40182 - 0 0 0 0 0 0 0 0 0 0 0 0
40183 - 0 0 0 0 0 0 0 0 0 0 0 0
40184 - 0 0 0 0 0 0 0 0 0 0 0 0
40185 - 0 0 0 0 0 0 0 0 0 0 0 0
40186 - 0 0 0 0 0 0 0 0 0 0 0 0
40187 - 0 0 0 0 0 0 0 0 0 0 0 0
40188 - 0 0 0 0 0 0 0 0 0 6 6 6
40189 - 18 18 18 46 46 46 86 86 86 18 18 18
40190 - 2 2 6 34 34 34 10 10 10 6 6 6
40191 -210 210 210 253 253 253 253 253 253 253 253 253
40192 -253 253 253 253 253 253 253 253 253 253 253 253
40193 -253 253 253 253 253 253 234 234 234 242 242 242
40194 -253 253 253 253 253 253 253 253 253 253 253 253
40195 -253 253 253 253 253 253 253 253 253 253 253 253
40196 -253 253 253 253 253 253 253 253 253 253 253 253
40197 -253 253 253 253 253 253 253 253 253 253 253 253
40198 -253 253 253 253 253 253 221 221 221 6 6 6
40199 - 2 2 6 2 2 6 6 6 6 30 30 30
40200 - 2 2 6 2 2 6 2 2 6 2 2 6
40201 - 2 2 6 82 82 82 54 54 54 18 18 18
40202 - 6 6 6 0 0 0 0 0 0 0 0 0
40203 - 0 0 0 0 0 0 0 0 0 0 0 0
40204 - 0 0 0 0 0 0 0 0 0 0 0 0
40205 - 0 0 0 0 0 0 0 0 0 0 0 0
40206 - 0 0 0 0 0 0 0 0 0 0 0 0
40207 - 0 0 0 0 0 0 0 0 0 0 0 0
40208 - 0 0 0 0 0 0 0 0 0 10 10 10
40209 - 26 26 26 66 66 66 62 62 62 2 2 6
40210 - 2 2 6 38 38 38 10 10 10 26 26 26
40211 -238 238 238 253 253 253 253 253 253 253 253 253
40212 -253 253 253 253 253 253 253 253 253 253 253 253
40213 -253 253 253 253 253 253 231 231 231 238 238 238
40214 -253 253 253 253 253 253 253 253 253 253 253 253
40215 -253 253 253 253 253 253 253 253 253 253 253 253
40216 -253 253 253 253 253 253 253 253 253 253 253 253
40217 -253 253 253 253 253 253 253 253 253 253 253 253
40218 -253 253 253 253 253 253 231 231 231 6 6 6
40219 - 2 2 6 2 2 6 10 10 10 30 30 30
40220 - 2 2 6 2 2 6 2 2 6 2 2 6
40221 - 2 2 6 66 66 66 58 58 58 22 22 22
40222 - 6 6 6 0 0 0 0 0 0 0 0 0
40223 - 0 0 0 0 0 0 0 0 0 0 0 0
40224 - 0 0 0 0 0 0 0 0 0 0 0 0
40225 - 0 0 0 0 0 0 0 0 0 0 0 0
40226 - 0 0 0 0 0 0 0 0 0 0 0 0
40227 - 0 0 0 0 0 0 0 0 0 0 0 0
40228 - 0 0 0 0 0 0 0 0 0 10 10 10
40229 - 38 38 38 78 78 78 6 6 6 2 2 6
40230 - 2 2 6 46 46 46 14 14 14 42 42 42
40231 -246 246 246 253 253 253 253 253 253 253 253 253
40232 -253 253 253 253 253 253 253 253 253 253 253 253
40233 -253 253 253 253 253 253 231 231 231 242 242 242
40234 -253 253 253 253 253 253 253 253 253 253 253 253
40235 -253 253 253 253 253 253 253 253 253 253 253 253
40236 -253 253 253 253 253 253 253 253 253 253 253 253
40237 -253 253 253 253 253 253 253 253 253 253 253 253
40238 -253 253 253 253 253 253 234 234 234 10 10 10
40239 - 2 2 6 2 2 6 22 22 22 14 14 14
40240 - 2 2 6 2 2 6 2 2 6 2 2 6
40241 - 2 2 6 66 66 66 62 62 62 22 22 22
40242 - 6 6 6 0 0 0 0 0 0 0 0 0
40243 - 0 0 0 0 0 0 0 0 0 0 0 0
40244 - 0 0 0 0 0 0 0 0 0 0 0 0
40245 - 0 0 0 0 0 0 0 0 0 0 0 0
40246 - 0 0 0 0 0 0 0 0 0 0 0 0
40247 - 0 0 0 0 0 0 0 0 0 0 0 0
40248 - 0 0 0 0 0 0 6 6 6 18 18 18
40249 - 50 50 50 74 74 74 2 2 6 2 2 6
40250 - 14 14 14 70 70 70 34 34 34 62 62 62
40251 -250 250 250 253 253 253 253 253 253 253 253 253
40252 -253 253 253 253 253 253 253 253 253 253 253 253
40253 -253 253 253 253 253 253 231 231 231 246 246 246
40254 -253 253 253 253 253 253 253 253 253 253 253 253
40255 -253 253 253 253 253 253 253 253 253 253 253 253
40256 -253 253 253 253 253 253 253 253 253 253 253 253
40257 -253 253 253 253 253 253 253 253 253 253 253 253
40258 -253 253 253 253 253 253 234 234 234 14 14 14
40259 - 2 2 6 2 2 6 30 30 30 2 2 6
40260 - 2 2 6 2 2 6 2 2 6 2 2 6
40261 - 2 2 6 66 66 66 62 62 62 22 22 22
40262 - 6 6 6 0 0 0 0 0 0 0 0 0
40263 - 0 0 0 0 0 0 0 0 0 0 0 0
40264 - 0 0 0 0 0 0 0 0 0 0 0 0
40265 - 0 0 0 0 0 0 0 0 0 0 0 0
40266 - 0 0 0 0 0 0 0 0 0 0 0 0
40267 - 0 0 0 0 0 0 0 0 0 0 0 0
40268 - 0 0 0 0 0 0 6 6 6 18 18 18
40269 - 54 54 54 62 62 62 2 2 6 2 2 6
40270 - 2 2 6 30 30 30 46 46 46 70 70 70
40271 -250 250 250 253 253 253 253 253 253 253 253 253
40272 -253 253 253 253 253 253 253 253 253 253 253 253
40273 -253 253 253 253 253 253 231 231 231 246 246 246
40274 -253 253 253 253 253 253 253 253 253 253 253 253
40275 -253 253 253 253 253 253 253 253 253 253 253 253
40276 -253 253 253 253 253 253 253 253 253 253 253 253
40277 -253 253 253 253 253 253 253 253 253 253 253 253
40278 -253 253 253 253 253 253 226 226 226 10 10 10
40279 - 2 2 6 6 6 6 30 30 30 2 2 6
40280 - 2 2 6 2 2 6 2 2 6 2 2 6
40281 - 2 2 6 66 66 66 58 58 58 22 22 22
40282 - 6 6 6 0 0 0 0 0 0 0 0 0
40283 - 0 0 0 0 0 0 0 0 0 0 0 0
40284 - 0 0 0 0 0 0 0 0 0 0 0 0
40285 - 0 0 0 0 0 0 0 0 0 0 0 0
40286 - 0 0 0 0 0 0 0 0 0 0 0 0
40287 - 0 0 0 0 0 0 0 0 0 0 0 0
40288 - 0 0 0 0 0 0 6 6 6 22 22 22
40289 - 58 58 58 62 62 62 2 2 6 2 2 6
40290 - 2 2 6 2 2 6 30 30 30 78 78 78
40291 -250 250 250 253 253 253 253 253 253 253 253 253
40292 -253 253 253 253 253 253 253 253 253 253 253 253
40293 -253 253 253 253 253 253 231 231 231 246 246 246
40294 -253 253 253 253 253 253 253 253 253 253 253 253
40295 -253 253 253 253 253 253 253 253 253 253 253 253
40296 -253 253 253 253 253 253 253 253 253 253 253 253
40297 -253 253 253 253 253 253 253 253 253 253 253 253
40298 -253 253 253 253 253 253 206 206 206 2 2 6
40299 - 22 22 22 34 34 34 18 14 6 22 22 22
40300 - 26 26 26 18 18 18 6 6 6 2 2 6
40301 - 2 2 6 82 82 82 54 54 54 18 18 18
40302 - 6 6 6 0 0 0 0 0 0 0 0 0
40303 - 0 0 0 0 0 0 0 0 0 0 0 0
40304 - 0 0 0 0 0 0 0 0 0 0 0 0
40305 - 0 0 0 0 0 0 0 0 0 0 0 0
40306 - 0 0 0 0 0 0 0 0 0 0 0 0
40307 - 0 0 0 0 0 0 0 0 0 0 0 0
40308 - 0 0 0 0 0 0 6 6 6 26 26 26
40309 - 62 62 62 106 106 106 74 54 14 185 133 11
40310 -210 162 10 121 92 8 6 6 6 62 62 62
40311 -238 238 238 253 253 253 253 253 253 253 253 253
40312 -253 253 253 253 253 253 253 253 253 253 253 253
40313 -253 253 253 253 253 253 231 231 231 246 246 246
40314 -253 253 253 253 253 253 253 253 253 253 253 253
40315 -253 253 253 253 253 253 253 253 253 253 253 253
40316 -253 253 253 253 253 253 253 253 253 253 253 253
40317 -253 253 253 253 253 253 253 253 253 253 253 253
40318 -253 253 253 253 253 253 158 158 158 18 18 18
40319 - 14 14 14 2 2 6 2 2 6 2 2 6
40320 - 6 6 6 18 18 18 66 66 66 38 38 38
40321 - 6 6 6 94 94 94 50 50 50 18 18 18
40322 - 6 6 6 0 0 0 0 0 0 0 0 0
40323 - 0 0 0 0 0 0 0 0 0 0 0 0
40324 - 0 0 0 0 0 0 0 0 0 0 0 0
40325 - 0 0 0 0 0 0 0 0 0 0 0 0
40326 - 0 0 0 0 0 0 0 0 0 0 0 0
40327 - 0 0 0 0 0 0 0 0 0 6 6 6
40328 - 10 10 10 10 10 10 18 18 18 38 38 38
40329 - 78 78 78 142 134 106 216 158 10 242 186 14
40330 -246 190 14 246 190 14 156 118 10 10 10 10
40331 - 90 90 90 238 238 238 253 253 253 253 253 253
40332 -253 253 253 253 253 253 253 253 253 253 253 253
40333 -253 253 253 253 253 253 231 231 231 250 250 250
40334 -253 253 253 253 253 253 253 253 253 253 253 253
40335 -253 253 253 253 253 253 253 253 253 253 253 253
40336 -253 253 253 253 253 253 253 253 253 253 253 253
40337 -253 253 253 253 253 253 253 253 253 246 230 190
40338 -238 204 91 238 204 91 181 142 44 37 26 9
40339 - 2 2 6 2 2 6 2 2 6 2 2 6
40340 - 2 2 6 2 2 6 38 38 38 46 46 46
40341 - 26 26 26 106 106 106 54 54 54 18 18 18
40342 - 6 6 6 0 0 0 0 0 0 0 0 0
40343 - 0 0 0 0 0 0 0 0 0 0 0 0
40344 - 0 0 0 0 0 0 0 0 0 0 0 0
40345 - 0 0 0 0 0 0 0 0 0 0 0 0
40346 - 0 0 0 0 0 0 0 0 0 0 0 0
40347 - 0 0 0 6 6 6 14 14 14 22 22 22
40348 - 30 30 30 38 38 38 50 50 50 70 70 70
40349 -106 106 106 190 142 34 226 170 11 242 186 14
40350 -246 190 14 246 190 14 246 190 14 154 114 10
40351 - 6 6 6 74 74 74 226 226 226 253 253 253
40352 -253 253 253 253 253 253 253 253 253 253 253 253
40353 -253 253 253 253 253 253 231 231 231 250 250 250
40354 -253 253 253 253 253 253 253 253 253 253 253 253
40355 -253 253 253 253 253 253 253 253 253 253 253 253
40356 -253 253 253 253 253 253 253 253 253 253 253 253
40357 -253 253 253 253 253 253 253 253 253 228 184 62
40358 -241 196 14 241 208 19 232 195 16 38 30 10
40359 - 2 2 6 2 2 6 2 2 6 2 2 6
40360 - 2 2 6 6 6 6 30 30 30 26 26 26
40361 -203 166 17 154 142 90 66 66 66 26 26 26
40362 - 6 6 6 0 0 0 0 0 0 0 0 0
40363 - 0 0 0 0 0 0 0 0 0 0 0 0
40364 - 0 0 0 0 0 0 0 0 0 0 0 0
40365 - 0 0 0 0 0 0 0 0 0 0 0 0
40366 - 0 0 0 0 0 0 0 0 0 0 0 0
40367 - 6 6 6 18 18 18 38 38 38 58 58 58
40368 - 78 78 78 86 86 86 101 101 101 123 123 123
40369 -175 146 61 210 150 10 234 174 13 246 186 14
40370 -246 190 14 246 190 14 246 190 14 238 190 10
40371 -102 78 10 2 2 6 46 46 46 198 198 198
40372 -253 253 253 253 253 253 253 253 253 253 253 253
40373 -253 253 253 253 253 253 234 234 234 242 242 242
40374 -253 253 253 253 253 253 253 253 253 253 253 253
40375 -253 253 253 253 253 253 253 253 253 253 253 253
40376 -253 253 253 253 253 253 253 253 253 253 253 253
40377 -253 253 253 253 253 253 253 253 253 224 178 62
40378 -242 186 14 241 196 14 210 166 10 22 18 6
40379 - 2 2 6 2 2 6 2 2 6 2 2 6
40380 - 2 2 6 2 2 6 6 6 6 121 92 8
40381 -238 202 15 232 195 16 82 82 82 34 34 34
40382 - 10 10 10 0 0 0 0 0 0 0 0 0
40383 - 0 0 0 0 0 0 0 0 0 0 0 0
40384 - 0 0 0 0 0 0 0 0 0 0 0 0
40385 - 0 0 0 0 0 0 0 0 0 0 0 0
40386 - 0 0 0 0 0 0 0 0 0 0 0 0
40387 - 14 14 14 38 38 38 70 70 70 154 122 46
40388 -190 142 34 200 144 11 197 138 11 197 138 11
40389 -213 154 11 226 170 11 242 186 14 246 190 14
40390 -246 190 14 246 190 14 246 190 14 246 190 14
40391 -225 175 15 46 32 6 2 2 6 22 22 22
40392 -158 158 158 250 250 250 253 253 253 253 253 253
40393 -253 253 253 253 253 253 253 253 253 253 253 253
40394 -253 253 253 253 253 253 253 253 253 253 253 253
40395 -253 253 253 253 253 253 253 253 253 253 253 253
40396 -253 253 253 253 253 253 253 253 253 253 253 253
40397 -253 253 253 250 250 250 242 242 242 224 178 62
40398 -239 182 13 236 186 11 213 154 11 46 32 6
40399 - 2 2 6 2 2 6 2 2 6 2 2 6
40400 - 2 2 6 2 2 6 61 42 6 225 175 15
40401 -238 190 10 236 186 11 112 100 78 42 42 42
40402 - 14 14 14 0 0 0 0 0 0 0 0 0
40403 - 0 0 0 0 0 0 0 0 0 0 0 0
40404 - 0 0 0 0 0 0 0 0 0 0 0 0
40405 - 0 0 0 0 0 0 0 0 0 0 0 0
40406 - 0 0 0 0 0 0 0 0 0 6 6 6
40407 - 22 22 22 54 54 54 154 122 46 213 154 11
40408 -226 170 11 230 174 11 226 170 11 226 170 11
40409 -236 178 12 242 186 14 246 190 14 246 190 14
40410 -246 190 14 246 190 14 246 190 14 246 190 14
40411 -241 196 14 184 144 12 10 10 10 2 2 6
40412 - 6 6 6 116 116 116 242 242 242 253 253 253
40413 -253 253 253 253 253 253 253 253 253 253 253 253
40414 -253 253 253 253 253 253 253 253 253 253 253 253
40415 -253 253 253 253 253 253 253 253 253 253 253 253
40416 -253 253 253 253 253 253 253 253 253 253 253 253
40417 -253 253 253 231 231 231 198 198 198 214 170 54
40418 -236 178 12 236 178 12 210 150 10 137 92 6
40419 - 18 14 6 2 2 6 2 2 6 2 2 6
40420 - 6 6 6 70 47 6 200 144 11 236 178 12
40421 -239 182 13 239 182 13 124 112 88 58 58 58
40422 - 22 22 22 6 6 6 0 0 0 0 0 0
40423 - 0 0 0 0 0 0 0 0 0 0 0 0
40424 - 0 0 0 0 0 0 0 0 0 0 0 0
40425 - 0 0 0 0 0 0 0 0 0 0 0 0
40426 - 0 0 0 0 0 0 0 0 0 10 10 10
40427 - 30 30 30 70 70 70 180 133 36 226 170 11
40428 -239 182 13 242 186 14 242 186 14 246 186 14
40429 -246 190 14 246 190 14 246 190 14 246 190 14
40430 -246 190 14 246 190 14 246 190 14 246 190 14
40431 -246 190 14 232 195 16 98 70 6 2 2 6
40432 - 2 2 6 2 2 6 66 66 66 221 221 221
40433 -253 253 253 253 253 253 253 253 253 253 253 253
40434 -253 253 253 253 253 253 253 253 253 253 253 253
40435 -253 253 253 253 253 253 253 253 253 253 253 253
40436 -253 253 253 253 253 253 253 253 253 253 253 253
40437 -253 253 253 206 206 206 198 198 198 214 166 58
40438 -230 174 11 230 174 11 216 158 10 192 133 9
40439 -163 110 8 116 81 8 102 78 10 116 81 8
40440 -167 114 7 197 138 11 226 170 11 239 182 13
40441 -242 186 14 242 186 14 162 146 94 78 78 78
40442 - 34 34 34 14 14 14 6 6 6 0 0 0
40443 - 0 0 0 0 0 0 0 0 0 0 0 0
40444 - 0 0 0 0 0 0 0 0 0 0 0 0
40445 - 0 0 0 0 0 0 0 0 0 0 0 0
40446 - 0 0 0 0 0 0 0 0 0 6 6 6
40447 - 30 30 30 78 78 78 190 142 34 226 170 11
40448 -239 182 13 246 190 14 246 190 14 246 190 14
40449 -246 190 14 246 190 14 246 190 14 246 190 14
40450 -246 190 14 246 190 14 246 190 14 246 190 14
40451 -246 190 14 241 196 14 203 166 17 22 18 6
40452 - 2 2 6 2 2 6 2 2 6 38 38 38
40453 -218 218 218 253 253 253 253 253 253 253 253 253
40454 -253 253 253 253 253 253 253 253 253 253 253 253
40455 -253 253 253 253 253 253 253 253 253 253 253 253
40456 -253 253 253 253 253 253 253 253 253 253 253 253
40457 -250 250 250 206 206 206 198 198 198 202 162 69
40458 -226 170 11 236 178 12 224 166 10 210 150 10
40459 -200 144 11 197 138 11 192 133 9 197 138 11
40460 -210 150 10 226 170 11 242 186 14 246 190 14
40461 -246 190 14 246 186 14 225 175 15 124 112 88
40462 - 62 62 62 30 30 30 14 14 14 6 6 6
40463 - 0 0 0 0 0 0 0 0 0 0 0 0
40464 - 0 0 0 0 0 0 0 0 0 0 0 0
40465 - 0 0 0 0 0 0 0 0 0 0 0 0
40466 - 0 0 0 0 0 0 0 0 0 10 10 10
40467 - 30 30 30 78 78 78 174 135 50 224 166 10
40468 -239 182 13 246 190 14 246 190 14 246 190 14
40469 -246 190 14 246 190 14 246 190 14 246 190 14
40470 -246 190 14 246 190 14 246 190 14 246 190 14
40471 -246 190 14 246 190 14 241 196 14 139 102 15
40472 - 2 2 6 2 2 6 2 2 6 2 2 6
40473 - 78 78 78 250 250 250 253 253 253 253 253 253
40474 -253 253 253 253 253 253 253 253 253 253 253 253
40475 -253 253 253 253 253 253 253 253 253 253 253 253
40476 -253 253 253 253 253 253 253 253 253 253 253 253
40477 -250 250 250 214 214 214 198 198 198 190 150 46
40478 -219 162 10 236 178 12 234 174 13 224 166 10
40479 -216 158 10 213 154 11 213 154 11 216 158 10
40480 -226 170 11 239 182 13 246 190 14 246 190 14
40481 -246 190 14 246 190 14 242 186 14 206 162 42
40482 -101 101 101 58 58 58 30 30 30 14 14 14
40483 - 6 6 6 0 0 0 0 0 0 0 0 0
40484 - 0 0 0 0 0 0 0 0 0 0 0 0
40485 - 0 0 0 0 0 0 0 0 0 0 0 0
40486 - 0 0 0 0 0 0 0 0 0 10 10 10
40487 - 30 30 30 74 74 74 174 135 50 216 158 10
40488 -236 178 12 246 190 14 246 190 14 246 190 14
40489 -246 190 14 246 190 14 246 190 14 246 190 14
40490 -246 190 14 246 190 14 246 190 14 246 190 14
40491 -246 190 14 246 190 14 241 196 14 226 184 13
40492 - 61 42 6 2 2 6 2 2 6 2 2 6
40493 - 22 22 22 238 238 238 253 253 253 253 253 253
40494 -253 253 253 253 253 253 253 253 253 253 253 253
40495 -253 253 253 253 253 253 253 253 253 253 253 253
40496 -253 253 253 253 253 253 253 253 253 253 253 253
40497 -253 253 253 226 226 226 187 187 187 180 133 36
40498 -216 158 10 236 178 12 239 182 13 236 178 12
40499 -230 174 11 226 170 11 226 170 11 230 174 11
40500 -236 178 12 242 186 14 246 190 14 246 190 14
40501 -246 190 14 246 190 14 246 186 14 239 182 13
40502 -206 162 42 106 106 106 66 66 66 34 34 34
40503 - 14 14 14 6 6 6 0 0 0 0 0 0
40504 - 0 0 0 0 0 0 0 0 0 0 0 0
40505 - 0 0 0 0 0 0 0 0 0 0 0 0
40506 - 0 0 0 0 0 0 0 0 0 6 6 6
40507 - 26 26 26 70 70 70 163 133 67 213 154 11
40508 -236 178 12 246 190 14 246 190 14 246 190 14
40509 -246 190 14 246 190 14 246 190 14 246 190 14
40510 -246 190 14 246 190 14 246 190 14 246 190 14
40511 -246 190 14 246 190 14 246 190 14 241 196 14
40512 -190 146 13 18 14 6 2 2 6 2 2 6
40513 - 46 46 46 246 246 246 253 253 253 253 253 253
40514 -253 253 253 253 253 253 253 253 253 253 253 253
40515 -253 253 253 253 253 253 253 253 253 253 253 253
40516 -253 253 253 253 253 253 253 253 253 253 253 253
40517 -253 253 253 221 221 221 86 86 86 156 107 11
40518 -216 158 10 236 178 12 242 186 14 246 186 14
40519 -242 186 14 239 182 13 239 182 13 242 186 14
40520 -242 186 14 246 186 14 246 190 14 246 190 14
40521 -246 190 14 246 190 14 246 190 14 246 190 14
40522 -242 186 14 225 175 15 142 122 72 66 66 66
40523 - 30 30 30 10 10 10 0 0 0 0 0 0
40524 - 0 0 0 0 0 0 0 0 0 0 0 0
40525 - 0 0 0 0 0 0 0 0 0 0 0 0
40526 - 0 0 0 0 0 0 0 0 0 6 6 6
40527 - 26 26 26 70 70 70 163 133 67 210 150 10
40528 -236 178 12 246 190 14 246 190 14 246 190 14
40529 -246 190 14 246 190 14 246 190 14 246 190 14
40530 -246 190 14 246 190 14 246 190 14 246 190 14
40531 -246 190 14 246 190 14 246 190 14 246 190 14
40532 -232 195 16 121 92 8 34 34 34 106 106 106
40533 -221 221 221 253 253 253 253 253 253 253 253 253
40534 -253 253 253 253 253 253 253 253 253 253 253 253
40535 -253 253 253 253 253 253 253 253 253 253 253 253
40536 -253 253 253 253 253 253 253 253 253 253 253 253
40537 -242 242 242 82 82 82 18 14 6 163 110 8
40538 -216 158 10 236 178 12 242 186 14 246 190 14
40539 -246 190 14 246 190 14 246 190 14 246 190 14
40540 -246 190 14 246 190 14 246 190 14 246 190 14
40541 -246 190 14 246 190 14 246 190 14 246 190 14
40542 -246 190 14 246 190 14 242 186 14 163 133 67
40543 - 46 46 46 18 18 18 6 6 6 0 0 0
40544 - 0 0 0 0 0 0 0 0 0 0 0 0
40545 - 0 0 0 0 0 0 0 0 0 0 0 0
40546 - 0 0 0 0 0 0 0 0 0 10 10 10
40547 - 30 30 30 78 78 78 163 133 67 210 150 10
40548 -236 178 12 246 186 14 246 190 14 246 190 14
40549 -246 190 14 246 190 14 246 190 14 246 190 14
40550 -246 190 14 246 190 14 246 190 14 246 190 14
40551 -246 190 14 246 190 14 246 190 14 246 190 14
40552 -241 196 14 215 174 15 190 178 144 253 253 253
40553 -253 253 253 253 253 253 253 253 253 253 253 253
40554 -253 253 253 253 253 253 253 253 253 253 253 253
40555 -253 253 253 253 253 253 253 253 253 253 253 253
40556 -253 253 253 253 253 253 253 253 253 218 218 218
40557 - 58 58 58 2 2 6 22 18 6 167 114 7
40558 -216 158 10 236 178 12 246 186 14 246 190 14
40559 -246 190 14 246 190 14 246 190 14 246 190 14
40560 -246 190 14 246 190 14 246 190 14 246 190 14
40561 -246 190 14 246 190 14 246 190 14 246 190 14
40562 -246 190 14 246 186 14 242 186 14 190 150 46
40563 - 54 54 54 22 22 22 6 6 6 0 0 0
40564 - 0 0 0 0 0 0 0 0 0 0 0 0
40565 - 0 0 0 0 0 0 0 0 0 0 0 0
40566 - 0 0 0 0 0 0 0 0 0 14 14 14
40567 - 38 38 38 86 86 86 180 133 36 213 154 11
40568 -236 178 12 246 186 14 246 190 14 246 190 14
40569 -246 190 14 246 190 14 246 190 14 246 190 14
40570 -246 190 14 246 190 14 246 190 14 246 190 14
40571 -246 190 14 246 190 14 246 190 14 246 190 14
40572 -246 190 14 232 195 16 190 146 13 214 214 214
40573 -253 253 253 253 253 253 253 253 253 253 253 253
40574 -253 253 253 253 253 253 253 253 253 253 253 253
40575 -253 253 253 253 253 253 253 253 253 253 253 253
40576 -253 253 253 250 250 250 170 170 170 26 26 26
40577 - 2 2 6 2 2 6 37 26 9 163 110 8
40578 -219 162 10 239 182 13 246 186 14 246 190 14
40579 -246 190 14 246 190 14 246 190 14 246 190 14
40580 -246 190 14 246 190 14 246 190 14 246 190 14
40581 -246 190 14 246 190 14 246 190 14 246 190 14
40582 -246 186 14 236 178 12 224 166 10 142 122 72
40583 - 46 46 46 18 18 18 6 6 6 0 0 0
40584 - 0 0 0 0 0 0 0 0 0 0 0 0
40585 - 0 0 0 0 0 0 0 0 0 0 0 0
40586 - 0 0 0 0 0 0 6 6 6 18 18 18
40587 - 50 50 50 109 106 95 192 133 9 224 166 10
40588 -242 186 14 246 190 14 246 190 14 246 190 14
40589 -246 190 14 246 190 14 246 190 14 246 190 14
40590 -246 190 14 246 190 14 246 190 14 246 190 14
40591 -246 190 14 246 190 14 246 190 14 246 190 14
40592 -242 186 14 226 184 13 210 162 10 142 110 46
40593 -226 226 226 253 253 253 253 253 253 253 253 253
40594 -253 253 253 253 253 253 253 253 253 253 253 253
40595 -253 253 253 253 253 253 253 253 253 253 253 253
40596 -198 198 198 66 66 66 2 2 6 2 2 6
40597 - 2 2 6 2 2 6 50 34 6 156 107 11
40598 -219 162 10 239 182 13 246 186 14 246 190 14
40599 -246 190 14 246 190 14 246 190 14 246 190 14
40600 -246 190 14 246 190 14 246 190 14 246 190 14
40601 -246 190 14 246 190 14 246 190 14 242 186 14
40602 -234 174 13 213 154 11 154 122 46 66 66 66
40603 - 30 30 30 10 10 10 0 0 0 0 0 0
40604 - 0 0 0 0 0 0 0 0 0 0 0 0
40605 - 0 0 0 0 0 0 0 0 0 0 0 0
40606 - 0 0 0 0 0 0 6 6 6 22 22 22
40607 - 58 58 58 154 121 60 206 145 10 234 174 13
40608 -242 186 14 246 186 14 246 190 14 246 190 14
40609 -246 190 14 246 190 14 246 190 14 246 190 14
40610 -246 190 14 246 190 14 246 190 14 246 190 14
40611 -246 190 14 246 190 14 246 190 14 246 190 14
40612 -246 186 14 236 178 12 210 162 10 163 110 8
40613 - 61 42 6 138 138 138 218 218 218 250 250 250
40614 -253 253 253 253 253 253 253 253 253 250 250 250
40615 -242 242 242 210 210 210 144 144 144 66 66 66
40616 - 6 6 6 2 2 6 2 2 6 2 2 6
40617 - 2 2 6 2 2 6 61 42 6 163 110 8
40618 -216 158 10 236 178 12 246 190 14 246 190 14
40619 -246 190 14 246 190 14 246 190 14 246 190 14
40620 -246 190 14 246 190 14 246 190 14 246 190 14
40621 -246 190 14 239 182 13 230 174 11 216 158 10
40622 -190 142 34 124 112 88 70 70 70 38 38 38
40623 - 18 18 18 6 6 6 0 0 0 0 0 0
40624 - 0 0 0 0 0 0 0 0 0 0 0 0
40625 - 0 0 0 0 0 0 0 0 0 0 0 0
40626 - 0 0 0 0 0 0 6 6 6 22 22 22
40627 - 62 62 62 168 124 44 206 145 10 224 166 10
40628 -236 178 12 239 182 13 242 186 14 242 186 14
40629 -246 186 14 246 190 14 246 190 14 246 190 14
40630 -246 190 14 246 190 14 246 190 14 246 190 14
40631 -246 190 14 246 190 14 246 190 14 246 190 14
40632 -246 190 14 236 178 12 216 158 10 175 118 6
40633 - 80 54 7 2 2 6 6 6 6 30 30 30
40634 - 54 54 54 62 62 62 50 50 50 38 38 38
40635 - 14 14 14 2 2 6 2 2 6 2 2 6
40636 - 2 2 6 2 2 6 2 2 6 2 2 6
40637 - 2 2 6 6 6 6 80 54 7 167 114 7
40638 -213 154 11 236 178 12 246 190 14 246 190 14
40639 -246 190 14 246 190 14 246 190 14 246 190 14
40640 -246 190 14 242 186 14 239 182 13 239 182 13
40641 -230 174 11 210 150 10 174 135 50 124 112 88
40642 - 82 82 82 54 54 54 34 34 34 18 18 18
40643 - 6 6 6 0 0 0 0 0 0 0 0 0
40644 - 0 0 0 0 0 0 0 0 0 0 0 0
40645 - 0 0 0 0 0 0 0 0 0 0 0 0
40646 - 0 0 0 0 0 0 6 6 6 18 18 18
40647 - 50 50 50 158 118 36 192 133 9 200 144 11
40648 -216 158 10 219 162 10 224 166 10 226 170 11
40649 -230 174 11 236 178 12 239 182 13 239 182 13
40650 -242 186 14 246 186 14 246 190 14 246 190 14
40651 -246 190 14 246 190 14 246 190 14 246 190 14
40652 -246 186 14 230 174 11 210 150 10 163 110 8
40653 -104 69 6 10 10 10 2 2 6 2 2 6
40654 - 2 2 6 2 2 6 2 2 6 2 2 6
40655 - 2 2 6 2 2 6 2 2 6 2 2 6
40656 - 2 2 6 2 2 6 2 2 6 2 2 6
40657 - 2 2 6 6 6 6 91 60 6 167 114 7
40658 -206 145 10 230 174 11 242 186 14 246 190 14
40659 -246 190 14 246 190 14 246 186 14 242 186 14
40660 -239 182 13 230 174 11 224 166 10 213 154 11
40661 -180 133 36 124 112 88 86 86 86 58 58 58
40662 - 38 38 38 22 22 22 10 10 10 6 6 6
40663 - 0 0 0 0 0 0 0 0 0 0 0 0
40664 - 0 0 0 0 0 0 0 0 0 0 0 0
40665 - 0 0 0 0 0 0 0 0 0 0 0 0
40666 - 0 0 0 0 0 0 0 0 0 14 14 14
40667 - 34 34 34 70 70 70 138 110 50 158 118 36
40668 -167 114 7 180 123 7 192 133 9 197 138 11
40669 -200 144 11 206 145 10 213 154 11 219 162 10
40670 -224 166 10 230 174 11 239 182 13 242 186 14
40671 -246 186 14 246 186 14 246 186 14 246 186 14
40672 -239 182 13 216 158 10 185 133 11 152 99 6
40673 -104 69 6 18 14 6 2 2 6 2 2 6
40674 - 2 2 6 2 2 6 2 2 6 2 2 6
40675 - 2 2 6 2 2 6 2 2 6 2 2 6
40676 - 2 2 6 2 2 6 2 2 6 2 2 6
40677 - 2 2 6 6 6 6 80 54 7 152 99 6
40678 -192 133 9 219 162 10 236 178 12 239 182 13
40679 -246 186 14 242 186 14 239 182 13 236 178 12
40680 -224 166 10 206 145 10 192 133 9 154 121 60
40681 - 94 94 94 62 62 62 42 42 42 22 22 22
40682 - 14 14 14 6 6 6 0 0 0 0 0 0
40683 - 0 0 0 0 0 0 0 0 0 0 0 0
40684 - 0 0 0 0 0 0 0 0 0 0 0 0
40685 - 0 0 0 0 0 0 0 0 0 0 0 0
40686 - 0 0 0 0 0 0 0 0 0 6 6 6
40687 - 18 18 18 34 34 34 58 58 58 78 78 78
40688 -101 98 89 124 112 88 142 110 46 156 107 11
40689 -163 110 8 167 114 7 175 118 6 180 123 7
40690 -185 133 11 197 138 11 210 150 10 219 162 10
40691 -226 170 11 236 178 12 236 178 12 234 174 13
40692 -219 162 10 197 138 11 163 110 8 130 83 6
40693 - 91 60 6 10 10 10 2 2 6 2 2 6
40694 - 18 18 18 38 38 38 38 38 38 38 38 38
40695 - 38 38 38 38 38 38 38 38 38 38 38 38
40696 - 38 38 38 38 38 38 26 26 26 2 2 6
40697 - 2 2 6 6 6 6 70 47 6 137 92 6
40698 -175 118 6 200 144 11 219 162 10 230 174 11
40699 -234 174 13 230 174 11 219 162 10 210 150 10
40700 -192 133 9 163 110 8 124 112 88 82 82 82
40701 - 50 50 50 30 30 30 14 14 14 6 6 6
40702 - 0 0 0 0 0 0 0 0 0 0 0 0
40703 - 0 0 0 0 0 0 0 0 0 0 0 0
40704 - 0 0 0 0 0 0 0 0 0 0 0 0
40705 - 0 0 0 0 0 0 0 0 0 0 0 0
40706 - 0 0 0 0 0 0 0 0 0 0 0 0
40707 - 6 6 6 14 14 14 22 22 22 34 34 34
40708 - 42 42 42 58 58 58 74 74 74 86 86 86
40709 -101 98 89 122 102 70 130 98 46 121 87 25
40710 -137 92 6 152 99 6 163 110 8 180 123 7
40711 -185 133 11 197 138 11 206 145 10 200 144 11
40712 -180 123 7 156 107 11 130 83 6 104 69 6
40713 - 50 34 6 54 54 54 110 110 110 101 98 89
40714 - 86 86 86 82 82 82 78 78 78 78 78 78
40715 - 78 78 78 78 78 78 78 78 78 78 78 78
40716 - 78 78 78 82 82 82 86 86 86 94 94 94
40717 -106 106 106 101 101 101 86 66 34 124 80 6
40718 -156 107 11 180 123 7 192 133 9 200 144 11
40719 -206 145 10 200 144 11 192 133 9 175 118 6
40720 -139 102 15 109 106 95 70 70 70 42 42 42
40721 - 22 22 22 10 10 10 0 0 0 0 0 0
40722 - 0 0 0 0 0 0 0 0 0 0 0 0
40723 - 0 0 0 0 0 0 0 0 0 0 0 0
40724 - 0 0 0 0 0 0 0 0 0 0 0 0
40725 - 0 0 0 0 0 0 0 0 0 0 0 0
40726 - 0 0 0 0 0 0 0 0 0 0 0 0
40727 - 0 0 0 0 0 0 6 6 6 10 10 10
40728 - 14 14 14 22 22 22 30 30 30 38 38 38
40729 - 50 50 50 62 62 62 74 74 74 90 90 90
40730 -101 98 89 112 100 78 121 87 25 124 80 6
40731 -137 92 6 152 99 6 152 99 6 152 99 6
40732 -138 86 6 124 80 6 98 70 6 86 66 30
40733 -101 98 89 82 82 82 58 58 58 46 46 46
40734 - 38 38 38 34 34 34 34 34 34 34 34 34
40735 - 34 34 34 34 34 34 34 34 34 34 34 34
40736 - 34 34 34 34 34 34 38 38 38 42 42 42
40737 - 54 54 54 82 82 82 94 86 76 91 60 6
40738 -134 86 6 156 107 11 167 114 7 175 118 6
40739 -175 118 6 167 114 7 152 99 6 121 87 25
40740 -101 98 89 62 62 62 34 34 34 18 18 18
40741 - 6 6 6 0 0 0 0 0 0 0 0 0
40742 - 0 0 0 0 0 0 0 0 0 0 0 0
40743 - 0 0 0 0 0 0 0 0 0 0 0 0
40744 - 0 0 0 0 0 0 0 0 0 0 0 0
40745 - 0 0 0 0 0 0 0 0 0 0 0 0
40746 - 0 0 0 0 0 0 0 0 0 0 0 0
40747 - 0 0 0 0 0 0 0 0 0 0 0 0
40748 - 0 0 0 6 6 6 6 6 6 10 10 10
40749 - 18 18 18 22 22 22 30 30 30 42 42 42
40750 - 50 50 50 66 66 66 86 86 86 101 98 89
40751 -106 86 58 98 70 6 104 69 6 104 69 6
40752 -104 69 6 91 60 6 82 62 34 90 90 90
40753 - 62 62 62 38 38 38 22 22 22 14 14 14
40754 - 10 10 10 10 10 10 10 10 10 10 10 10
40755 - 10 10 10 10 10 10 6 6 6 10 10 10
40756 - 10 10 10 10 10 10 10 10 10 14 14 14
40757 - 22 22 22 42 42 42 70 70 70 89 81 66
40758 - 80 54 7 104 69 6 124 80 6 137 92 6
40759 -134 86 6 116 81 8 100 82 52 86 86 86
40760 - 58 58 58 30 30 30 14 14 14 6 6 6
40761 - 0 0 0 0 0 0 0 0 0 0 0 0
40762 - 0 0 0 0 0 0 0 0 0 0 0 0
40763 - 0 0 0 0 0 0 0 0 0 0 0 0
40764 - 0 0 0 0 0 0 0 0 0 0 0 0
40765 - 0 0 0 0 0 0 0 0 0 0 0 0
40766 - 0 0 0 0 0 0 0 0 0 0 0 0
40767 - 0 0 0 0 0 0 0 0 0 0 0 0
40768 - 0 0 0 0 0 0 0 0 0 0 0 0
40769 - 0 0 0 6 6 6 10 10 10 14 14 14
40770 - 18 18 18 26 26 26 38 38 38 54 54 54
40771 - 70 70 70 86 86 86 94 86 76 89 81 66
40772 - 89 81 66 86 86 86 74 74 74 50 50 50
40773 - 30 30 30 14 14 14 6 6 6 0 0 0
40774 - 0 0 0 0 0 0 0 0 0 0 0 0
40775 - 0 0 0 0 0 0 0 0 0 0 0 0
40776 - 0 0 0 0 0 0 0 0 0 0 0 0
40777 - 6 6 6 18 18 18 34 34 34 58 58 58
40778 - 82 82 82 89 81 66 89 81 66 89 81 66
40779 - 94 86 66 94 86 76 74 74 74 50 50 50
40780 - 26 26 26 14 14 14 6 6 6 0 0 0
40781 - 0 0 0 0 0 0 0 0 0 0 0 0
40782 - 0 0 0 0 0 0 0 0 0 0 0 0
40783 - 0 0 0 0 0 0 0 0 0 0 0 0
40784 - 0 0 0 0 0 0 0 0 0 0 0 0
40785 - 0 0 0 0 0 0 0 0 0 0 0 0
40786 - 0 0 0 0 0 0 0 0 0 0 0 0
40787 - 0 0 0 0 0 0 0 0 0 0 0 0
40788 - 0 0 0 0 0 0 0 0 0 0 0 0
40789 - 0 0 0 0 0 0 0 0 0 0 0 0
40790 - 6 6 6 6 6 6 14 14 14 18 18 18
40791 - 30 30 30 38 38 38 46 46 46 54 54 54
40792 - 50 50 50 42 42 42 30 30 30 18 18 18
40793 - 10 10 10 0 0 0 0 0 0 0 0 0
40794 - 0 0 0 0 0 0 0 0 0 0 0 0
40795 - 0 0 0 0 0 0 0 0 0 0 0 0
40796 - 0 0 0 0 0 0 0 0 0 0 0 0
40797 - 0 0 0 6 6 6 14 14 14 26 26 26
40798 - 38 38 38 50 50 50 58 58 58 58 58 58
40799 - 54 54 54 42 42 42 30 30 30 18 18 18
40800 - 10 10 10 0 0 0 0 0 0 0 0 0
40801 - 0 0 0 0 0 0 0 0 0 0 0 0
40802 - 0 0 0 0 0 0 0 0 0 0 0 0
40803 - 0 0 0 0 0 0 0 0 0 0 0 0
40804 - 0 0 0 0 0 0 0 0 0 0 0 0
40805 - 0 0 0 0 0 0 0 0 0 0 0 0
40806 - 0 0 0 0 0 0 0 0 0 0 0 0
40807 - 0 0 0 0 0 0 0 0 0 0 0 0
40808 - 0 0 0 0 0 0 0 0 0 0 0 0
40809 - 0 0 0 0 0 0 0 0 0 0 0 0
40810 - 0 0 0 0 0 0 0 0 0 6 6 6
40811 - 6 6 6 10 10 10 14 14 14 18 18 18
40812 - 18 18 18 14 14 14 10 10 10 6 6 6
40813 - 0 0 0 0 0 0 0 0 0 0 0 0
40814 - 0 0 0 0 0 0 0 0 0 0 0 0
40815 - 0 0 0 0 0 0 0 0 0 0 0 0
40816 - 0 0 0 0 0 0 0 0 0 0 0 0
40817 - 0 0 0 0 0 0 0 0 0 6 6 6
40818 - 14 14 14 18 18 18 22 22 22 22 22 22
40819 - 18 18 18 14 14 14 10 10 10 6 6 6
40820 - 0 0 0 0 0 0 0 0 0 0 0 0
40821 - 0 0 0 0 0 0 0 0 0 0 0 0
40822 - 0 0 0 0 0 0 0 0 0 0 0 0
40823 - 0 0 0 0 0 0 0 0 0 0 0 0
40824 - 0 0 0 0 0 0 0 0 0 0 0 0
40825 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40826 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40838 +4 4 4 4 4 4
40839 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40840 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40852 +4 4 4 4 4 4
40853 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40854 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40866 +4 4 4 4 4 4
40867 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40868 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40879 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40880 +4 4 4 4 4 4
40881 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40893 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40894 +4 4 4 4 4 4
40895 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40907 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40908 +4 4 4 4 4 4
40909 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40913 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40914 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40918 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40919 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40920 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40921 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40922 +4 4 4 4 4 4
40923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40927 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40928 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40929 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40932 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40933 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40934 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40935 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40936 +4 4 4 4 4 4
40937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40941 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40942 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40943 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40947 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40948 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40949 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40950 +4 4 4 4 4 4
40951 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40952 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40953 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40954 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40955 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40956 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40957 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40959 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40960 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40961 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40962 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40963 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40964 +4 4 4 4 4 4
40965 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40966 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40967 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40968 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40969 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40970 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40971 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40972 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40973 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40974 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40975 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40976 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40977 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40978 +4 4 4 4 4 4
40979 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40980 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40981 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40982 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40983 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40984 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40985 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40986 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40987 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40988 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40989 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40990 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40991 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40992 +4 4 4 4 4 4
40993 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40994 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40995 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40996 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40997 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40998 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40999 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
41000 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
41001 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
41002 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
41003 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
41004 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
41005 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
41006 +4 4 4 4 4 4
41007 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41008 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41009 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
41010 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
41011 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
41012 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
41013 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
41014 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
41015 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
41016 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
41017 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
41018 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
41019 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
41020 +4 4 4 4 4 4
41021 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41022 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41023 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
41024 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
41025 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
41026 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
41027 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
41028 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
41029 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
41030 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
41031 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
41032 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
41033 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
41034 +4 4 4 4 4 4
41035 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41036 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41037 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
41038 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
41039 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
41040 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
41041 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
41042 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
41043 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
41044 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
41045 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
41046 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
41047 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
41048 +4 4 4 4 4 4
41049 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41050 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
41051 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
41052 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
41053 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
41054 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
41055 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
41056 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
41057 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
41058 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
41059 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
41060 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
41061 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
41062 +4 4 4 4 4 4
41063 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41064 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
41065 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
41066 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
41067 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41068 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
41069 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
41070 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
41071 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
41072 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
41073 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
41074 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
41075 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
41076 +0 0 0 4 4 4
41077 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
41078 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
41079 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
41080 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
41081 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
41082 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
41083 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
41084 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
41085 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
41086 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
41087 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
41088 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
41089 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
41090 +2 0 0 0 0 0
41091 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
41092 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
41093 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
41094 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
41095 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
41096 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
41097 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
41098 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
41099 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
41100 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
41101 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
41102 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
41103 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
41104 +37 38 37 0 0 0
41105 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41106 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
41107 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
41108 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
41109 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
41110 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
41111 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
41112 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
41113 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
41114 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
41115 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
41116 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
41117 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
41118 +85 115 134 4 0 0
41119 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
41120 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
41121 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
41122 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
41123 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
41124 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
41125 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
41126 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
41127 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
41128 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
41129 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
41130 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
41131 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
41132 +60 73 81 4 0 0
41133 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
41134 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
41135 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
41136 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
41137 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
41138 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
41139 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
41140 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
41141 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
41142 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
41143 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
41144 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
41145 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
41146 +16 19 21 4 0 0
41147 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
41148 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
41149 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
41150 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
41151 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
41152 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
41153 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
41154 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
41155 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
41156 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
41157 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
41158 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
41159 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
41160 +4 0 0 4 3 3
41161 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
41162 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
41163 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
41164 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
41165 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
41166 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
41167 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
41168 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
41169 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
41170 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
41171 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
41172 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
41173 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
41174 +3 2 2 4 4 4
41175 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
41176 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
41177 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
41178 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
41179 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
41180 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
41181 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
41182 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
41183 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
41184 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
41185 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
41186 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
41187 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
41188 +4 4 4 4 4 4
41189 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
41190 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
41191 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
41192 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
41193 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
41194 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
41195 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
41196 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
41197 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
41198 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
41199 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
41200 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
41201 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
41202 +4 4 4 4 4 4
41203 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
41204 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
41205 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
41206 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
41207 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
41208 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41209 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
41210 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
41211 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
41212 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
41213 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
41214 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
41215 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
41216 +5 5 5 5 5 5
41217 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
41218 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
41219 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
41220 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
41221 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
41222 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41223 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
41224 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
41225 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
41226 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
41227 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
41228 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
41229 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41230 +5 5 5 4 4 4
41231 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
41232 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
41233 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
41234 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
41235 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41236 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
41237 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
41238 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
41239 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
41240 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
41241 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
41242 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41244 +4 4 4 4 4 4
41245 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
41246 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
41247 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
41248 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
41249 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
41250 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41251 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41252 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
41253 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
41254 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
41255 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
41256 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
41257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258 +4 4 4 4 4 4
41259 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
41260 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
41261 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
41262 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
41263 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41264 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
41265 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
41266 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
41267 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
41268 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
41269 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
41270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272 +4 4 4 4 4 4
41273 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
41274 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
41275 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
41276 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
41277 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41278 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41279 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
41280 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
41281 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
41282 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
41283 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
41284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286 +4 4 4 4 4 4
41287 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
41288 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
41289 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
41290 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
41291 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41292 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
41293 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
41294 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
41295 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
41296 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
41297 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300 +4 4 4 4 4 4
41301 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
41302 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
41303 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
41304 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
41305 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
41306 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
41307 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
41308 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
41309 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
41310 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
41311 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
41312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314 +4 4 4 4 4 4
41315 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
41316 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
41317 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
41318 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
41319 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
41320 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
41321 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
41322 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
41323 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
41324 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
41325 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
41326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328 +4 4 4 4 4 4
41329 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
41330 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
41331 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
41332 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41333 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
41334 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
41335 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
41336 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
41337 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
41338 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
41339 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342 +4 4 4 4 4 4
41343 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
41344 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
41345 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
41346 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41347 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41348 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
41349 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
41350 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
41351 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
41352 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
41353 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41356 +4 4 4 4 4 4
41357 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
41358 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
41359 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41360 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
41361 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41362 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
41363 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
41364 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
41365 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
41366 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
41367 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41368 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41369 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41370 +4 4 4 4 4 4
41371 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
41372 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
41373 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41374 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
41375 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41376 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
41377 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
41378 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
41379 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41380 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41381 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41382 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41383 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41384 +4 4 4 4 4 4
41385 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41386 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
41387 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
41388 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
41389 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
41390 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
41391 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
41392 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
41393 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41394 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41395 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41396 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41397 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41398 +4 4 4 4 4 4
41399 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
41400 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
41401 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
41402 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
41403 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41404 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
41405 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
41406 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
41407 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41408 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41409 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41410 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41411 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41412 +4 4 4 4 4 4
41413 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
41414 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
41415 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41416 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
41417 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
41418 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
41419 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
41420 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
41421 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41422 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41423 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41424 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41425 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41426 +4 4 4 4 4 4
41427 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
41428 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
41429 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41430 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
41431 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
41432 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
41433 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
41434 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
41435 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
41436 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41437 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41438 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41439 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41440 +4 4 4 4 4 4
41441 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41442 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
41443 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
41444 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
41445 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
41446 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
41447 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
41448 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
41449 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41450 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41451 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41452 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41453 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41454 +4 4 4 4 4 4
41455 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
41456 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
41457 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41458 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
41459 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
41460 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
41461 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
41462 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
41463 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
41464 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41465 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41466 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41467 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41468 +4 4 4 4 4 4
41469 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
41470 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
41471 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
41472 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
41473 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
41474 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
41475 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
41476 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
41477 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41478 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41479 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41480 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41481 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41482 +4 4 4 4 4 4
41483 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41484 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
41485 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
41486 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
41487 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
41488 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
41489 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
41490 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
41491 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41492 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41493 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41494 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41495 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41496 +4 4 4 4 4 4
41497 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41498 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
41499 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
41500 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
41501 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
41502 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
41503 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41504 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
41505 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
41506 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41507 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41508 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41509 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41510 +4 4 4 4 4 4
41511 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41512 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
41513 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
41514 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41515 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
41516 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
41517 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
41518 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
41519 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
41520 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41521 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41522 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41523 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41524 +4 4 4 4 4 4
41525 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
41526 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
41527 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
41528 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
41529 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
41530 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
41531 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
41532 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
41533 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
41534 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41535 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41536 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41537 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41538 +4 4 4 4 4 4
41539 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41540 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
41541 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
41542 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
41543 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
41544 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
41545 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
41546 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
41547 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
41548 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41549 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41550 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41551 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41552 +4 4 4 4 4 4
41553 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
41554 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
41555 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
41556 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
41557 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
41558 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
41559 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
41560 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
41561 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
41562 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41563 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41564 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41565 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41566 +4 4 4 4 4 4
41567 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
41568 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
41569 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
41570 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
41571 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
41572 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
41573 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
41574 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
41575 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
41576 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
41577 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41578 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41579 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41580 +4 4 4 4 4 4
41581 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41582 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41583 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41584 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41585 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41586 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41587 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41588 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41589 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41590 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41591 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41592 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41593 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41594 +4 4 4 4 4 4
41595 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41596 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41597 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41598 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41599 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41600 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41601 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41602 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41603 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41604 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41605 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41606 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41607 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41608 +4 4 4 4 4 4
41609 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41610 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41611 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41612 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41613 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41614 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41615 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41616 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41617 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41618 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41619 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41620 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41621 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41622 +4 4 4 4 4 4
41623 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41624 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41625 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41626 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41627 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41628 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41629 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41630 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41631 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41632 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41633 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41634 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41635 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41636 +4 4 4 4 4 4
41637 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41638 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41639 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41640 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41641 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41642 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41643 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41644 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41645 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41646 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41648 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41649 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41650 +4 4 4 4 4 4
41651 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41652 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41653 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41654 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41655 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41656 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41657 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41658 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41659 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41660 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41662 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41663 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41664 +4 4 4 4 4 4
41665 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41666 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41667 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41668 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41669 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41670 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41671 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41672 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41673 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41674 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41676 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41677 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41678 +4 4 4 4 4 4
41679 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41680 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41681 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41682 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41683 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41684 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41685 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41686 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41687 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41688 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41691 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41692 +4 4 4 4 4 4
41693 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41694 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41695 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41696 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41697 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41698 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41699 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41700 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41701 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41702 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41706 +4 4 4 4 4 4
41707 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41708 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41709 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41710 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41711 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41712 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41713 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41714 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41715 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41720 +4 4 4 4 4 4
41721 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41722 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41723 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41724 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41725 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41726 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41727 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41728 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41729 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41734 +4 4 4 4 4 4
41735 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41736 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41737 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41738 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41739 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41740 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41741 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41742 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41748 +4 4 4 4 4 4
41749 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41750 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41751 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41752 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41753 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41754 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41755 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41756 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41762 +4 4 4 4 4 4
41763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41764 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41765 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41766 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41767 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41768 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41769 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41770 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41776 +4 4 4 4 4 4
41777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41778 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41779 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41780 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41781 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41782 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41783 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41784 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41790 +4 4 4 4 4 4
41791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41792 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41793 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41794 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41795 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41796 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41797 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41798 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41804 +4 4 4 4 4 4
41805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41806 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41807 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41808 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41809 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41810 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41811 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41812 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41818 +4 4 4 4 4 4
41819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41820 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41822 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41823 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41824 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41825 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41826 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41830 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41831 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41832 +4 4 4 4 4 4
41833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41834 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41836 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41837 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41838 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41839 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41840 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41841 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41843 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41844 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41845 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41846 +4 4 4 4 4 4
41847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41848 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41850 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41851 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41852 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41853 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41854 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41855 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41856 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41857 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41858 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41859 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41860 +4 4 4 4 4 4
41861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41862 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41864 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41865 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41866 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41867 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41868 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41869 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41870 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41871 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41872 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41873 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41874 +4 4 4 4 4 4
41875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41876 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41879 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41880 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41881 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41882 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41883 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41884 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41885 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41886 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41887 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41888 +4 4 4 4 4 4
41889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41890 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41893 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41894 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41895 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41896 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41897 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41898 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41899 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41900 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41901 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41902 +4 4 4 4 4 4
41903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41904 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41907 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41908 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41909 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41910 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41911 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41912 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41913 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41914 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41915 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41916 +4 4 4 4 4 4
41917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41918 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41921 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41922 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41923 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41924 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41925 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41926 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41927 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41928 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41929 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41930 +4 4 4 4 4 4
41931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41932 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41934 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41935 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41936 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41937 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41938 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41939 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41940 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41941 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41942 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41943 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41944 +4 4 4 4 4 4
41945 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41946 index a40c05e..785c583 100644
41947 --- a/drivers/video/udlfb.c
41948 +++ b/drivers/video/udlfb.c
41949 @@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41950 dlfb_urb_completion(urb);
41951
41952 error:
41953 - atomic_add(bytes_sent, &dev->bytes_sent);
41954 - atomic_add(bytes_identical, &dev->bytes_identical);
41955 - atomic_add(width*height*2, &dev->bytes_rendered);
41956 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41957 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41958 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41959 end_cycles = get_cycles();
41960 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41961 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41962 >> 10)), /* Kcycles */
41963 &dev->cpu_kcycles_used);
41964
41965 @@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41966 dlfb_urb_completion(urb);
41967
41968 error:
41969 - atomic_add(bytes_sent, &dev->bytes_sent);
41970 - atomic_add(bytes_identical, &dev->bytes_identical);
41971 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41972 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41973 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41974 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41975 end_cycles = get_cycles();
41976 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41977 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41978 >> 10)), /* Kcycles */
41979 &dev->cpu_kcycles_used);
41980 }
41981 @@ -1368,7 +1368,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41982 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41983 struct dlfb_data *dev = fb_info->par;
41984 return snprintf(buf, PAGE_SIZE, "%u\n",
41985 - atomic_read(&dev->bytes_rendered));
41986 + atomic_read_unchecked(&dev->bytes_rendered));
41987 }
41988
41989 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41990 @@ -1376,7 +1376,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41991 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41992 struct dlfb_data *dev = fb_info->par;
41993 return snprintf(buf, PAGE_SIZE, "%u\n",
41994 - atomic_read(&dev->bytes_identical));
41995 + atomic_read_unchecked(&dev->bytes_identical));
41996 }
41997
41998 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41999 @@ -1384,7 +1384,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
42000 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42001 struct dlfb_data *dev = fb_info->par;
42002 return snprintf(buf, PAGE_SIZE, "%u\n",
42003 - atomic_read(&dev->bytes_sent));
42004 + atomic_read_unchecked(&dev->bytes_sent));
42005 }
42006
42007 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42008 @@ -1392,7 +1392,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
42009 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42010 struct dlfb_data *dev = fb_info->par;
42011 return snprintf(buf, PAGE_SIZE, "%u\n",
42012 - atomic_read(&dev->cpu_kcycles_used));
42013 + atomic_read_unchecked(&dev->cpu_kcycles_used));
42014 }
42015
42016 static ssize_t edid_show(
42017 @@ -1449,10 +1449,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
42018 struct fb_info *fb_info = dev_get_drvdata(fbdev);
42019 struct dlfb_data *dev = fb_info->par;
42020
42021 - atomic_set(&dev->bytes_rendered, 0);
42022 - atomic_set(&dev->bytes_identical, 0);
42023 - atomic_set(&dev->bytes_sent, 0);
42024 - atomic_set(&dev->cpu_kcycles_used, 0);
42025 + atomic_set_unchecked(&dev->bytes_rendered, 0);
42026 + atomic_set_unchecked(&dev->bytes_identical, 0);
42027 + atomic_set_unchecked(&dev->bytes_sent, 0);
42028 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
42029
42030 return count;
42031 }
42032 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
42033 index 8408543..357841c 100644
42034 --- a/drivers/video/uvesafb.c
42035 +++ b/drivers/video/uvesafb.c
42036 @@ -19,6 +19,7 @@
42037 #include <linux/io.h>
42038 #include <linux/mutex.h>
42039 #include <linux/slab.h>
42040 +#include <linux/moduleloader.h>
42041 #include <video/edid.h>
42042 #include <video/uvesafb.h>
42043 #ifdef CONFIG_X86
42044 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
42045 NULL,
42046 };
42047
42048 - return call_usermodehelper(v86d_path, argv, envp, 1);
42049 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
42050 }
42051
42052 /*
42053 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
42054 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
42055 par->pmi_setpal = par->ypan = 0;
42056 } else {
42057 +
42058 +#ifdef CONFIG_PAX_KERNEXEC
42059 +#ifdef CONFIG_MODULES
42060 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
42061 +#endif
42062 + if (!par->pmi_code) {
42063 + par->pmi_setpal = par->ypan = 0;
42064 + return 0;
42065 + }
42066 +#endif
42067 +
42068 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
42069 + task->t.regs.edi);
42070 +
42071 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42072 + pax_open_kernel();
42073 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
42074 + pax_close_kernel();
42075 +
42076 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
42077 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
42078 +#else
42079 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
42080 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
42081 +#endif
42082 +
42083 printk(KERN_INFO "uvesafb: protected mode interface info at "
42084 "%04x:%04x\n",
42085 (u16)task->t.regs.es, (u16)task->t.regs.edi);
42086 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
42087 par->ypan = ypan;
42088
42089 if (par->pmi_setpal || par->ypan) {
42090 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
42091 if (__supported_pte_mask & _PAGE_NX) {
42092 par->pmi_setpal = par->ypan = 0;
42093 printk(KERN_WARNING "uvesafb: NX protection is actively."
42094 "We have better not to use the PMI.\n");
42095 - } else {
42096 + } else
42097 +#endif
42098 uvesafb_vbe_getpmi(task, par);
42099 - }
42100 }
42101 #else
42102 /* The protected mode interface is not available on non-x86. */
42103 @@ -1828,6 +1852,11 @@ out:
42104 if (par->vbe_modes)
42105 kfree(par->vbe_modes);
42106
42107 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42108 + if (par->pmi_code)
42109 + module_free_exec(NULL, par->pmi_code);
42110 +#endif
42111 +
42112 framebuffer_release(info);
42113 return err;
42114 }
42115 @@ -1854,6 +1883,12 @@ static int uvesafb_remove(struct platform_device *dev)
42116 kfree(par->vbe_state_orig);
42117 if (par->vbe_state_saved)
42118 kfree(par->vbe_state_saved);
42119 +
42120 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42121 + if (par->pmi_code)
42122 + module_free_exec(NULL, par->pmi_code);
42123 +#endif
42124 +
42125 }
42126
42127 framebuffer_release(info);
42128 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
42129 index 501b340..86bd4cf 100644
42130 --- a/drivers/video/vesafb.c
42131 +++ b/drivers/video/vesafb.c
42132 @@ -9,6 +9,7 @@
42133 */
42134
42135 #include <linux/module.h>
42136 +#include <linux/moduleloader.h>
42137 #include <linux/kernel.h>
42138 #include <linux/errno.h>
42139 #include <linux/string.h>
42140 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
42141 static int vram_total __initdata; /* Set total amount of memory */
42142 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
42143 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
42144 -static void (*pmi_start)(void) __read_mostly;
42145 -static void (*pmi_pal) (void) __read_mostly;
42146 +static void (*pmi_start)(void) __read_only;
42147 +static void (*pmi_pal) (void) __read_only;
42148 static int depth __read_mostly;
42149 static int vga_compat __read_mostly;
42150 /* --------------------------------------------------------------------- */
42151 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
42152 unsigned int size_vmode;
42153 unsigned int size_remap;
42154 unsigned int size_total;
42155 + void *pmi_code = NULL;
42156
42157 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
42158 return -ENODEV;
42159 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
42160 size_remap = size_total;
42161 vesafb_fix.smem_len = size_remap;
42162
42163 -#ifndef __i386__
42164 - screen_info.vesapm_seg = 0;
42165 -#endif
42166 -
42167 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
42168 printk(KERN_WARNING
42169 "vesafb: cannot reserve video memory at 0x%lx\n",
42170 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
42171 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
42172 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
42173
42174 +#ifdef __i386__
42175 +
42176 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42177 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
42178 + if (!pmi_code)
42179 +#elif !defined(CONFIG_PAX_KERNEXEC)
42180 + if (0)
42181 +#endif
42182 +
42183 +#endif
42184 + screen_info.vesapm_seg = 0;
42185 +
42186 if (screen_info.vesapm_seg) {
42187 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
42188 - screen_info.vesapm_seg,screen_info.vesapm_off);
42189 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
42190 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
42191 }
42192
42193 if (screen_info.vesapm_seg < 0xc000)
42194 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
42195
42196 if (ypan || pmi_setpal) {
42197 unsigned short *pmi_base;
42198 +
42199 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
42200 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
42201 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
42202 +
42203 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42204 + pax_open_kernel();
42205 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
42206 +#else
42207 + pmi_code = pmi_base;
42208 +#endif
42209 +
42210 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
42211 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
42212 +
42213 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42214 + pmi_start = ktva_ktla(pmi_start);
42215 + pmi_pal = ktva_ktla(pmi_pal);
42216 + pax_close_kernel();
42217 +#endif
42218 +
42219 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
42220 if (pmi_base[3]) {
42221 printk(KERN_INFO "vesafb: pmi: ports = ");
42222 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
42223 info->node, info->fix.id);
42224 return 0;
42225 err:
42226 +
42227 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
42228 + module_free_exec(NULL, pmi_code);
42229 +#endif
42230 +
42231 if (info->screen_base)
42232 iounmap(info->screen_base);
42233 framebuffer_release(info);
42234 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
42235 index 88714ae..16c2e11 100644
42236 --- a/drivers/video/via/via_clock.h
42237 +++ b/drivers/video/via/via_clock.h
42238 @@ -56,7 +56,7 @@ struct via_clock {
42239
42240 void (*set_engine_pll_state)(u8 state);
42241 void (*set_engine_pll)(struct via_pll_config config);
42242 -};
42243 +} __no_const;
42244
42245
42246 static inline u32 get_pll_internal_frequency(u32 ref_freq,
42247 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
42248 index e56c934..fc22f4b 100644
42249 --- a/drivers/xen/xen-pciback/conf_space.h
42250 +++ b/drivers/xen/xen-pciback/conf_space.h
42251 @@ -44,15 +44,15 @@ struct config_field {
42252 struct {
42253 conf_dword_write write;
42254 conf_dword_read read;
42255 - } dw;
42256 + } __no_const dw;
42257 struct {
42258 conf_word_write write;
42259 conf_word_read read;
42260 - } w;
42261 + } __no_const w;
42262 struct {
42263 conf_byte_write write;
42264 conf_byte_read read;
42265 - } b;
42266 + } __no_const b;
42267 } u;
42268 struct list_head list;
42269 };
42270 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
42271 index 014c8dd..6f3dfe6 100644
42272 --- a/fs/9p/vfs_inode.c
42273 +++ b/fs/9p/vfs_inode.c
42274 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
42275 void
42276 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42277 {
42278 - char *s = nd_get_link(nd);
42279 + const char *s = nd_get_link(nd);
42280
42281 p9_debug(P9_DEBUG_VFS, " %s %s\n",
42282 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
42283 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
42284 index e95d1b6..3454244 100644
42285 --- a/fs/Kconfig.binfmt
42286 +++ b/fs/Kconfig.binfmt
42287 @@ -89,7 +89,7 @@ config HAVE_AOUT
42288
42289 config BINFMT_AOUT
42290 tristate "Kernel support for a.out and ECOFF binaries"
42291 - depends on HAVE_AOUT
42292 + depends on HAVE_AOUT && BROKEN
42293 ---help---
42294 A.out (Assembler.OUTput) is a set of formats for libraries and
42295 executables used in the earliest versions of UNIX. Linux used
42296 diff --git a/fs/aio.c b/fs/aio.c
42297 index b9d64d8..86cb1d5 100644
42298 --- a/fs/aio.c
42299 +++ b/fs/aio.c
42300 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx *ctx)
42301 size += sizeof(struct io_event) * nr_events;
42302 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
42303
42304 - if (nr_pages < 0)
42305 + if (nr_pages <= 0)
42306 return -EINVAL;
42307
42308 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
42309 @@ -1461,22 +1461,27 @@ static ssize_t aio_fsync(struct kiocb *iocb)
42310 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
42311 {
42312 ssize_t ret;
42313 + struct iovec iovstack;
42314
42315 #ifdef CONFIG_COMPAT
42316 if (compat)
42317 ret = compat_rw_copy_check_uvector(type,
42318 (struct compat_iovec __user *)kiocb->ki_buf,
42319 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42320 + kiocb->ki_nbytes, 1, &iovstack,
42321 &kiocb->ki_iovec, 1);
42322 else
42323 #endif
42324 ret = rw_copy_check_uvector(type,
42325 (struct iovec __user *)kiocb->ki_buf,
42326 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
42327 + kiocb->ki_nbytes, 1, &iovstack,
42328 &kiocb->ki_iovec, 1);
42329 if (ret < 0)
42330 goto out;
42331
42332 + if (kiocb->ki_iovec == &iovstack) {
42333 + kiocb->ki_inline_vec = iovstack;
42334 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
42335 + }
42336 kiocb->ki_nr_segs = kiocb->ki_nbytes;
42337 kiocb->ki_cur_seg = 0;
42338 /* ki_nbytes/left now reflect bytes instead of segs */
42339 diff --git a/fs/attr.c b/fs/attr.c
42340 index 95053ad..2cc93ca 100644
42341 --- a/fs/attr.c
42342 +++ b/fs/attr.c
42343 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
42344 unsigned long limit;
42345
42346 limit = rlimit(RLIMIT_FSIZE);
42347 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
42348 if (limit != RLIM_INFINITY && offset > limit)
42349 goto out_sig;
42350 if (offset > inode->i_sb->s_maxbytes)
42351 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
42352 index f624cd0..3d9a559 100644
42353 --- a/fs/autofs4/waitq.c
42354 +++ b/fs/autofs4/waitq.c
42355 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
42356 {
42357 unsigned long sigpipe, flags;
42358 mm_segment_t fs;
42359 - const char *data = (const char *)addr;
42360 + const char __user *data = (const char __force_user *)addr;
42361 ssize_t wr = 0;
42362
42363 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
42364 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
42365 index 6e6d536..457113a 100644
42366 --- a/fs/befs/linuxvfs.c
42367 +++ b/fs/befs/linuxvfs.c
42368 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42369 {
42370 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
42371 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
42372 - char *link = nd_get_link(nd);
42373 + const char *link = nd_get_link(nd);
42374 if (!IS_ERR(link))
42375 kfree(link);
42376 }
42377 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
42378 index 1ff9405..f1e376a 100644
42379 --- a/fs/binfmt_aout.c
42380 +++ b/fs/binfmt_aout.c
42381 @@ -16,6 +16,7 @@
42382 #include <linux/string.h>
42383 #include <linux/fs.h>
42384 #include <linux/file.h>
42385 +#include <linux/security.h>
42386 #include <linux/stat.h>
42387 #include <linux/fcntl.h>
42388 #include <linux/ptrace.h>
42389 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredump_params *cprm)
42390 #endif
42391 # define START_STACK(u) ((void __user *)u.start_stack)
42392
42393 + memset(&dump, 0, sizeof(dump));
42394 +
42395 fs = get_fs();
42396 set_fs(KERNEL_DS);
42397 has_dumped = 1;
42398 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredump_params *cprm)
42399
42400 /* If the size of the dump file exceeds the rlimit, then see what would happen
42401 if we wrote the stack, but not the data area. */
42402 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
42403 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
42404 dump.u_dsize = 0;
42405
42406 /* Make sure we have enough room to write the stack and data areas. */
42407 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
42408 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
42409 dump.u_ssize = 0;
42410
42411 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42412 rlim = rlimit(RLIMIT_DATA);
42413 if (rlim >= RLIM_INFINITY)
42414 rlim = ~0;
42415 +
42416 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
42417 if (ex.a_data + ex.a_bss > rlim)
42418 return -ENOMEM;
42419
42420 @@ -269,6 +276,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42421 install_exec_creds(bprm);
42422 current->flags &= ~PF_FORKNOEXEC;
42423
42424 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42425 + current->mm->pax_flags = 0UL;
42426 +#endif
42427 +
42428 +#ifdef CONFIG_PAX_PAGEEXEC
42429 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
42430 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
42431 +
42432 +#ifdef CONFIG_PAX_EMUTRAMP
42433 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
42434 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
42435 +#endif
42436 +
42437 +#ifdef CONFIG_PAX_MPROTECT
42438 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
42439 + current->mm->pax_flags |= MF_PAX_MPROTECT;
42440 +#endif
42441 +
42442 + }
42443 +#endif
42444 +
42445 if (N_MAGIC(ex) == OMAGIC) {
42446 unsigned long text_addr, map_size;
42447 loff_t pos;
42448 @@ -341,7 +369,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
42449
42450 down_write(&current->mm->mmap_sem);
42451 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
42452 - PROT_READ | PROT_WRITE | PROT_EXEC,
42453 + PROT_READ | PROT_WRITE,
42454 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
42455 fd_offset + ex.a_text);
42456 up_write(&current->mm->mmap_sem);
42457 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
42458 index 07d096c..25762af 100644
42459 --- a/fs/binfmt_elf.c
42460 +++ b/fs/binfmt_elf.c
42461 @@ -32,6 +32,7 @@
42462 #include <linux/elf.h>
42463 #include <linux/utsname.h>
42464 #include <linux/coredump.h>
42465 +#include <linux/xattr.h>
42466 #include <asm/uaccess.h>
42467 #include <asm/param.h>
42468 #include <asm/page.h>
42469 @@ -51,6 +52,10 @@ static int elf_core_dump(struct coredump_params *cprm);
42470 #define elf_core_dump NULL
42471 #endif
42472
42473 +#ifdef CONFIG_PAX_MPROTECT
42474 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
42475 +#endif
42476 +
42477 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
42478 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
42479 #else
42480 @@ -70,6 +75,11 @@ static struct linux_binfmt elf_format = {
42481 .load_binary = load_elf_binary,
42482 .load_shlib = load_elf_library,
42483 .core_dump = elf_core_dump,
42484 +
42485 +#ifdef CONFIG_PAX_MPROTECT
42486 + .handle_mprotect= elf_handle_mprotect,
42487 +#endif
42488 +
42489 .min_coredump = ELF_EXEC_PAGESIZE,
42490 };
42491
42492 @@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
42493
42494 static int set_brk(unsigned long start, unsigned long end)
42495 {
42496 + unsigned long e = end;
42497 +
42498 start = ELF_PAGEALIGN(start);
42499 end = ELF_PAGEALIGN(end);
42500 if (end > start) {
42501 @@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
42502 if (BAD_ADDR(addr))
42503 return addr;
42504 }
42505 - current->mm->start_brk = current->mm->brk = end;
42506 + current->mm->start_brk = current->mm->brk = e;
42507 return 0;
42508 }
42509
42510 @@ -148,12 +160,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42511 elf_addr_t __user *u_rand_bytes;
42512 const char *k_platform = ELF_PLATFORM;
42513 const char *k_base_platform = ELF_BASE_PLATFORM;
42514 - unsigned char k_rand_bytes[16];
42515 + u32 k_rand_bytes[4];
42516 int items;
42517 elf_addr_t *elf_info;
42518 int ei_index = 0;
42519 const struct cred *cred = current_cred();
42520 struct vm_area_struct *vma;
42521 + unsigned long saved_auxv[AT_VECTOR_SIZE];
42522
42523 /*
42524 * In some cases (e.g. Hyper-Threading), we want to avoid L1
42525 @@ -195,8 +208,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42526 * Generate 16 random bytes for userspace PRNG seeding.
42527 */
42528 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
42529 - u_rand_bytes = (elf_addr_t __user *)
42530 - STACK_ALLOC(p, sizeof(k_rand_bytes));
42531 + srandom32(k_rand_bytes[0] ^ random32());
42532 + srandom32(k_rand_bytes[1] ^ random32());
42533 + srandom32(k_rand_bytes[2] ^ random32());
42534 + srandom32(k_rand_bytes[3] ^ random32());
42535 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
42536 + u_rand_bytes = (elf_addr_t __user *) p;
42537 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
42538 return -EFAULT;
42539
42540 @@ -308,9 +325,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
42541 return -EFAULT;
42542 current->mm->env_end = p;
42543
42544 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
42545 +
42546 /* Put the elf_info on the stack in the right place. */
42547 sp = (elf_addr_t __user *)envp + 1;
42548 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
42549 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
42550 return -EFAULT;
42551 return 0;
42552 }
42553 @@ -381,10 +400,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42554 {
42555 struct elf_phdr *elf_phdata;
42556 struct elf_phdr *eppnt;
42557 - unsigned long load_addr = 0;
42558 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
42559 int load_addr_set = 0;
42560 unsigned long last_bss = 0, elf_bss = 0;
42561 - unsigned long error = ~0UL;
42562 + unsigned long error = -EINVAL;
42563 unsigned long total_size;
42564 int retval, i, size;
42565
42566 @@ -430,6 +449,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42567 goto out_close;
42568 }
42569
42570 +#ifdef CONFIG_PAX_SEGMEXEC
42571 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42572 + pax_task_size = SEGMEXEC_TASK_SIZE;
42573 +#endif
42574 +
42575 eppnt = elf_phdata;
42576 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42577 if (eppnt->p_type == PT_LOAD) {
42578 @@ -473,8 +497,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42579 k = load_addr + eppnt->p_vaddr;
42580 if (BAD_ADDR(k) ||
42581 eppnt->p_filesz > eppnt->p_memsz ||
42582 - eppnt->p_memsz > TASK_SIZE ||
42583 - TASK_SIZE - eppnt->p_memsz < k) {
42584 + eppnt->p_memsz > pax_task_size ||
42585 + pax_task_size - eppnt->p_memsz < k) {
42586 error = -ENOMEM;
42587 goto out_close;
42588 }
42589 @@ -528,6 +552,351 @@ out:
42590 return error;
42591 }
42592
42593 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
42594 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42595 +{
42596 + unsigned long pax_flags = 0UL;
42597 +
42598 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42599 +
42600 +#ifdef CONFIG_PAX_PAGEEXEC
42601 + if (elf_phdata->p_flags & PF_PAGEEXEC)
42602 + pax_flags |= MF_PAX_PAGEEXEC;
42603 +#endif
42604 +
42605 +#ifdef CONFIG_PAX_SEGMEXEC
42606 + if (elf_phdata->p_flags & PF_SEGMEXEC)
42607 + pax_flags |= MF_PAX_SEGMEXEC;
42608 +#endif
42609 +
42610 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42611 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42612 + if ((__supported_pte_mask & _PAGE_NX))
42613 + pax_flags &= ~MF_PAX_SEGMEXEC;
42614 + else
42615 + pax_flags &= ~MF_PAX_PAGEEXEC;
42616 + }
42617 +#endif
42618 +
42619 +#ifdef CONFIG_PAX_EMUTRAMP
42620 + if (elf_phdata->p_flags & PF_EMUTRAMP)
42621 + pax_flags |= MF_PAX_EMUTRAMP;
42622 +#endif
42623 +
42624 +#ifdef CONFIG_PAX_MPROTECT
42625 + if (elf_phdata->p_flags & PF_MPROTECT)
42626 + pax_flags |= MF_PAX_MPROTECT;
42627 +#endif
42628 +
42629 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42630 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42631 + pax_flags |= MF_PAX_RANDMMAP;
42632 +#endif
42633 +
42634 +#endif
42635 +
42636 + return pax_flags;
42637 +}
42638 +
42639 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42640 +{
42641 + unsigned long pax_flags = 0UL;
42642 +
42643 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42644 +
42645 +#ifdef CONFIG_PAX_PAGEEXEC
42646 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42647 + pax_flags |= MF_PAX_PAGEEXEC;
42648 +#endif
42649 +
42650 +#ifdef CONFIG_PAX_SEGMEXEC
42651 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42652 + pax_flags |= MF_PAX_SEGMEXEC;
42653 +#endif
42654 +
42655 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42656 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42657 + if ((__supported_pte_mask & _PAGE_NX))
42658 + pax_flags &= ~MF_PAX_SEGMEXEC;
42659 + else
42660 + pax_flags &= ~MF_PAX_PAGEEXEC;
42661 + }
42662 +#endif
42663 +
42664 +#ifdef CONFIG_PAX_EMUTRAMP
42665 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42666 + pax_flags |= MF_PAX_EMUTRAMP;
42667 +#endif
42668 +
42669 +#ifdef CONFIG_PAX_MPROTECT
42670 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42671 + pax_flags |= MF_PAX_MPROTECT;
42672 +#endif
42673 +
42674 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42675 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42676 + pax_flags |= MF_PAX_RANDMMAP;
42677 +#endif
42678 +
42679 +#endif
42680 +
42681 + return pax_flags;
42682 +}
42683 +
42684 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42685 +{
42686 + unsigned long pax_flags = 0UL;
42687 +
42688 +#ifdef CONFIG_PAX_EI_PAX
42689 +
42690 +#ifdef CONFIG_PAX_PAGEEXEC
42691 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42692 + pax_flags |= MF_PAX_PAGEEXEC;
42693 +#endif
42694 +
42695 +#ifdef CONFIG_PAX_SEGMEXEC
42696 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42697 + pax_flags |= MF_PAX_SEGMEXEC;
42698 +#endif
42699 +
42700 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42701 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42702 + if ((__supported_pte_mask & _PAGE_NX))
42703 + pax_flags &= ~MF_PAX_SEGMEXEC;
42704 + else
42705 + pax_flags &= ~MF_PAX_PAGEEXEC;
42706 + }
42707 +#endif
42708 +
42709 +#ifdef CONFIG_PAX_EMUTRAMP
42710 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42711 + pax_flags |= MF_PAX_EMUTRAMP;
42712 +#endif
42713 +
42714 +#ifdef CONFIG_PAX_MPROTECT
42715 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42716 + pax_flags |= MF_PAX_MPROTECT;
42717 +#endif
42718 +
42719 +#ifdef CONFIG_PAX_ASLR
42720 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42721 + pax_flags |= MF_PAX_RANDMMAP;
42722 +#endif
42723 +
42724 +#else
42725 +
42726 +#ifdef CONFIG_PAX_PAGEEXEC
42727 + pax_flags |= MF_PAX_PAGEEXEC;
42728 +#endif
42729 +
42730 +#ifdef CONFIG_PAX_MPROTECT
42731 + pax_flags |= MF_PAX_MPROTECT;
42732 +#endif
42733 +
42734 +#ifdef CONFIG_PAX_RANDMMAP
42735 + pax_flags |= MF_PAX_RANDMMAP;
42736 +#endif
42737 +
42738 +#ifdef CONFIG_PAX_SEGMEXEC
42739 + if (!(pax_flags & MF_PAX_PAGEEXEC) || !(__supported_pte_mask & _PAGE_NX)) {
42740 + pax_flags &= ~MF_PAX_PAGEEXEC;
42741 + pax_flags |= MF_PAX_SEGMEXEC;
42742 + }
42743 +#endif
42744 +
42745 +#endif
42746 +
42747 + return pax_flags;
42748 +}
42749 +
42750 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42751 +{
42752 +
42753 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42754 + unsigned long i;
42755 +
42756 + for (i = 0UL; i < elf_ex->e_phnum; i++)
42757 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42758 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42759 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42760 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42761 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42762 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42763 + return ~0UL;
42764 +
42765 +#ifdef CONFIG_PAX_SOFTMODE
42766 + if (pax_softmode)
42767 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42768 + else
42769 +#endif
42770 +
42771 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42772 + break;
42773 + }
42774 +#endif
42775 +
42776 + return ~0UL;
42777 +}
42778 +
42779 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42780 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42781 +{
42782 + unsigned long pax_flags = 0UL;
42783 +
42784 +#ifdef CONFIG_PAX_PAGEEXEC
42785 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42786 + pax_flags |= MF_PAX_PAGEEXEC;
42787 +#endif
42788 +
42789 +#ifdef CONFIG_PAX_SEGMEXEC
42790 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42791 + pax_flags |= MF_PAX_SEGMEXEC;
42792 +#endif
42793 +
42794 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42795 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42796 + if ((__supported_pte_mask & _PAGE_NX))
42797 + pax_flags &= ~MF_PAX_SEGMEXEC;
42798 + else
42799 + pax_flags &= ~MF_PAX_PAGEEXEC;
42800 + }
42801 +#endif
42802 +
42803 +#ifdef CONFIG_PAX_EMUTRAMP
42804 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42805 + pax_flags |= MF_PAX_EMUTRAMP;
42806 +#endif
42807 +
42808 +#ifdef CONFIG_PAX_MPROTECT
42809 + if (pax_flags_softmode & MF_PAX_MPROTECT)
42810 + pax_flags |= MF_PAX_MPROTECT;
42811 +#endif
42812 +
42813 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42814 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42815 + pax_flags |= MF_PAX_RANDMMAP;
42816 +#endif
42817 +
42818 + return pax_flags;
42819 +}
42820 +
42821 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42822 +{
42823 + unsigned long pax_flags = 0UL;
42824 +
42825 +#ifdef CONFIG_PAX_PAGEEXEC
42826 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42827 + pax_flags |= MF_PAX_PAGEEXEC;
42828 +#endif
42829 +
42830 +#ifdef CONFIG_PAX_SEGMEXEC
42831 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42832 + pax_flags |= MF_PAX_SEGMEXEC;
42833 +#endif
42834 +
42835 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42836 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42837 + if ((__supported_pte_mask & _PAGE_NX))
42838 + pax_flags &= ~MF_PAX_SEGMEXEC;
42839 + else
42840 + pax_flags &= ~MF_PAX_PAGEEXEC;
42841 + }
42842 +#endif
42843 +
42844 +#ifdef CONFIG_PAX_EMUTRAMP
42845 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42846 + pax_flags |= MF_PAX_EMUTRAMP;
42847 +#endif
42848 +
42849 +#ifdef CONFIG_PAX_MPROTECT
42850 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42851 + pax_flags |= MF_PAX_MPROTECT;
42852 +#endif
42853 +
42854 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42855 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42856 + pax_flags |= MF_PAX_RANDMMAP;
42857 +#endif
42858 +
42859 + return pax_flags;
42860 +}
42861 +#endif
42862 +
42863 +static unsigned long pax_parse_xattr_pax(struct file * const file)
42864 +{
42865 +
42866 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42867 + ssize_t xattr_size, i;
42868 + unsigned char xattr_value[5];
42869 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42870 +
42871 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42872 + if (xattr_size <= 0)
42873 + return ~0UL;
42874 +
42875 + for (i = 0; i < xattr_size; i++)
42876 + switch (xattr_value[i]) {
42877 + default:
42878 + return ~0UL;
42879 +
42880 +#define parse_flag(option1, option2, flag) \
42881 + case option1: \
42882 + pax_flags_hardmode |= MF_PAX_##flag; \
42883 + break; \
42884 + case option2: \
42885 + pax_flags_softmode |= MF_PAX_##flag; \
42886 + break;
42887 +
42888 + parse_flag('p', 'P', PAGEEXEC);
42889 + parse_flag('e', 'E', EMUTRAMP);
42890 + parse_flag('m', 'M', MPROTECT);
42891 + parse_flag('r', 'R', RANDMMAP);
42892 + parse_flag('s', 'S', SEGMEXEC);
42893 +
42894 +#undef parse_flag
42895 + }
42896 +
42897 + if (pax_flags_hardmode & pax_flags_softmode)
42898 + return ~0UL;
42899 +
42900 +#ifdef CONFIG_PAX_SOFTMODE
42901 + if (pax_softmode)
42902 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42903 + else
42904 +#endif
42905 +
42906 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42907 +#else
42908 + return ~0UL;
42909 +#endif
42910 +
42911 +}
42912 +
42913 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42914 +{
42915 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42916 +
42917 + pax_flags = pax_parse_ei_pax(elf_ex);
42918 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42919 + xattr_pax_flags = pax_parse_xattr_pax(file);
42920 +
42921 + if (pt_pax_flags == ~0UL)
42922 + pt_pax_flags = xattr_pax_flags;
42923 + else if (xattr_pax_flags == ~0UL)
42924 + xattr_pax_flags = pt_pax_flags;
42925 + if (pt_pax_flags != xattr_pax_flags)
42926 + return -EINVAL;
42927 + if (pt_pax_flags != ~0UL)
42928 + pax_flags = pt_pax_flags;
42929 +
42930 + if (0 > pax_check_flags(&pax_flags))
42931 + return -EINVAL;
42932 +
42933 + current->mm->pax_flags = pax_flags;
42934 + return 0;
42935 +}
42936 +#endif
42937 +
42938 /*
42939 * These are the functions used to load ELF style executables and shared
42940 * libraries. There is no binary dependent code anywhere else.
42941 @@ -544,6 +913,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42942 {
42943 unsigned int random_variable = 0;
42944
42945 +#ifdef CONFIG_PAX_RANDUSTACK
42946 + if (randomize_va_space)
42947 + return stack_top - current->mm->delta_stack;
42948 +#endif
42949 +
42950 if ((current->flags & PF_RANDOMIZE) &&
42951 !(current->personality & ADDR_NO_RANDOMIZE)) {
42952 random_variable = get_random_int() & STACK_RND_MASK;
42953 @@ -562,7 +936,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42954 unsigned long load_addr = 0, load_bias = 0;
42955 int load_addr_set = 0;
42956 char * elf_interpreter = NULL;
42957 - unsigned long error;
42958 + unsigned long error = 0;
42959 struct elf_phdr *elf_ppnt, *elf_phdata;
42960 unsigned long elf_bss, elf_brk;
42961 int retval, i;
42962 @@ -572,11 +946,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42963 unsigned long start_code, end_code, start_data, end_data;
42964 unsigned long reloc_func_desc __maybe_unused = 0;
42965 int executable_stack = EXSTACK_DEFAULT;
42966 - unsigned long def_flags = 0;
42967 struct {
42968 struct elfhdr elf_ex;
42969 struct elfhdr interp_elf_ex;
42970 } *loc;
42971 + unsigned long pax_task_size = TASK_SIZE;
42972
42973 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42974 if (!loc) {
42975 @@ -713,11 +1087,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42976
42977 /* OK, This is the point of no return */
42978 current->flags &= ~PF_FORKNOEXEC;
42979 - current->mm->def_flags = def_flags;
42980 +
42981 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42982 + current->mm->pax_flags = 0UL;
42983 +#endif
42984 +
42985 +#ifdef CONFIG_PAX_DLRESOLVE
42986 + current->mm->call_dl_resolve = 0UL;
42987 +#endif
42988 +
42989 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42990 + current->mm->call_syscall = 0UL;
42991 +#endif
42992 +
42993 +#ifdef CONFIG_PAX_ASLR
42994 + current->mm->delta_mmap = 0UL;
42995 + current->mm->delta_stack = 0UL;
42996 +#endif
42997 +
42998 + current->mm->def_flags = 0;
42999 +
43000 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
43001 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
43002 + send_sig(SIGKILL, current, 0);
43003 + goto out_free_dentry;
43004 + }
43005 +#endif
43006 +
43007 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43008 + pax_set_initial_flags(bprm);
43009 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
43010 + if (pax_set_initial_flags_func)
43011 + (pax_set_initial_flags_func)(bprm);
43012 +#endif
43013 +
43014 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43015 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
43016 + current->mm->context.user_cs_limit = PAGE_SIZE;
43017 + current->mm->def_flags |= VM_PAGEEXEC;
43018 + }
43019 +#endif
43020 +
43021 +#ifdef CONFIG_PAX_SEGMEXEC
43022 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
43023 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
43024 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
43025 + pax_task_size = SEGMEXEC_TASK_SIZE;
43026 + current->mm->def_flags |= VM_NOHUGEPAGE;
43027 + }
43028 +#endif
43029 +
43030 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
43031 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43032 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
43033 + put_cpu();
43034 + }
43035 +#endif
43036
43037 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
43038 may depend on the personality. */
43039 SET_PERSONALITY(loc->elf_ex);
43040 +
43041 +#ifdef CONFIG_PAX_ASLR
43042 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43043 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
43044 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
43045 + }
43046 +#endif
43047 +
43048 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43049 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43050 + executable_stack = EXSTACK_DISABLE_X;
43051 + current->personality &= ~READ_IMPLIES_EXEC;
43052 + } else
43053 +#endif
43054 +
43055 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
43056 current->personality |= READ_IMPLIES_EXEC;
43057
43058 @@ -808,6 +1252,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43059 #else
43060 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
43061 #endif
43062 +
43063 +#ifdef CONFIG_PAX_RANDMMAP
43064 + /* PaX: randomize base address at the default exe base if requested */
43065 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
43066 +#ifdef CONFIG_SPARC64
43067 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
43068 +#else
43069 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
43070 +#endif
43071 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
43072 + elf_flags |= MAP_FIXED;
43073 + }
43074 +#endif
43075 +
43076 }
43077
43078 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
43079 @@ -840,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43080 * allowed task size. Note that p_filesz must always be
43081 * <= p_memsz so it is only necessary to check p_memsz.
43082 */
43083 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43084 - elf_ppnt->p_memsz > TASK_SIZE ||
43085 - TASK_SIZE - elf_ppnt->p_memsz < k) {
43086 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
43087 + elf_ppnt->p_memsz > pax_task_size ||
43088 + pax_task_size - elf_ppnt->p_memsz < k) {
43089 /* set_brk can never work. Avoid overflows. */
43090 send_sig(SIGKILL, current, 0);
43091 retval = -EINVAL;
43092 @@ -881,11 +1339,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
43093 goto out_free_dentry;
43094 }
43095 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
43096 - send_sig(SIGSEGV, current, 0);
43097 - retval = -EFAULT; /* Nobody gets to see this, but.. */
43098 - goto out_free_dentry;
43099 + /*
43100 + * This bss-zeroing can fail if the ELF
43101 + * file specifies odd protections. So
43102 + * we don't check the return value
43103 + */
43104 }
43105
43106 +#ifdef CONFIG_PAX_RANDMMAP
43107 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
43108 + unsigned long start, size;
43109 +
43110 + start = ELF_PAGEALIGN(elf_brk);
43111 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
43112 + down_write(&current->mm->mmap_sem);
43113 + retval = -ENOMEM;
43114 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
43115 + unsigned long prot = PROT_NONE;
43116 +
43117 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
43118 +// if (current->personality & ADDR_NO_RANDOMIZE)
43119 +// prot = PROT_READ;
43120 + start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
43121 + retval = IS_ERR_VALUE(start) ? start : 0;
43122 + }
43123 + up_write(&current->mm->mmap_sem);
43124 + if (retval == 0)
43125 + retval = set_brk(start + size, start + size + PAGE_SIZE);
43126 + if (retval < 0) {
43127 + send_sig(SIGKILL, current, 0);
43128 + goto out_free_dentry;
43129 + }
43130 + }
43131 +#endif
43132 +
43133 if (elf_interpreter) {
43134 unsigned long uninitialized_var(interp_map_addr);
43135
43136 @@ -1098,7 +1585,7 @@ out:
43137 * Decide what to dump of a segment, part, all or none.
43138 */
43139 static unsigned long vma_dump_size(struct vm_area_struct *vma,
43140 - unsigned long mm_flags)
43141 + unsigned long mm_flags, long signr)
43142 {
43143 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
43144
43145 @@ -1132,7 +1619,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
43146 if (vma->vm_file == NULL)
43147 return 0;
43148
43149 - if (FILTER(MAPPED_PRIVATE))
43150 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
43151 goto whole;
43152
43153 /*
43154 @@ -1354,9 +1841,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
43155 {
43156 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
43157 int i = 0;
43158 - do
43159 + do {
43160 i += 2;
43161 - while (auxv[i - 2] != AT_NULL);
43162 + } while (auxv[i - 2] != AT_NULL);
43163 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
43164 }
43165
43166 @@ -1862,14 +2349,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
43167 }
43168
43169 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
43170 - unsigned long mm_flags)
43171 + struct coredump_params *cprm)
43172 {
43173 struct vm_area_struct *vma;
43174 size_t size = 0;
43175
43176 for (vma = first_vma(current, gate_vma); vma != NULL;
43177 vma = next_vma(vma, gate_vma))
43178 - size += vma_dump_size(vma, mm_flags);
43179 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43180 return size;
43181 }
43182
43183 @@ -1963,7 +2450,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43184
43185 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
43186
43187 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
43188 + offset += elf_core_vma_data_size(gate_vma, cprm);
43189 offset += elf_core_extra_data_size();
43190 e_shoff = offset;
43191
43192 @@ -1977,10 +2464,12 @@ static int elf_core_dump(struct coredump_params *cprm)
43193 offset = dataoff;
43194
43195 size += sizeof(*elf);
43196 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43197 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
43198 goto end_coredump;
43199
43200 size += sizeof(*phdr4note);
43201 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43202 if (size > cprm->limit
43203 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
43204 goto end_coredump;
43205 @@ -1994,7 +2483,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43206 phdr.p_offset = offset;
43207 phdr.p_vaddr = vma->vm_start;
43208 phdr.p_paddr = 0;
43209 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
43210 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43211 phdr.p_memsz = vma->vm_end - vma->vm_start;
43212 offset += phdr.p_filesz;
43213 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
43214 @@ -2005,6 +2494,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43215 phdr.p_align = ELF_EXEC_PAGESIZE;
43216
43217 size += sizeof(phdr);
43218 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43219 if (size > cprm->limit
43220 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
43221 goto end_coredump;
43222 @@ -2029,7 +2519,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43223 unsigned long addr;
43224 unsigned long end;
43225
43226 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
43227 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
43228
43229 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
43230 struct page *page;
43231 @@ -2038,6 +2528,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43232 page = get_dump_page(addr);
43233 if (page) {
43234 void *kaddr = kmap(page);
43235 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
43236 stop = ((size += PAGE_SIZE) > cprm->limit) ||
43237 !dump_write(cprm->file, kaddr,
43238 PAGE_SIZE);
43239 @@ -2055,6 +2546,7 @@ static int elf_core_dump(struct coredump_params *cprm)
43240
43241 if (e_phnum == PN_XNUM) {
43242 size += sizeof(*shdr4extnum);
43243 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
43244 if (size > cprm->limit
43245 || !dump_write(cprm->file, shdr4extnum,
43246 sizeof(*shdr4extnum)))
43247 @@ -2075,6 +2567,97 @@ out:
43248
43249 #endif /* CONFIG_ELF_CORE */
43250
43251 +#ifdef CONFIG_PAX_MPROTECT
43252 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
43253 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
43254 + * we'll remove VM_MAYWRITE for good on RELRO segments.
43255 + *
43256 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
43257 + * basis because we want to allow the common case and not the special ones.
43258 + */
43259 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
43260 +{
43261 + struct elfhdr elf_h;
43262 + struct elf_phdr elf_p;
43263 + unsigned long i;
43264 + unsigned long oldflags;
43265 + bool is_textrel_rw, is_textrel_rx, is_relro;
43266 +
43267 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
43268 + return;
43269 +
43270 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
43271 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
43272 +
43273 +#ifdef CONFIG_PAX_ELFRELOCS
43274 + /* possible TEXTREL */
43275 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
43276 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
43277 +#else
43278 + is_textrel_rw = false;
43279 + is_textrel_rx = false;
43280 +#endif
43281 +
43282 + /* possible RELRO */
43283 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
43284 +
43285 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
43286 + return;
43287 +
43288 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
43289 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
43290 +
43291 +#ifdef CONFIG_PAX_ETEXECRELOCS
43292 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43293 +#else
43294 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
43295 +#endif
43296 +
43297 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
43298 + !elf_check_arch(&elf_h) ||
43299 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
43300 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
43301 + return;
43302 +
43303 + for (i = 0UL; i < elf_h.e_phnum; i++) {
43304 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
43305 + return;
43306 + switch (elf_p.p_type) {
43307 + case PT_DYNAMIC:
43308 + if (!is_textrel_rw && !is_textrel_rx)
43309 + continue;
43310 + i = 0UL;
43311 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
43312 + elf_dyn dyn;
43313 +
43314 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
43315 + return;
43316 + if (dyn.d_tag == DT_NULL)
43317 + return;
43318 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
43319 + gr_log_textrel(vma);
43320 + if (is_textrel_rw)
43321 + vma->vm_flags |= VM_MAYWRITE;
43322 + else
43323 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
43324 + vma->vm_flags &= ~VM_MAYWRITE;
43325 + return;
43326 + }
43327 + i++;
43328 + }
43329 + return;
43330 +
43331 + case PT_GNU_RELRO:
43332 + if (!is_relro)
43333 + continue;
43334 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
43335 + vma->vm_flags &= ~VM_MAYWRITE;
43336 + return;
43337 + }
43338 + }
43339 +}
43340 +#endif
43341 +
43342 static int __init init_elf_binfmt(void)
43343 {
43344 return register_binfmt(&elf_format);
43345 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
43346 index 1bffbe0..c8c283e 100644
43347 --- a/fs/binfmt_flat.c
43348 +++ b/fs/binfmt_flat.c
43349 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_binprm * bprm,
43350 realdatastart = (unsigned long) -ENOMEM;
43351 printk("Unable to allocate RAM for process data, errno %d\n",
43352 (int)-realdatastart);
43353 + down_write(&current->mm->mmap_sem);
43354 do_munmap(current->mm, textpos, text_len);
43355 + up_write(&current->mm->mmap_sem);
43356 ret = realdatastart;
43357 goto err;
43358 }
43359 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43360 }
43361 if (IS_ERR_VALUE(result)) {
43362 printk("Unable to read data+bss, errno %d\n", (int)-result);
43363 + down_write(&current->mm->mmap_sem);
43364 do_munmap(current->mm, textpos, text_len);
43365 do_munmap(current->mm, realdatastart, len);
43366 + up_write(&current->mm->mmap_sem);
43367 ret = result;
43368 goto err;
43369 }
43370 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_binprm * bprm,
43371 }
43372 if (IS_ERR_VALUE(result)) {
43373 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
43374 + down_write(&current->mm->mmap_sem);
43375 do_munmap(current->mm, textpos, text_len + data_len + extra +
43376 MAX_SHARED_LIBS * sizeof(unsigned long));
43377 + up_write(&current->mm->mmap_sem);
43378 ret = result;
43379 goto err;
43380 }
43381 diff --git a/fs/bio.c b/fs/bio.c
43382 index b980ecd..74800bf 100644
43383 --- a/fs/bio.c
43384 +++ b/fs/bio.c
43385 @@ -833,7 +833,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
43386 /*
43387 * Overflow, abort
43388 */
43389 - if (end < start)
43390 + if (end < start || end - start > INT_MAX - nr_pages)
43391 return ERR_PTR(-EINVAL);
43392
43393 nr_pages += end - start;
43394 @@ -1229,7 +1229,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
43395 const int read = bio_data_dir(bio) == READ;
43396 struct bio_map_data *bmd = bio->bi_private;
43397 int i;
43398 - char *p = bmd->sgvecs[0].iov_base;
43399 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
43400
43401 __bio_for_each_segment(bvec, bio, i, 0) {
43402 char *addr = page_address(bvec->bv_page);
43403 diff --git a/fs/block_dev.c b/fs/block_dev.c
43404 index 5e9f198..6bf9b1c 100644
43405 --- a/fs/block_dev.c
43406 +++ b/fs/block_dev.c
43407 @@ -703,7 +703,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
43408 else if (bdev->bd_contains == bdev)
43409 return true; /* is a whole device which isn't held */
43410
43411 - else if (whole->bd_holder == bd_may_claim)
43412 + else if (whole->bd_holder == (void *)bd_may_claim)
43413 return true; /* is a partition of a device that is being partitioned */
43414 else if (whole->bd_holder != NULL)
43415 return false; /* is a partition of a held device */
43416 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
43417 index d986824..af1befd 100644
43418 --- a/fs/btrfs/check-integrity.c
43419 +++ b/fs/btrfs/check-integrity.c
43420 @@ -157,7 +157,7 @@ struct btrfsic_block {
43421 union {
43422 bio_end_io_t *bio;
43423 bh_end_io_t *bh;
43424 - } orig_bio_bh_end_io;
43425 + } __no_const orig_bio_bh_end_io;
43426 int submit_bio_bh_rw;
43427 u64 flush_gen; /* only valid if !never_written */
43428 };
43429 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
43430 index 0639a55..7d9e07f 100644
43431 --- a/fs/btrfs/ctree.c
43432 +++ b/fs/btrfs/ctree.c
43433 @@ -488,9 +488,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
43434 free_extent_buffer(buf);
43435 add_root_to_dirty_list(root);
43436 } else {
43437 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
43438 - parent_start = parent->start;
43439 - else
43440 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
43441 + if (parent)
43442 + parent_start = parent->start;
43443 + else
43444 + parent_start = 0;
43445 + } else
43446 parent_start = 0;
43447
43448 WARN_ON(trans->transid != btrfs_header_generation(parent));
43449 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
43450 index 892b347..b3db246 100644
43451 --- a/fs/btrfs/inode.c
43452 +++ b/fs/btrfs/inode.c
43453 @@ -6930,7 +6930,7 @@ fail:
43454 return -ENOMEM;
43455 }
43456
43457 -static int btrfs_getattr(struct vfsmount *mnt,
43458 +int btrfs_getattr(struct vfsmount *mnt,
43459 struct dentry *dentry, struct kstat *stat)
43460 {
43461 struct inode *inode = dentry->d_inode;
43462 @@ -6944,6 +6944,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
43463 return 0;
43464 }
43465
43466 +EXPORT_SYMBOL(btrfs_getattr);
43467 +
43468 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
43469 +{
43470 + return BTRFS_I(inode)->root->anon_dev;
43471 +}
43472 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
43473 +
43474 /*
43475 * If a file is moved, it will inherit the cow and compression flags of the new
43476 * directory.
43477 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
43478 index 1b36f19..5ac7360 100644
43479 --- a/fs/btrfs/ioctl.c
43480 +++ b/fs/btrfs/ioctl.c
43481 @@ -2783,9 +2783,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43482 for (i = 0; i < num_types; i++) {
43483 struct btrfs_space_info *tmp;
43484
43485 + /* Don't copy in more than we allocated */
43486 if (!slot_count)
43487 break;
43488
43489 + slot_count--;
43490 +
43491 info = NULL;
43492 rcu_read_lock();
43493 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
43494 @@ -2807,15 +2810,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
43495 memcpy(dest, &space, sizeof(space));
43496 dest++;
43497 space_args.total_spaces++;
43498 - slot_count--;
43499 }
43500 - if (!slot_count)
43501 - break;
43502 }
43503 up_read(&info->groups_sem);
43504 }
43505
43506 - user_dest = (struct btrfs_ioctl_space_info *)
43507 + user_dest = (struct btrfs_ioctl_space_info __user *)
43508 (arg + sizeof(struct btrfs_ioctl_space_args));
43509
43510 if (copy_to_user(user_dest, dest_orig, alloc_size))
43511 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
43512 index 8c1aae2..1e46446 100644
43513 --- a/fs/btrfs/relocation.c
43514 +++ b/fs/btrfs/relocation.c
43515 @@ -1244,7 +1244,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
43516 }
43517 spin_unlock(&rc->reloc_root_tree.lock);
43518
43519 - BUG_ON((struct btrfs_root *)node->data != root);
43520 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
43521
43522 if (!del) {
43523 spin_lock(&rc->reloc_root_tree.lock);
43524 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
43525 index 622f469..e8d2d55 100644
43526 --- a/fs/cachefiles/bind.c
43527 +++ b/fs/cachefiles/bind.c
43528 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
43529 args);
43530
43531 /* start by checking things over */
43532 - ASSERT(cache->fstop_percent >= 0 &&
43533 - cache->fstop_percent < cache->fcull_percent &&
43534 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
43535 cache->fcull_percent < cache->frun_percent &&
43536 cache->frun_percent < 100);
43537
43538 - ASSERT(cache->bstop_percent >= 0 &&
43539 - cache->bstop_percent < cache->bcull_percent &&
43540 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
43541 cache->bcull_percent < cache->brun_percent &&
43542 cache->brun_percent < 100);
43543
43544 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
43545 index 0a1467b..6a53245 100644
43546 --- a/fs/cachefiles/daemon.c
43547 +++ b/fs/cachefiles/daemon.c
43548 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
43549 if (n > buflen)
43550 return -EMSGSIZE;
43551
43552 - if (copy_to_user(_buffer, buffer, n) != 0)
43553 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
43554 return -EFAULT;
43555
43556 return n;
43557 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
43558 if (test_bit(CACHEFILES_DEAD, &cache->flags))
43559 return -EIO;
43560
43561 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
43562 + if (datalen > PAGE_SIZE - 1)
43563 return -EOPNOTSUPP;
43564
43565 /* drag the command string into the kernel so we can parse it */
43566 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
43567 if (args[0] != '%' || args[1] != '\0')
43568 return -EINVAL;
43569
43570 - if (fstop < 0 || fstop >= cache->fcull_percent)
43571 + if (fstop >= cache->fcull_percent)
43572 return cachefiles_daemon_range_error(cache, args);
43573
43574 cache->fstop_percent = fstop;
43575 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
43576 if (args[0] != '%' || args[1] != '\0')
43577 return -EINVAL;
43578
43579 - if (bstop < 0 || bstop >= cache->bcull_percent)
43580 + if (bstop >= cache->bcull_percent)
43581 return cachefiles_daemon_range_error(cache, args);
43582
43583 cache->bstop_percent = bstop;
43584 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
43585 index bd6bc1b..b627b53 100644
43586 --- a/fs/cachefiles/internal.h
43587 +++ b/fs/cachefiles/internal.h
43588 @@ -57,7 +57,7 @@ struct cachefiles_cache {
43589 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
43590 struct rb_root active_nodes; /* active nodes (can't be culled) */
43591 rwlock_t active_lock; /* lock for active_nodes */
43592 - atomic_t gravecounter; /* graveyard uniquifier */
43593 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
43594 unsigned frun_percent; /* when to stop culling (% files) */
43595 unsigned fcull_percent; /* when to start culling (% files) */
43596 unsigned fstop_percent; /* when to stop allocating (% files) */
43597 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43598 * proc.c
43599 */
43600 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43601 -extern atomic_t cachefiles_lookup_histogram[HZ];
43602 -extern atomic_t cachefiles_mkdir_histogram[HZ];
43603 -extern atomic_t cachefiles_create_histogram[HZ];
43604 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43605 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43606 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43607
43608 extern int __init cachefiles_proc_init(void);
43609 extern void cachefiles_proc_cleanup(void);
43610 static inline
43611 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43612 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43613 {
43614 unsigned long jif = jiffies - start_jif;
43615 if (jif >= HZ)
43616 jif = HZ - 1;
43617 - atomic_inc(&histogram[jif]);
43618 + atomic_inc_unchecked(&histogram[jif]);
43619 }
43620
43621 #else
43622 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43623 index a0358c2..d6137f2 100644
43624 --- a/fs/cachefiles/namei.c
43625 +++ b/fs/cachefiles/namei.c
43626 @@ -318,7 +318,7 @@ try_again:
43627 /* first step is to make up a grave dentry in the graveyard */
43628 sprintf(nbuffer, "%08x%08x",
43629 (uint32_t) get_seconds(),
43630 - (uint32_t) atomic_inc_return(&cache->gravecounter));
43631 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43632
43633 /* do the multiway lock magic */
43634 trap = lock_rename(cache->graveyard, dir);
43635 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43636 index eccd339..4c1d995 100644
43637 --- a/fs/cachefiles/proc.c
43638 +++ b/fs/cachefiles/proc.c
43639 @@ -14,9 +14,9 @@
43640 #include <linux/seq_file.h>
43641 #include "internal.h"
43642
43643 -atomic_t cachefiles_lookup_histogram[HZ];
43644 -atomic_t cachefiles_mkdir_histogram[HZ];
43645 -atomic_t cachefiles_create_histogram[HZ];
43646 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43647 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43648 +atomic_unchecked_t cachefiles_create_histogram[HZ];
43649
43650 /*
43651 * display the latency histogram
43652 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43653 return 0;
43654 default:
43655 index = (unsigned long) v - 3;
43656 - x = atomic_read(&cachefiles_lookup_histogram[index]);
43657 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
43658 - z = atomic_read(&cachefiles_create_histogram[index]);
43659 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43660 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43661 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43662 if (x == 0 && y == 0 && z == 0)
43663 return 0;
43664
43665 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43666 index 0e3c092..818480e 100644
43667 --- a/fs/cachefiles/rdwr.c
43668 +++ b/fs/cachefiles/rdwr.c
43669 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43670 old_fs = get_fs();
43671 set_fs(KERNEL_DS);
43672 ret = file->f_op->write(
43673 - file, (const void __user *) data, len, &pos);
43674 + file, (const void __force_user *) data, len, &pos);
43675 set_fs(old_fs);
43676 kunmap(page);
43677 if (ret != len)
43678 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43679 index 3e8094b..cb3ff3d 100644
43680 --- a/fs/ceph/dir.c
43681 +++ b/fs/ceph/dir.c
43682 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43683 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43684 struct ceph_mds_client *mdsc = fsc->mdsc;
43685 unsigned frag = fpos_frag(filp->f_pos);
43686 - int off = fpos_off(filp->f_pos);
43687 + unsigned int off = fpos_off(filp->f_pos);
43688 int err;
43689 u32 ftype;
43690 struct ceph_mds_reply_info_parsed *rinfo;
43691 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43692 if (nd &&
43693 (nd->flags & LOOKUP_OPEN) &&
43694 !(nd->intent.open.flags & O_CREAT)) {
43695 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
43696 + int mode = nd->intent.open.create_mode & ~current_umask();
43697 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43698 }
43699
43700 diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
43701 index cfd1ce3..6b13a74 100644
43702 --- a/fs/cifs/asn1.c
43703 +++ b/fs/cifs/asn1.c
43704 @@ -416,6 +416,9 @@ asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid)
43705
43706 static int
43707 asn1_oid_decode(struct asn1_ctx *ctx,
43708 + unsigned char *eoc, unsigned long **oid, unsigned int *len) __size_overflow(2);
43709 +static int
43710 +asn1_oid_decode(struct asn1_ctx *ctx,
43711 unsigned char *eoc, unsigned long **oid, unsigned int *len)
43712 {
43713 unsigned long subid;
43714 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43715 index 24b3dfc..3cd5454 100644
43716 --- a/fs/cifs/cifs_debug.c
43717 +++ b/fs/cifs/cifs_debug.c
43718 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43719
43720 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43721 #ifdef CONFIG_CIFS_STATS2
43722 - atomic_set(&totBufAllocCount, 0);
43723 - atomic_set(&totSmBufAllocCount, 0);
43724 + atomic_set_unchecked(&totBufAllocCount, 0);
43725 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43726 #endif /* CONFIG_CIFS_STATS2 */
43727 spin_lock(&cifs_tcp_ses_lock);
43728 list_for_each(tmp1, &cifs_tcp_ses_list) {
43729 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43730 tcon = list_entry(tmp3,
43731 struct cifs_tcon,
43732 tcon_list);
43733 - atomic_set(&tcon->num_smbs_sent, 0);
43734 - atomic_set(&tcon->num_writes, 0);
43735 - atomic_set(&tcon->num_reads, 0);
43736 - atomic_set(&tcon->num_oplock_brks, 0);
43737 - atomic_set(&tcon->num_opens, 0);
43738 - atomic_set(&tcon->num_posixopens, 0);
43739 - atomic_set(&tcon->num_posixmkdirs, 0);
43740 - atomic_set(&tcon->num_closes, 0);
43741 - atomic_set(&tcon->num_deletes, 0);
43742 - atomic_set(&tcon->num_mkdirs, 0);
43743 - atomic_set(&tcon->num_rmdirs, 0);
43744 - atomic_set(&tcon->num_renames, 0);
43745 - atomic_set(&tcon->num_t2renames, 0);
43746 - atomic_set(&tcon->num_ffirst, 0);
43747 - atomic_set(&tcon->num_fnext, 0);
43748 - atomic_set(&tcon->num_fclose, 0);
43749 - atomic_set(&tcon->num_hardlinks, 0);
43750 - atomic_set(&tcon->num_symlinks, 0);
43751 - atomic_set(&tcon->num_locks, 0);
43752 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43753 + atomic_set_unchecked(&tcon->num_writes, 0);
43754 + atomic_set_unchecked(&tcon->num_reads, 0);
43755 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43756 + atomic_set_unchecked(&tcon->num_opens, 0);
43757 + atomic_set_unchecked(&tcon->num_posixopens, 0);
43758 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43759 + atomic_set_unchecked(&tcon->num_closes, 0);
43760 + atomic_set_unchecked(&tcon->num_deletes, 0);
43761 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
43762 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
43763 + atomic_set_unchecked(&tcon->num_renames, 0);
43764 + atomic_set_unchecked(&tcon->num_t2renames, 0);
43765 + atomic_set_unchecked(&tcon->num_ffirst, 0);
43766 + atomic_set_unchecked(&tcon->num_fnext, 0);
43767 + atomic_set_unchecked(&tcon->num_fclose, 0);
43768 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
43769 + atomic_set_unchecked(&tcon->num_symlinks, 0);
43770 + atomic_set_unchecked(&tcon->num_locks, 0);
43771 }
43772 }
43773 }
43774 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43775 smBufAllocCount.counter, cifs_min_small);
43776 #ifdef CONFIG_CIFS_STATS2
43777 seq_printf(m, "Total Large %d Small %d Allocations\n",
43778 - atomic_read(&totBufAllocCount),
43779 - atomic_read(&totSmBufAllocCount));
43780 + atomic_read_unchecked(&totBufAllocCount),
43781 + atomic_read_unchecked(&totSmBufAllocCount));
43782 #endif /* CONFIG_CIFS_STATS2 */
43783
43784 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43785 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43786 if (tcon->need_reconnect)
43787 seq_puts(m, "\tDISCONNECTED ");
43788 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43789 - atomic_read(&tcon->num_smbs_sent),
43790 - atomic_read(&tcon->num_oplock_brks));
43791 + atomic_read_unchecked(&tcon->num_smbs_sent),
43792 + atomic_read_unchecked(&tcon->num_oplock_brks));
43793 seq_printf(m, "\nReads: %d Bytes: %lld",
43794 - atomic_read(&tcon->num_reads),
43795 + atomic_read_unchecked(&tcon->num_reads),
43796 (long long)(tcon->bytes_read));
43797 seq_printf(m, "\nWrites: %d Bytes: %lld",
43798 - atomic_read(&tcon->num_writes),
43799 + atomic_read_unchecked(&tcon->num_writes),
43800 (long long)(tcon->bytes_written));
43801 seq_printf(m, "\nFlushes: %d",
43802 - atomic_read(&tcon->num_flushes));
43803 + atomic_read_unchecked(&tcon->num_flushes));
43804 seq_printf(m, "\nLocks: %d HardLinks: %d "
43805 "Symlinks: %d",
43806 - atomic_read(&tcon->num_locks),
43807 - atomic_read(&tcon->num_hardlinks),
43808 - atomic_read(&tcon->num_symlinks));
43809 + atomic_read_unchecked(&tcon->num_locks),
43810 + atomic_read_unchecked(&tcon->num_hardlinks),
43811 + atomic_read_unchecked(&tcon->num_symlinks));
43812 seq_printf(m, "\nOpens: %d Closes: %d "
43813 "Deletes: %d",
43814 - atomic_read(&tcon->num_opens),
43815 - atomic_read(&tcon->num_closes),
43816 - atomic_read(&tcon->num_deletes));
43817 + atomic_read_unchecked(&tcon->num_opens),
43818 + atomic_read_unchecked(&tcon->num_closes),
43819 + atomic_read_unchecked(&tcon->num_deletes));
43820 seq_printf(m, "\nPosix Opens: %d "
43821 "Posix Mkdirs: %d",
43822 - atomic_read(&tcon->num_posixopens),
43823 - atomic_read(&tcon->num_posixmkdirs));
43824 + atomic_read_unchecked(&tcon->num_posixopens),
43825 + atomic_read_unchecked(&tcon->num_posixmkdirs));
43826 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43827 - atomic_read(&tcon->num_mkdirs),
43828 - atomic_read(&tcon->num_rmdirs));
43829 + atomic_read_unchecked(&tcon->num_mkdirs),
43830 + atomic_read_unchecked(&tcon->num_rmdirs));
43831 seq_printf(m, "\nRenames: %d T2 Renames %d",
43832 - atomic_read(&tcon->num_renames),
43833 - atomic_read(&tcon->num_t2renames));
43834 + atomic_read_unchecked(&tcon->num_renames),
43835 + atomic_read_unchecked(&tcon->num_t2renames));
43836 seq_printf(m, "\nFindFirst: %d FNext %d "
43837 "FClose %d",
43838 - atomic_read(&tcon->num_ffirst),
43839 - atomic_read(&tcon->num_fnext),
43840 - atomic_read(&tcon->num_fclose));
43841 + atomic_read_unchecked(&tcon->num_ffirst),
43842 + atomic_read_unchecked(&tcon->num_fnext),
43843 + atomic_read_unchecked(&tcon->num_fclose));
43844 }
43845 }
43846 }
43847 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43848 index 6ee1cb4..8443157 100644
43849 --- a/fs/cifs/cifsfs.c
43850 +++ b/fs/cifs/cifsfs.c
43851 @@ -989,7 +989,7 @@ cifs_init_request_bufs(void)
43852 cifs_req_cachep = kmem_cache_create("cifs_request",
43853 CIFSMaxBufSize +
43854 MAX_CIFS_HDR_SIZE, 0,
43855 - SLAB_HWCACHE_ALIGN, NULL);
43856 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43857 if (cifs_req_cachep == NULL)
43858 return -ENOMEM;
43859
43860 @@ -1016,7 +1016,7 @@ cifs_init_request_bufs(void)
43861 efficient to alloc 1 per page off the slab compared to 17K (5page)
43862 alloc of large cifs buffers even when page debugging is on */
43863 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43864 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43865 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43866 NULL);
43867 if (cifs_sm_req_cachep == NULL) {
43868 mempool_destroy(cifs_req_poolp);
43869 @@ -1101,8 +1101,8 @@ init_cifs(void)
43870 atomic_set(&bufAllocCount, 0);
43871 atomic_set(&smBufAllocCount, 0);
43872 #ifdef CONFIG_CIFS_STATS2
43873 - atomic_set(&totBufAllocCount, 0);
43874 - atomic_set(&totSmBufAllocCount, 0);
43875 + atomic_set_unchecked(&totBufAllocCount, 0);
43876 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43877 #endif /* CONFIG_CIFS_STATS2 */
43878
43879 atomic_set(&midCount, 0);
43880 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43881 index d47d20a..77e8b33 100644
43882 --- a/fs/cifs/cifsglob.h
43883 +++ b/fs/cifs/cifsglob.h
43884 @@ -388,28 +388,28 @@ struct cifs_tcon {
43885 __u16 Flags; /* optional support bits */
43886 enum statusEnum tidStatus;
43887 #ifdef CONFIG_CIFS_STATS
43888 - atomic_t num_smbs_sent;
43889 - atomic_t num_writes;
43890 - atomic_t num_reads;
43891 - atomic_t num_flushes;
43892 - atomic_t num_oplock_brks;
43893 - atomic_t num_opens;
43894 - atomic_t num_closes;
43895 - atomic_t num_deletes;
43896 - atomic_t num_mkdirs;
43897 - atomic_t num_posixopens;
43898 - atomic_t num_posixmkdirs;
43899 - atomic_t num_rmdirs;
43900 - atomic_t num_renames;
43901 - atomic_t num_t2renames;
43902 - atomic_t num_ffirst;
43903 - atomic_t num_fnext;
43904 - atomic_t num_fclose;
43905 - atomic_t num_hardlinks;
43906 - atomic_t num_symlinks;
43907 - atomic_t num_locks;
43908 - atomic_t num_acl_get;
43909 - atomic_t num_acl_set;
43910 + atomic_unchecked_t num_smbs_sent;
43911 + atomic_unchecked_t num_writes;
43912 + atomic_unchecked_t num_reads;
43913 + atomic_unchecked_t num_flushes;
43914 + atomic_unchecked_t num_oplock_brks;
43915 + atomic_unchecked_t num_opens;
43916 + atomic_unchecked_t num_closes;
43917 + atomic_unchecked_t num_deletes;
43918 + atomic_unchecked_t num_mkdirs;
43919 + atomic_unchecked_t num_posixopens;
43920 + atomic_unchecked_t num_posixmkdirs;
43921 + atomic_unchecked_t num_rmdirs;
43922 + atomic_unchecked_t num_renames;
43923 + atomic_unchecked_t num_t2renames;
43924 + atomic_unchecked_t num_ffirst;
43925 + atomic_unchecked_t num_fnext;
43926 + atomic_unchecked_t num_fclose;
43927 + atomic_unchecked_t num_hardlinks;
43928 + atomic_unchecked_t num_symlinks;
43929 + atomic_unchecked_t num_locks;
43930 + atomic_unchecked_t num_acl_get;
43931 + atomic_unchecked_t num_acl_set;
43932 #ifdef CONFIG_CIFS_STATS2
43933 unsigned long long time_writes;
43934 unsigned long long time_reads;
43935 @@ -624,7 +624,7 @@ convert_delimiter(char *path, char delim)
43936 }
43937
43938 #ifdef CONFIG_CIFS_STATS
43939 -#define cifs_stats_inc atomic_inc
43940 +#define cifs_stats_inc atomic_inc_unchecked
43941
43942 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43943 unsigned int bytes)
43944 @@ -983,8 +983,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43945 /* Various Debug counters */
43946 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43947 #ifdef CONFIG_CIFS_STATS2
43948 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43949 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43950 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43951 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43952 #endif
43953 GLOBAL_EXTERN atomic_t smBufAllocCount;
43954 GLOBAL_EXTERN atomic_t midCount;
43955 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43956 index 6b0e064..94e6c3c 100644
43957 --- a/fs/cifs/link.c
43958 +++ b/fs/cifs/link.c
43959 @@ -600,7 +600,7 @@ symlink_exit:
43960
43961 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43962 {
43963 - char *p = nd_get_link(nd);
43964 + const char *p = nd_get_link(nd);
43965 if (!IS_ERR(p))
43966 kfree(p);
43967 }
43968 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43969 index 703ef5c..2a44ed5 100644
43970 --- a/fs/cifs/misc.c
43971 +++ b/fs/cifs/misc.c
43972 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43973 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43974 atomic_inc(&bufAllocCount);
43975 #ifdef CONFIG_CIFS_STATS2
43976 - atomic_inc(&totBufAllocCount);
43977 + atomic_inc_unchecked(&totBufAllocCount);
43978 #endif /* CONFIG_CIFS_STATS2 */
43979 }
43980
43981 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43982 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43983 atomic_inc(&smBufAllocCount);
43984 #ifdef CONFIG_CIFS_STATS2
43985 - atomic_inc(&totSmBufAllocCount);
43986 + atomic_inc_unchecked(&totSmBufAllocCount);
43987 #endif /* CONFIG_CIFS_STATS2 */
43988
43989 }
43990 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43991 index 6901578..d402eb5 100644
43992 --- a/fs/coda/cache.c
43993 +++ b/fs/coda/cache.c
43994 @@ -24,7 +24,7 @@
43995 #include "coda_linux.h"
43996 #include "coda_cache.h"
43997
43998 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43999 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
44000
44001 /* replace or extend an acl cache hit */
44002 void coda_cache_enter(struct inode *inode, int mask)
44003 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
44004 struct coda_inode_info *cii = ITOC(inode);
44005
44006 spin_lock(&cii->c_lock);
44007 - cii->c_cached_epoch = atomic_read(&permission_epoch);
44008 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
44009 if (cii->c_uid != current_fsuid()) {
44010 cii->c_uid = current_fsuid();
44011 cii->c_cached_perm = mask;
44012 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
44013 {
44014 struct coda_inode_info *cii = ITOC(inode);
44015 spin_lock(&cii->c_lock);
44016 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
44017 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
44018 spin_unlock(&cii->c_lock);
44019 }
44020
44021 /* remove all acl caches */
44022 void coda_cache_clear_all(struct super_block *sb)
44023 {
44024 - atomic_inc(&permission_epoch);
44025 + atomic_inc_unchecked(&permission_epoch);
44026 }
44027
44028
44029 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
44030 spin_lock(&cii->c_lock);
44031 hit = (mask & cii->c_cached_perm) == mask &&
44032 cii->c_uid == current_fsuid() &&
44033 - cii->c_cached_epoch == atomic_read(&permission_epoch);
44034 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
44035 spin_unlock(&cii->c_lock);
44036
44037 return hit;
44038 diff --git a/fs/compat.c b/fs/compat.c
44039 index 07880ba..3fb2862 100644
44040 --- a/fs/compat.c
44041 +++ b/fs/compat.c
44042 @@ -491,7 +491,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
44043
44044 set_fs(KERNEL_DS);
44045 /* The __user pointer cast is valid because of the set_fs() */
44046 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
44047 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
44048 set_fs(oldfs);
44049 /* truncating is ok because it's a user address */
44050 if (!ret)
44051 @@ -549,7 +549,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
44052 goto out;
44053
44054 ret = -EINVAL;
44055 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
44056 + if (nr_segs > UIO_MAXIOV)
44057 goto out;
44058 if (nr_segs > fast_segs) {
44059 ret = -ENOMEM;
44060 @@ -832,6 +832,7 @@ struct compat_old_linux_dirent {
44061
44062 struct compat_readdir_callback {
44063 struct compat_old_linux_dirent __user *dirent;
44064 + struct file * file;
44065 int result;
44066 };
44067
44068 @@ -849,6 +850,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
44069 buf->result = -EOVERFLOW;
44070 return -EOVERFLOW;
44071 }
44072 +
44073 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44074 + return 0;
44075 +
44076 buf->result++;
44077 dirent = buf->dirent;
44078 if (!access_ok(VERIFY_WRITE, dirent,
44079 @@ -881,6 +886,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
44080
44081 buf.result = 0;
44082 buf.dirent = dirent;
44083 + buf.file = file;
44084
44085 error = vfs_readdir(file, compat_fillonedir, &buf);
44086 if (buf.result)
44087 @@ -901,6 +907,7 @@ struct compat_linux_dirent {
44088 struct compat_getdents_callback {
44089 struct compat_linux_dirent __user *current_dir;
44090 struct compat_linux_dirent __user *previous;
44091 + struct file * file;
44092 int count;
44093 int error;
44094 };
44095 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
44096 buf->error = -EOVERFLOW;
44097 return -EOVERFLOW;
44098 }
44099 +
44100 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44101 + return 0;
44102 +
44103 dirent = buf->previous;
44104 if (dirent) {
44105 if (__put_user(offset, &dirent->d_off))
44106 @@ -969,6 +980,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
44107 buf.previous = NULL;
44108 buf.count = count;
44109 buf.error = 0;
44110 + buf.file = file;
44111
44112 error = vfs_readdir(file, compat_filldir, &buf);
44113 if (error >= 0)
44114 @@ -990,6 +1002,7 @@ out:
44115 struct compat_getdents_callback64 {
44116 struct linux_dirent64 __user *current_dir;
44117 struct linux_dirent64 __user *previous;
44118 + struct file * file;
44119 int count;
44120 int error;
44121 };
44122 @@ -1006,6 +1019,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
44123 buf->error = -EINVAL; /* only used if we fail.. */
44124 if (reclen > buf->count)
44125 return -EINVAL;
44126 +
44127 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
44128 + return 0;
44129 +
44130 dirent = buf->previous;
44131
44132 if (dirent) {
44133 @@ -1057,13 +1074,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
44134 buf.previous = NULL;
44135 buf.count = count;
44136 buf.error = 0;
44137 + buf.file = file;
44138
44139 error = vfs_readdir(file, compat_filldir64, &buf);
44140 if (error >= 0)
44141 error = buf.error;
44142 lastdirent = buf.previous;
44143 if (lastdirent) {
44144 - typeof(lastdirent->d_off) d_off = file->f_pos;
44145 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
44146 if (__put_user_unaligned(d_off, &lastdirent->d_off))
44147 error = -EFAULT;
44148 else
44149 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
44150 index 112e45a..b59845b 100644
44151 --- a/fs/compat_binfmt_elf.c
44152 +++ b/fs/compat_binfmt_elf.c
44153 @@ -30,11 +30,13 @@
44154 #undef elf_phdr
44155 #undef elf_shdr
44156 #undef elf_note
44157 +#undef elf_dyn
44158 #undef elf_addr_t
44159 #define elfhdr elf32_hdr
44160 #define elf_phdr elf32_phdr
44161 #define elf_shdr elf32_shdr
44162 #define elf_note elf32_note
44163 +#define elf_dyn Elf32_Dyn
44164 #define elf_addr_t Elf32_Addr
44165
44166 /*
44167 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
44168 index a26bea1..ae23e72 100644
44169 --- a/fs/compat_ioctl.c
44170 +++ b/fs/compat_ioctl.c
44171 @@ -211,6 +211,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
44172
44173 err = get_user(palp, &up->palette);
44174 err |= get_user(length, &up->length);
44175 + if (err)
44176 + return -EFAULT;
44177
44178 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
44179 err = put_user(compat_ptr(palp), &up_native->palette);
44180 @@ -622,7 +624,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
44181 return -EFAULT;
44182 if (__get_user(udata, &ss32->iomem_base))
44183 return -EFAULT;
44184 - ss.iomem_base = compat_ptr(udata);
44185 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
44186 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
44187 __get_user(ss.port_high, &ss32->port_high))
44188 return -EFAULT;
44189 @@ -797,7 +799,7 @@ static int compat_ioctl_preallocate(struct file *file,
44190 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
44191 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
44192 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
44193 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44194 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
44195 return -EFAULT;
44196
44197 return ioctl_preallocate(file, p);
44198 @@ -1611,8 +1613,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
44199 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
44200 {
44201 unsigned int a, b;
44202 - a = *(unsigned int *)p;
44203 - b = *(unsigned int *)q;
44204 + a = *(const unsigned int *)p;
44205 + b = *(const unsigned int *)q;
44206 if (a > b)
44207 return 1;
44208 if (a < b)
44209 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
44210 index 5ddd7eb..c18bf04 100644
44211 --- a/fs/configfs/dir.c
44212 +++ b/fs/configfs/dir.c
44213 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44214 }
44215 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
44216 struct configfs_dirent *next;
44217 - const char * name;
44218 + const unsigned char * name;
44219 + char d_name[sizeof(next->s_dentry->d_iname)];
44220 int len;
44221 struct inode *inode = NULL;
44222
44223 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
44224 continue;
44225
44226 name = configfs_get_name(next);
44227 - len = strlen(name);
44228 + if (next->s_dentry && name == next->s_dentry->d_iname) {
44229 + len = next->s_dentry->d_name.len;
44230 + memcpy(d_name, name, len);
44231 + name = d_name;
44232 + } else
44233 + len = strlen(name);
44234
44235 /*
44236 * We'll have a dentry and an inode for
44237 diff --git a/fs/configfs/file.c b/fs/configfs/file.c
44238 index 2b6cb23..d76e879 100644
44239 --- a/fs/configfs/file.c
44240 +++ b/fs/configfs/file.c
44241 @@ -135,6 +135,8 @@ out:
44242 */
44243
44244 static int
44245 +fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count) __size_overflow(3);
44246 +static int
44247 fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size_t count)
44248 {
44249 int error;
44250 diff --git a/fs/dcache.c b/fs/dcache.c
44251 index 2576d14..0cec38d 100644
44252 --- a/fs/dcache.c
44253 +++ b/fs/dcache.c
44254 @@ -105,10 +105,10 @@ static unsigned int d_hash_shift __read_mostly;
44255 static struct hlist_bl_head *dentry_hashtable __read_mostly;
44256
44257 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
44258 - unsigned long hash)
44259 + unsigned int hash)
44260 {
44261 - hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
44262 - hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
44263 + hash += (unsigned long) parent / L1_CACHE_BYTES;
44264 + hash = hash + (hash >> D_HASHBITS);
44265 return dentry_hashtable + (hash & D_HASHMASK);
44266 }
44267
44268 @@ -3067,7 +3067,7 @@ void __init vfs_caches_init(unsigned long mempages)
44269 mempages -= reserve;
44270
44271 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
44272 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
44273 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
44274
44275 dcache_init();
44276 inode_init();
44277 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
44278 index 956d5dd..e755e04 100644
44279 --- a/fs/debugfs/inode.c
44280 +++ b/fs/debugfs/inode.c
44281 @@ -261,7 +261,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
44282 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
44283 {
44284 return debugfs_create_file(name,
44285 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44286 + S_IFDIR | S_IRWXU,
44287 +#else
44288 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44289 +#endif
44290 parent, NULL, NULL);
44291 }
44292 EXPORT_SYMBOL_GPL(debugfs_create_dir);
44293 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
44294 index ab35b11..b30af66 100644
44295 --- a/fs/ecryptfs/inode.c
44296 +++ b/fs/ecryptfs/inode.c
44297 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
44298 old_fs = get_fs();
44299 set_fs(get_ds());
44300 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
44301 - (char __user *)lower_buf,
44302 + (char __force_user *)lower_buf,
44303 lower_bufsiz);
44304 set_fs(old_fs);
44305 if (rc < 0)
44306 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
44307 }
44308 old_fs = get_fs();
44309 set_fs(get_ds());
44310 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
44311 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
44312 set_fs(old_fs);
44313 if (rc < 0) {
44314 kfree(buf);
44315 @@ -733,7 +733,7 @@ out:
44316 static void
44317 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
44318 {
44319 - char *buf = nd_get_link(nd);
44320 + const char *buf = nd_get_link(nd);
44321 if (!IS_ERR(buf)) {
44322 /* Free the char* */
44323 kfree(buf);
44324 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
44325 index 3a06f40..f7af544 100644
44326 --- a/fs/ecryptfs/miscdev.c
44327 +++ b/fs/ecryptfs/miscdev.c
44328 @@ -345,7 +345,7 @@ check_list:
44329 goto out_unlock_msg_ctx;
44330 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
44331 if (msg_ctx->msg) {
44332 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
44333 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
44334 goto out_unlock_msg_ctx;
44335 i += packet_length_size;
44336 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
44337 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
44338 index b2a34a1..162fa69 100644
44339 --- a/fs/ecryptfs/read_write.c
44340 +++ b/fs/ecryptfs/read_write.c
44341 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
44342 return -EIO;
44343 fs_save = get_fs();
44344 set_fs(get_ds());
44345 - rc = vfs_write(lower_file, data, size, &offset);
44346 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
44347 set_fs(fs_save);
44348 mark_inode_dirty_sync(ecryptfs_inode);
44349 return rc;
44350 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
44351 return -EIO;
44352 fs_save = get_fs();
44353 set_fs(get_ds());
44354 - rc = vfs_read(lower_file, data, size, &offset);
44355 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
44356 set_fs(fs_save);
44357 return rc;
44358 }
44359 diff --git a/fs/exec.c b/fs/exec.c
44360 index ae42277..32c9035 100644
44361 --- a/fs/exec.c
44362 +++ b/fs/exec.c
44363 @@ -55,6 +55,13 @@
44364 #include <linux/pipe_fs_i.h>
44365 #include <linux/oom.h>
44366 #include <linux/compat.h>
44367 +#include <linux/random.h>
44368 +#include <linux/seq_file.h>
44369 +
44370 +#ifdef CONFIG_PAX_REFCOUNT
44371 +#include <linux/kallsyms.h>
44372 +#include <linux/kdebug.h>
44373 +#endif
44374
44375 #include <asm/uaccess.h>
44376 #include <asm/mmu_context.h>
44377 @@ -63,6 +70,15 @@
44378 #include <trace/events/task.h>
44379 #include "internal.h"
44380
44381 +#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
44382 +void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
44383 +#endif
44384 +
44385 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
44386 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
44387 +EXPORT_SYMBOL(pax_set_initial_flags_func);
44388 +#endif
44389 +
44390 int core_uses_pid;
44391 char core_pattern[CORENAME_MAX_SIZE] = "core";
44392 unsigned int core_pipe_limit;
44393 @@ -72,7 +88,7 @@ struct core_name {
44394 char *corename;
44395 int used, size;
44396 };
44397 -static atomic_t call_count = ATOMIC_INIT(1);
44398 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
44399
44400 /* The maximal length of core_pattern is also specified in sysctl.c */
44401
44402 @@ -190,18 +206,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44403 int write)
44404 {
44405 struct page *page;
44406 - int ret;
44407
44408 -#ifdef CONFIG_STACK_GROWSUP
44409 - if (write) {
44410 - ret = expand_downwards(bprm->vma, pos);
44411 - if (ret < 0)
44412 - return NULL;
44413 - }
44414 -#endif
44415 - ret = get_user_pages(current, bprm->mm, pos,
44416 - 1, write, 1, &page, NULL);
44417 - if (ret <= 0)
44418 + if (0 > expand_downwards(bprm->vma, pos))
44419 + return NULL;
44420 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
44421 return NULL;
44422
44423 if (write) {
44424 @@ -217,6 +225,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
44425 if (size <= ARG_MAX)
44426 return page;
44427
44428 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44429 + // only allow 512KB for argv+env on suid/sgid binaries
44430 + // to prevent easy ASLR exhaustion
44431 + if (((bprm->cred->euid != current_euid()) ||
44432 + (bprm->cred->egid != current_egid())) &&
44433 + (size > (512 * 1024))) {
44434 + put_page(page);
44435 + return NULL;
44436 + }
44437 +#endif
44438 +
44439 /*
44440 * Limit to 1/4-th the stack size for the argv+env strings.
44441 * This ensures that:
44442 @@ -276,6 +295,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44443 vma->vm_end = STACK_TOP_MAX;
44444 vma->vm_start = vma->vm_end - PAGE_SIZE;
44445 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
44446 +
44447 +#ifdef CONFIG_PAX_SEGMEXEC
44448 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
44449 +#endif
44450 +
44451 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
44452 INIT_LIST_HEAD(&vma->anon_vma_chain);
44453
44454 @@ -290,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
44455 mm->stack_vm = mm->total_vm = 1;
44456 up_write(&mm->mmap_sem);
44457 bprm->p = vma->vm_end - sizeof(void *);
44458 +
44459 +#ifdef CONFIG_PAX_RANDUSTACK
44460 + if (randomize_va_space)
44461 + bprm->p ^= random32() & ~PAGE_MASK;
44462 +#endif
44463 +
44464 return 0;
44465 err:
44466 up_write(&mm->mmap_sem);
44467 @@ -398,19 +428,7 @@ err:
44468 return err;
44469 }
44470
44471 -struct user_arg_ptr {
44472 -#ifdef CONFIG_COMPAT
44473 - bool is_compat;
44474 -#endif
44475 - union {
44476 - const char __user *const __user *native;
44477 -#ifdef CONFIG_COMPAT
44478 - compat_uptr_t __user *compat;
44479 -#endif
44480 - } ptr;
44481 -};
44482 -
44483 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44484 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44485 {
44486 const char __user *native;
44487
44488 @@ -419,14 +437,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
44489 compat_uptr_t compat;
44490
44491 if (get_user(compat, argv.ptr.compat + nr))
44492 - return ERR_PTR(-EFAULT);
44493 + return (const char __force_user *)ERR_PTR(-EFAULT);
44494
44495 return compat_ptr(compat);
44496 }
44497 #endif
44498
44499 if (get_user(native, argv.ptr.native + nr))
44500 - return ERR_PTR(-EFAULT);
44501 + return (const char __force_user *)ERR_PTR(-EFAULT);
44502
44503 return native;
44504 }
44505 @@ -445,7 +463,7 @@ static int count(struct user_arg_ptr argv, int max)
44506 if (!p)
44507 break;
44508
44509 - if (IS_ERR(p))
44510 + if (IS_ERR((const char __force_kernel *)p))
44511 return -EFAULT;
44512
44513 if (i++ >= max)
44514 @@ -479,7 +497,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
44515
44516 ret = -EFAULT;
44517 str = get_user_arg_ptr(argv, argc);
44518 - if (IS_ERR(str))
44519 + if (IS_ERR((const char __force_kernel *)str))
44520 goto out;
44521
44522 len = strnlen_user(str, MAX_ARG_STRLEN);
44523 @@ -561,7 +579,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
44524 int r;
44525 mm_segment_t oldfs = get_fs();
44526 struct user_arg_ptr argv = {
44527 - .ptr.native = (const char __user *const __user *)__argv,
44528 + .ptr.native = (const char __force_user *const __force_user *)__argv,
44529 };
44530
44531 set_fs(KERNEL_DS);
44532 @@ -596,7 +614,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44533 unsigned long new_end = old_end - shift;
44534 struct mmu_gather tlb;
44535
44536 - BUG_ON(new_start > new_end);
44537 + if (new_start >= new_end || new_start < mmap_min_addr)
44538 + return -ENOMEM;
44539
44540 /*
44541 * ensure there are no vmas between where we want to go
44542 @@ -605,6 +624,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
44543 if (vma != find_vma(mm, new_start))
44544 return -EFAULT;
44545
44546 +#ifdef CONFIG_PAX_SEGMEXEC
44547 + BUG_ON(pax_find_mirror_vma(vma));
44548 +#endif
44549 +
44550 /*
44551 * cover the whole range: [new_start, old_end)
44552 */
44553 @@ -685,10 +708,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44554 stack_top = arch_align_stack(stack_top);
44555 stack_top = PAGE_ALIGN(stack_top);
44556
44557 - if (unlikely(stack_top < mmap_min_addr) ||
44558 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
44559 - return -ENOMEM;
44560 -
44561 stack_shift = vma->vm_end - stack_top;
44562
44563 bprm->p -= stack_shift;
44564 @@ -700,8 +719,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
44565 bprm->exec -= stack_shift;
44566
44567 down_write(&mm->mmap_sem);
44568 +
44569 + /* Move stack pages down in memory. */
44570 + if (stack_shift) {
44571 + ret = shift_arg_pages(vma, stack_shift);
44572 + if (ret)
44573 + goto out_unlock;
44574 + }
44575 +
44576 vm_flags = VM_STACK_FLAGS;
44577
44578 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44579 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
44580 + vm_flags &= ~VM_EXEC;
44581 +
44582 +#ifdef CONFIG_PAX_MPROTECT
44583 + if (mm->pax_flags & MF_PAX_MPROTECT)
44584 + vm_flags &= ~VM_MAYEXEC;
44585 +#endif
44586 +
44587 + }
44588 +#endif
44589 +
44590 /*
44591 * Adjust stack execute permissions; explicitly enable for
44592 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
44593 @@ -720,13 +759,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
44594 goto out_unlock;
44595 BUG_ON(prev != vma);
44596
44597 - /* Move stack pages down in memory. */
44598 - if (stack_shift) {
44599 - ret = shift_arg_pages(vma, stack_shift);
44600 - if (ret)
44601 - goto out_unlock;
44602 - }
44603 -
44604 /* mprotect_fixup is overkill to remove the temporary stack flags */
44605 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
44606
44607 @@ -807,7 +839,7 @@ int kernel_read(struct file *file, loff_t offset,
44608 old_fs = get_fs();
44609 set_fs(get_ds());
44610 /* The cast to a user pointer is valid due to the set_fs() */
44611 - result = vfs_read(file, (void __user *)addr, count, &pos);
44612 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
44613 set_fs(old_fs);
44614 return result;
44615 }
44616 @@ -1255,7 +1287,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
44617 }
44618 rcu_read_unlock();
44619
44620 - if (p->fs->users > n_fs) {
44621 + if (atomic_read(&p->fs->users) > n_fs) {
44622 bprm->unsafe |= LSM_UNSAFE_SHARE;
44623 } else {
44624 res = -EAGAIN;
44625 @@ -1450,6 +1482,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44626
44627 EXPORT_SYMBOL(search_binary_handler);
44628
44629 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44630 +static DEFINE_PER_CPU(u64, exec_counter);
44631 +static int __init init_exec_counters(void)
44632 +{
44633 + unsigned int cpu;
44634 +
44635 + for_each_possible_cpu(cpu) {
44636 + per_cpu(exec_counter, cpu) = (u64)cpu;
44637 + }
44638 +
44639 + return 0;
44640 +}
44641 +early_initcall(init_exec_counters);
44642 +static inline void increment_exec_counter(void)
44643 +{
44644 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
44645 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44646 +}
44647 +#else
44648 +static inline void increment_exec_counter(void) {}
44649 +#endif
44650 +
44651 /*
44652 * sys_execve() executes a new program.
44653 */
44654 @@ -1458,6 +1512,11 @@ static int do_execve_common(const char *filename,
44655 struct user_arg_ptr envp,
44656 struct pt_regs *regs)
44657 {
44658 +#ifdef CONFIG_GRKERNSEC
44659 + struct file *old_exec_file;
44660 + struct acl_subject_label *old_acl;
44661 + struct rlimit old_rlim[RLIM_NLIMITS];
44662 +#endif
44663 struct linux_binprm *bprm;
44664 struct file *file;
44665 struct files_struct *displaced;
44666 @@ -1465,6 +1524,8 @@ static int do_execve_common(const char *filename,
44667 int retval;
44668 const struct cred *cred = current_cred();
44669
44670 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44671 +
44672 /*
44673 * We move the actual failure in case of RLIMIT_NPROC excess from
44674 * set*uid() to execve() because too many poorly written programs
44675 @@ -1505,12 +1566,27 @@ static int do_execve_common(const char *filename,
44676 if (IS_ERR(file))
44677 goto out_unmark;
44678
44679 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
44680 + retval = -EPERM;
44681 + goto out_file;
44682 + }
44683 +
44684 sched_exec();
44685
44686 bprm->file = file;
44687 bprm->filename = filename;
44688 bprm->interp = filename;
44689
44690 + if (gr_process_user_ban()) {
44691 + retval = -EPERM;
44692 + goto out_file;
44693 + }
44694 +
44695 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44696 + retval = -EACCES;
44697 + goto out_file;
44698 + }
44699 +
44700 retval = bprm_mm_init(bprm);
44701 if (retval)
44702 goto out_file;
44703 @@ -1527,24 +1603,65 @@ static int do_execve_common(const char *filename,
44704 if (retval < 0)
44705 goto out;
44706
44707 +#ifdef CONFIG_GRKERNSEC
44708 + old_acl = current->acl;
44709 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44710 + old_exec_file = current->exec_file;
44711 + get_file(file);
44712 + current->exec_file = file;
44713 +#endif
44714 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44715 + /* limit suid stack to 8MB
44716 + we saved the old limits above and will restore them if this exec fails
44717 + */
44718 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44719 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44720 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44721 +#endif
44722 +
44723 + if (!gr_tpe_allow(file)) {
44724 + retval = -EACCES;
44725 + goto out_fail;
44726 + }
44727 +
44728 + if (gr_check_crash_exec(file)) {
44729 + retval = -EACCES;
44730 + goto out_fail;
44731 + }
44732 +
44733 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44734 + bprm->unsafe);
44735 + if (retval < 0)
44736 + goto out_fail;
44737 +
44738 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44739 if (retval < 0)
44740 - goto out;
44741 + goto out_fail;
44742
44743 bprm->exec = bprm->p;
44744 retval = copy_strings(bprm->envc, envp, bprm);
44745 if (retval < 0)
44746 - goto out;
44747 + goto out_fail;
44748
44749 retval = copy_strings(bprm->argc, argv, bprm);
44750 if (retval < 0)
44751 - goto out;
44752 + goto out_fail;
44753 +
44754 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44755 +
44756 + gr_handle_exec_args(bprm, argv);
44757
44758 retval = search_binary_handler(bprm,regs);
44759 if (retval < 0)
44760 - goto out;
44761 + goto out_fail;
44762 +#ifdef CONFIG_GRKERNSEC
44763 + if (old_exec_file)
44764 + fput(old_exec_file);
44765 +#endif
44766
44767 /* execve succeeded */
44768 +
44769 + increment_exec_counter();
44770 current->fs->in_exec = 0;
44771 current->in_execve = 0;
44772 acct_update_integrals(current);
44773 @@ -1553,6 +1670,14 @@ static int do_execve_common(const char *filename,
44774 put_files_struct(displaced);
44775 return retval;
44776
44777 +out_fail:
44778 +#ifdef CONFIG_GRKERNSEC
44779 + current->acl = old_acl;
44780 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44781 + fput(current->exec_file);
44782 + current->exec_file = old_exec_file;
44783 +#endif
44784 +
44785 out:
44786 if (bprm->mm) {
44787 acct_arg_size(bprm, 0);
44788 @@ -1626,7 +1751,7 @@ static int expand_corename(struct core_name *cn)
44789 {
44790 char *old_corename = cn->corename;
44791
44792 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44793 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44794 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44795
44796 if (!cn->corename) {
44797 @@ -1723,7 +1848,7 @@ static int format_corename(struct core_name *cn, long signr)
44798 int pid_in_pattern = 0;
44799 int err = 0;
44800
44801 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44802 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44803 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44804 cn->used = 0;
44805
44806 @@ -1820,6 +1945,228 @@ out:
44807 return ispipe;
44808 }
44809
44810 +int pax_check_flags(unsigned long *flags)
44811 +{
44812 + int retval = 0;
44813 +
44814 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44815 + if (*flags & MF_PAX_SEGMEXEC)
44816 + {
44817 + *flags &= ~MF_PAX_SEGMEXEC;
44818 + retval = -EINVAL;
44819 + }
44820 +#endif
44821 +
44822 + if ((*flags & MF_PAX_PAGEEXEC)
44823 +
44824 +#ifdef CONFIG_PAX_PAGEEXEC
44825 + && (*flags & MF_PAX_SEGMEXEC)
44826 +#endif
44827 +
44828 + )
44829 + {
44830 + *flags &= ~MF_PAX_PAGEEXEC;
44831 + retval = -EINVAL;
44832 + }
44833 +
44834 + if ((*flags & MF_PAX_MPROTECT)
44835 +
44836 +#ifdef CONFIG_PAX_MPROTECT
44837 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44838 +#endif
44839 +
44840 + )
44841 + {
44842 + *flags &= ~MF_PAX_MPROTECT;
44843 + retval = -EINVAL;
44844 + }
44845 +
44846 + if ((*flags & MF_PAX_EMUTRAMP)
44847 +
44848 +#ifdef CONFIG_PAX_EMUTRAMP
44849 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44850 +#endif
44851 +
44852 + )
44853 + {
44854 + *flags &= ~MF_PAX_EMUTRAMP;
44855 + retval = -EINVAL;
44856 + }
44857 +
44858 + return retval;
44859 +}
44860 +
44861 +EXPORT_SYMBOL(pax_check_flags);
44862 +
44863 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44864 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44865 +{
44866 + struct task_struct *tsk = current;
44867 + struct mm_struct *mm = current->mm;
44868 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44869 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44870 + char *path_exec = NULL;
44871 + char *path_fault = NULL;
44872 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
44873 +
44874 + if (buffer_exec && buffer_fault) {
44875 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44876 +
44877 + down_read(&mm->mmap_sem);
44878 + vma = mm->mmap;
44879 + while (vma && (!vma_exec || !vma_fault)) {
44880 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44881 + vma_exec = vma;
44882 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44883 + vma_fault = vma;
44884 + vma = vma->vm_next;
44885 + }
44886 + if (vma_exec) {
44887 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44888 + if (IS_ERR(path_exec))
44889 + path_exec = "<path too long>";
44890 + else {
44891 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44892 + if (path_exec) {
44893 + *path_exec = 0;
44894 + path_exec = buffer_exec;
44895 + } else
44896 + path_exec = "<path too long>";
44897 + }
44898 + }
44899 + if (vma_fault) {
44900 + start = vma_fault->vm_start;
44901 + end = vma_fault->vm_end;
44902 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44903 + if (vma_fault->vm_file) {
44904 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44905 + if (IS_ERR(path_fault))
44906 + path_fault = "<path too long>";
44907 + else {
44908 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44909 + if (path_fault) {
44910 + *path_fault = 0;
44911 + path_fault = buffer_fault;
44912 + } else
44913 + path_fault = "<path too long>";
44914 + }
44915 + } else
44916 + path_fault = "<anonymous mapping>";
44917 + }
44918 + up_read(&mm->mmap_sem);
44919 + }
44920 + if (tsk->signal->curr_ip)
44921 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44922 + else
44923 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44924 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44925 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44926 + task_uid(tsk), task_euid(tsk), pc, sp);
44927 + free_page((unsigned long)buffer_exec);
44928 + free_page((unsigned long)buffer_fault);
44929 + pax_report_insns(regs, pc, sp);
44930 + do_coredump(SIGKILL, SIGKILL, regs);
44931 +}
44932 +#endif
44933 +
44934 +#ifdef CONFIG_PAX_REFCOUNT
44935 +void pax_report_refcount_overflow(struct pt_regs *regs)
44936 +{
44937 + if (current->signal->curr_ip)
44938 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44939 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44940 + else
44941 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44942 + current->comm, task_pid_nr(current), current_uid(), current_euid());
44943 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44944 + show_regs(regs);
44945 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44946 +}
44947 +#endif
44948 +
44949 +#ifdef CONFIG_PAX_USERCOPY
44950 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44951 +int object_is_on_stack(const void *obj, unsigned long len)
44952 +{
44953 + const void * const stack = task_stack_page(current);
44954 + const void * const stackend = stack + THREAD_SIZE;
44955 +
44956 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44957 + const void *frame = NULL;
44958 + const void *oldframe;
44959 +#endif
44960 +
44961 + if (obj + len < obj)
44962 + return -1;
44963 +
44964 + if (obj + len <= stack || stackend <= obj)
44965 + return 0;
44966 +
44967 + if (obj < stack || stackend < obj + len)
44968 + return -1;
44969 +
44970 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44971 + oldframe = __builtin_frame_address(1);
44972 + if (oldframe)
44973 + frame = __builtin_frame_address(2);
44974 + /*
44975 + low ----------------------------------------------> high
44976 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
44977 + ^----------------^
44978 + allow copies only within here
44979 + */
44980 + while (stack <= frame && frame < stackend) {
44981 + /* if obj + len extends past the last frame, this
44982 + check won't pass and the next frame will be 0,
44983 + causing us to bail out and correctly report
44984 + the copy as invalid
44985 + */
44986 + if (obj + len <= frame)
44987 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44988 + oldframe = frame;
44989 + frame = *(const void * const *)frame;
44990 + }
44991 + return -1;
44992 +#else
44993 + return 1;
44994 +#endif
44995 +}
44996 +
44997 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44998 +{
44999 + if (current->signal->curr_ip)
45000 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45001 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45002 + else
45003 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
45004 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
45005 + dump_stack();
45006 + gr_handle_kernel_exploit();
45007 + do_group_exit(SIGKILL);
45008 +}
45009 +#endif
45010 +
45011 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
45012 +void pax_track_stack(void)
45013 +{
45014 + unsigned long sp = (unsigned long)&sp;
45015 + if (sp < current_thread_info()->lowest_stack &&
45016 + sp > (unsigned long)task_stack_page(current))
45017 + current_thread_info()->lowest_stack = sp;
45018 +}
45019 +EXPORT_SYMBOL(pax_track_stack);
45020 +#endif
45021 +
45022 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
45023 +void report_size_overflow(const char *file, unsigned int line, const char *func)
45024 +{
45025 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
45026 + dump_stack();
45027 + do_group_exit(SIGKILL);
45028 +}
45029 +EXPORT_SYMBOL(report_size_overflow);
45030 +#endif
45031 +
45032 static int zap_process(struct task_struct *start, int exit_code)
45033 {
45034 struct task_struct *t;
45035 @@ -2017,17 +2364,17 @@ static void wait_for_dump_helpers(struct file *file)
45036 pipe = file->f_path.dentry->d_inode->i_pipe;
45037
45038 pipe_lock(pipe);
45039 - pipe->readers++;
45040 - pipe->writers--;
45041 + atomic_inc(&pipe->readers);
45042 + atomic_dec(&pipe->writers);
45043
45044 - while ((pipe->readers > 1) && (!signal_pending(current))) {
45045 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
45046 wake_up_interruptible_sync(&pipe->wait);
45047 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45048 pipe_wait(pipe);
45049 }
45050
45051 - pipe->readers--;
45052 - pipe->writers++;
45053 + atomic_dec(&pipe->readers);
45054 + atomic_inc(&pipe->writers);
45055 pipe_unlock(pipe);
45056
45057 }
45058 @@ -2088,7 +2435,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45059 int retval = 0;
45060 int flag = 0;
45061 int ispipe;
45062 - static atomic_t core_dump_count = ATOMIC_INIT(0);
45063 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
45064 struct coredump_params cprm = {
45065 .signr = signr,
45066 .regs = regs,
45067 @@ -2103,6 +2450,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45068
45069 audit_core_dumps(signr);
45070
45071 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
45072 + gr_handle_brute_attach(current, cprm.mm_flags);
45073 +
45074 binfmt = mm->binfmt;
45075 if (!binfmt || !binfmt->core_dump)
45076 goto fail;
45077 @@ -2170,7 +2520,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45078 }
45079 cprm.limit = RLIM_INFINITY;
45080
45081 - dump_count = atomic_inc_return(&core_dump_count);
45082 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
45083 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
45084 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
45085 task_tgid_vnr(current), current->comm);
45086 @@ -2197,6 +2547,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
45087 } else {
45088 struct inode *inode;
45089
45090 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
45091 +
45092 if (cprm.limit < binfmt->min_coredump)
45093 goto fail_unlock;
45094
45095 @@ -2240,7 +2592,7 @@ close_fail:
45096 filp_close(cprm.file, NULL);
45097 fail_dropcount:
45098 if (ispipe)
45099 - atomic_dec(&core_dump_count);
45100 + atomic_dec_unchecked(&core_dump_count);
45101 fail_unlock:
45102 kfree(cn.corename);
45103 fail_corename:
45104 @@ -2259,7 +2611,7 @@ fail:
45105 */
45106 int dump_write(struct file *file, const void *addr, int nr)
45107 {
45108 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
45109 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
45110 }
45111 EXPORT_SYMBOL(dump_write);
45112
45113 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
45114 index a8cbe1b..fed04cb 100644
45115 --- a/fs/ext2/balloc.c
45116 +++ b/fs/ext2/balloc.c
45117 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
45118
45119 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45120 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45121 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45122 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
45123 sbi->s_resuid != current_fsuid() &&
45124 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
45125 return 0;
45126 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
45127 index a203892..4e64db5 100644
45128 --- a/fs/ext3/balloc.c
45129 +++ b/fs/ext3/balloc.c
45130 @@ -1446,9 +1446,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
45131
45132 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
45133 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
45134 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
45135 + if (free_blocks < root_blocks + 1 &&
45136 !use_reservation && sbi->s_resuid != current_fsuid() &&
45137 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
45138 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
45139 + !capable_nolog(CAP_SYS_RESOURCE)) {
45140 return 0;
45141 }
45142 return 1;
45143 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
45144 index f9e2cd8..bfdc476 100644
45145 --- a/fs/ext4/balloc.c
45146 +++ b/fs/ext4/balloc.c
45147 @@ -438,8 +438,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
45148 /* Hm, nope. Are (enough) root reserved clusters available? */
45149 if (sbi->s_resuid == current_fsuid() ||
45150 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
45151 - capable(CAP_SYS_RESOURCE) ||
45152 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
45153 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
45154 + capable_nolog(CAP_SYS_RESOURCE)) {
45155
45156 if (free_clusters >= (nclusters + dirty_clusters))
45157 return 1;
45158 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
45159 index 9983ba8..2a5272c 100644
45160 --- a/fs/ext4/ext4.h
45161 +++ b/fs/ext4/ext4.h
45162 @@ -1217,19 +1217,19 @@ struct ext4_sb_info {
45163 unsigned long s_mb_last_start;
45164
45165 /* stats for buddy allocator */
45166 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
45167 - atomic_t s_bal_success; /* we found long enough chunks */
45168 - atomic_t s_bal_allocated; /* in blocks */
45169 - atomic_t s_bal_ex_scanned; /* total extents scanned */
45170 - atomic_t s_bal_goals; /* goal hits */
45171 - atomic_t s_bal_breaks; /* too long searches */
45172 - atomic_t s_bal_2orders; /* 2^order hits */
45173 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
45174 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
45175 + atomic_unchecked_t s_bal_allocated; /* in blocks */
45176 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
45177 + atomic_unchecked_t s_bal_goals; /* goal hits */
45178 + atomic_unchecked_t s_bal_breaks; /* too long searches */
45179 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
45180 spinlock_t s_bal_lock;
45181 unsigned long s_mb_buddies_generated;
45182 unsigned long long s_mb_generation_time;
45183 - atomic_t s_mb_lost_chunks;
45184 - atomic_t s_mb_preallocated;
45185 - atomic_t s_mb_discarded;
45186 + atomic_unchecked_t s_mb_lost_chunks;
45187 + atomic_unchecked_t s_mb_preallocated;
45188 + atomic_unchecked_t s_mb_discarded;
45189 atomic_t s_lock_busy;
45190
45191 /* locality groups */
45192 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
45193 index cb990b2..4820141 100644
45194 --- a/fs/ext4/mballoc.c
45195 +++ b/fs/ext4/mballoc.c
45196 @@ -1794,7 +1794,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
45197 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
45198
45199 if (EXT4_SB(sb)->s_mb_stats)
45200 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
45201 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
45202
45203 break;
45204 }
45205 @@ -2088,7 +2088,7 @@ repeat:
45206 ac->ac_status = AC_STATUS_CONTINUE;
45207 ac->ac_flags |= EXT4_MB_HINT_FIRST;
45208 cr = 3;
45209 - atomic_inc(&sbi->s_mb_lost_chunks);
45210 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
45211 goto repeat;
45212 }
45213 }
45214 @@ -2592,25 +2592,25 @@ int ext4_mb_release(struct super_block *sb)
45215 if (sbi->s_mb_stats) {
45216 ext4_msg(sb, KERN_INFO,
45217 "mballoc: %u blocks %u reqs (%u success)",
45218 - atomic_read(&sbi->s_bal_allocated),
45219 - atomic_read(&sbi->s_bal_reqs),
45220 - atomic_read(&sbi->s_bal_success));
45221 + atomic_read_unchecked(&sbi->s_bal_allocated),
45222 + atomic_read_unchecked(&sbi->s_bal_reqs),
45223 + atomic_read_unchecked(&sbi->s_bal_success));
45224 ext4_msg(sb, KERN_INFO,
45225 "mballoc: %u extents scanned, %u goal hits, "
45226 "%u 2^N hits, %u breaks, %u lost",
45227 - atomic_read(&sbi->s_bal_ex_scanned),
45228 - atomic_read(&sbi->s_bal_goals),
45229 - atomic_read(&sbi->s_bal_2orders),
45230 - atomic_read(&sbi->s_bal_breaks),
45231 - atomic_read(&sbi->s_mb_lost_chunks));
45232 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
45233 + atomic_read_unchecked(&sbi->s_bal_goals),
45234 + atomic_read_unchecked(&sbi->s_bal_2orders),
45235 + atomic_read_unchecked(&sbi->s_bal_breaks),
45236 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
45237 ext4_msg(sb, KERN_INFO,
45238 "mballoc: %lu generated and it took %Lu",
45239 sbi->s_mb_buddies_generated,
45240 sbi->s_mb_generation_time);
45241 ext4_msg(sb, KERN_INFO,
45242 "mballoc: %u preallocated, %u discarded",
45243 - atomic_read(&sbi->s_mb_preallocated),
45244 - atomic_read(&sbi->s_mb_discarded));
45245 + atomic_read_unchecked(&sbi->s_mb_preallocated),
45246 + atomic_read_unchecked(&sbi->s_mb_discarded));
45247 }
45248
45249 free_percpu(sbi->s_locality_groups);
45250 @@ -3096,16 +3096,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
45251 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
45252
45253 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
45254 - atomic_inc(&sbi->s_bal_reqs);
45255 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45256 + atomic_inc_unchecked(&sbi->s_bal_reqs);
45257 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
45258 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
45259 - atomic_inc(&sbi->s_bal_success);
45260 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
45261 + atomic_inc_unchecked(&sbi->s_bal_success);
45262 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
45263 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
45264 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
45265 - atomic_inc(&sbi->s_bal_goals);
45266 + atomic_inc_unchecked(&sbi->s_bal_goals);
45267 if (ac->ac_found > sbi->s_mb_max_to_scan)
45268 - atomic_inc(&sbi->s_bal_breaks);
45269 + atomic_inc_unchecked(&sbi->s_bal_breaks);
45270 }
45271
45272 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
45273 @@ -3509,7 +3509,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
45274 trace_ext4_mb_new_inode_pa(ac, pa);
45275
45276 ext4_mb_use_inode_pa(ac, pa);
45277 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
45278 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
45279
45280 ei = EXT4_I(ac->ac_inode);
45281 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45282 @@ -3569,7 +3569,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
45283 trace_ext4_mb_new_group_pa(ac, pa);
45284
45285 ext4_mb_use_group_pa(ac, pa);
45286 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45287 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
45288
45289 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
45290 lg = ac->ac_lg;
45291 @@ -3658,7 +3658,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
45292 * from the bitmap and continue.
45293 */
45294 }
45295 - atomic_add(free, &sbi->s_mb_discarded);
45296 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
45297
45298 return err;
45299 }
45300 @@ -3676,7 +3676,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
45301 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
45302 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
45303 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
45304 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45305 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
45306 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
45307
45308 return 0;
45309 diff --git a/fs/fcntl.c b/fs/fcntl.c
45310 index 22764c7..86372c9 100644
45311 --- a/fs/fcntl.c
45312 +++ b/fs/fcntl.c
45313 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
45314 if (err)
45315 return err;
45316
45317 + if (gr_handle_chroot_fowner(pid, type))
45318 + return -ENOENT;
45319 + if (gr_check_protected_task_fowner(pid, type))
45320 + return -EACCES;
45321 +
45322 f_modown(filp, pid, type, force);
45323 return 0;
45324 }
45325 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
45326
45327 static int f_setown_ex(struct file *filp, unsigned long arg)
45328 {
45329 - struct f_owner_ex * __user owner_p = (void * __user)arg;
45330 + struct f_owner_ex __user *owner_p = (void __user *)arg;
45331 struct f_owner_ex owner;
45332 struct pid *pid;
45333 int type;
45334 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
45335
45336 static int f_getown_ex(struct file *filp, unsigned long arg)
45337 {
45338 - struct f_owner_ex * __user owner_p = (void * __user)arg;
45339 + struct f_owner_ex __user *owner_p = (void __user *)arg;
45340 struct f_owner_ex owner;
45341 int ret = 0;
45342
45343 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
45344 switch (cmd) {
45345 case F_DUPFD:
45346 case F_DUPFD_CLOEXEC:
45347 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
45348 if (arg >= rlimit(RLIMIT_NOFILE))
45349 break;
45350 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
45351 diff --git a/fs/fifo.c b/fs/fifo.c
45352 index b1a524d..4ee270e 100644
45353 --- a/fs/fifo.c
45354 +++ b/fs/fifo.c
45355 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
45356 */
45357 filp->f_op = &read_pipefifo_fops;
45358 pipe->r_counter++;
45359 - if (pipe->readers++ == 0)
45360 + if (atomic_inc_return(&pipe->readers) == 1)
45361 wake_up_partner(inode);
45362
45363 - if (!pipe->writers) {
45364 + if (!atomic_read(&pipe->writers)) {
45365 if ((filp->f_flags & O_NONBLOCK)) {
45366 /* suppress POLLHUP until we have
45367 * seen a writer */
45368 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
45369 * errno=ENXIO when there is no process reading the FIFO.
45370 */
45371 ret = -ENXIO;
45372 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
45373 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
45374 goto err;
45375
45376 filp->f_op = &write_pipefifo_fops;
45377 pipe->w_counter++;
45378 - if (!pipe->writers++)
45379 + if (atomic_inc_return(&pipe->writers) == 1)
45380 wake_up_partner(inode);
45381
45382 - if (!pipe->readers) {
45383 + if (!atomic_read(&pipe->readers)) {
45384 wait_for_partner(inode, &pipe->r_counter);
45385 if (signal_pending(current))
45386 goto err_wr;
45387 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
45388 */
45389 filp->f_op = &rdwr_pipefifo_fops;
45390
45391 - pipe->readers++;
45392 - pipe->writers++;
45393 + atomic_inc(&pipe->readers);
45394 + atomic_inc(&pipe->writers);
45395 pipe->r_counter++;
45396 pipe->w_counter++;
45397 - if (pipe->readers == 1 || pipe->writers == 1)
45398 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
45399 wake_up_partner(inode);
45400 break;
45401
45402 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
45403 return 0;
45404
45405 err_rd:
45406 - if (!--pipe->readers)
45407 + if (atomic_dec_and_test(&pipe->readers))
45408 wake_up_interruptible(&pipe->wait);
45409 ret = -ERESTARTSYS;
45410 goto err;
45411
45412 err_wr:
45413 - if (!--pipe->writers)
45414 + if (atomic_dec_and_test(&pipe->writers))
45415 wake_up_interruptible(&pipe->wait);
45416 ret = -ERESTARTSYS;
45417 goto err;
45418
45419 err:
45420 - if (!pipe->readers && !pipe->writers)
45421 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
45422 free_pipe_info(inode);
45423
45424 err_nocleanup:
45425 diff --git a/fs/file.c b/fs/file.c
45426 index 4c6992d..104cdea 100644
45427 --- a/fs/file.c
45428 +++ b/fs/file.c
45429 @@ -15,6 +15,7 @@
45430 #include <linux/slab.h>
45431 #include <linux/vmalloc.h>
45432 #include <linux/file.h>
45433 +#include <linux/security.h>
45434 #include <linux/fdtable.h>
45435 #include <linux/bitops.h>
45436 #include <linux/interrupt.h>
45437 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *files, int nr)
45438 * N.B. For clone tasks sharing a files structure, this test
45439 * will limit the total number of files that can be opened.
45440 */
45441 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
45442 if (nr >= rlimit(RLIMIT_NOFILE))
45443 return -EMFILE;
45444
45445 diff --git a/fs/filesystems.c b/fs/filesystems.c
45446 index 96f2428..f5eeb8e 100644
45447 --- a/fs/filesystems.c
45448 +++ b/fs/filesystems.c
45449 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
45450 int len = dot ? dot - name : strlen(name);
45451
45452 fs = __get_fs_type(name, len);
45453 +
45454 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
45455 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
45456 +#else
45457 if (!fs && (request_module("%.*s", len, name) == 0))
45458 +#endif
45459 fs = __get_fs_type(name, len);
45460
45461 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
45462 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
45463 index 78b519c..a8b4979 100644
45464 --- a/fs/fs_struct.c
45465 +++ b/fs/fs_struct.c
45466 @@ -4,6 +4,7 @@
45467 #include <linux/path.h>
45468 #include <linux/slab.h>
45469 #include <linux/fs_struct.h>
45470 +#include <linux/grsecurity.h>
45471 #include "internal.h"
45472
45473 static inline void path_get_longterm(struct path *path)
45474 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
45475 old_root = fs->root;
45476 fs->root = *path;
45477 path_get_longterm(path);
45478 + gr_set_chroot_entries(current, path);
45479 write_seqcount_end(&fs->seq);
45480 spin_unlock(&fs->lock);
45481 if (old_root.dentry)
45482 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
45483 && fs->root.mnt == old_root->mnt) {
45484 path_get_longterm(new_root);
45485 fs->root = *new_root;
45486 + gr_set_chroot_entries(p, new_root);
45487 count++;
45488 }
45489 if (fs->pwd.dentry == old_root->dentry
45490 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
45491 spin_lock(&fs->lock);
45492 write_seqcount_begin(&fs->seq);
45493 tsk->fs = NULL;
45494 - kill = !--fs->users;
45495 + gr_clear_chroot_entries(tsk);
45496 + kill = !atomic_dec_return(&fs->users);
45497 write_seqcount_end(&fs->seq);
45498 spin_unlock(&fs->lock);
45499 task_unlock(tsk);
45500 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45501 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
45502 /* We don't need to lock fs - think why ;-) */
45503 if (fs) {
45504 - fs->users = 1;
45505 + atomic_set(&fs->users, 1);
45506 fs->in_exec = 0;
45507 spin_lock_init(&fs->lock);
45508 seqcount_init(&fs->seq);
45509 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
45510 spin_lock(&old->lock);
45511 fs->root = old->root;
45512 path_get_longterm(&fs->root);
45513 + /* instead of calling gr_set_chroot_entries here,
45514 + we call it from every caller of this function
45515 + */
45516 fs->pwd = old->pwd;
45517 path_get_longterm(&fs->pwd);
45518 spin_unlock(&old->lock);
45519 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
45520
45521 task_lock(current);
45522 spin_lock(&fs->lock);
45523 - kill = !--fs->users;
45524 + kill = !atomic_dec_return(&fs->users);
45525 current->fs = new_fs;
45526 + gr_set_chroot_entries(current, &new_fs->root);
45527 spin_unlock(&fs->lock);
45528 task_unlock(current);
45529
45530 @@ -164,13 +172,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
45531
45532 int current_umask(void)
45533 {
45534 - return current->fs->umask;
45535 + return current->fs->umask | gr_acl_umask();
45536 }
45537 EXPORT_SYMBOL(current_umask);
45538
45539 /* to be mentioned only in INIT_TASK */
45540 struct fs_struct init_fs = {
45541 - .users = 1,
45542 + .users = ATOMIC_INIT(1),
45543 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
45544 .seq = SEQCNT_ZERO,
45545 .umask = 0022,
45546 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
45547 task_lock(current);
45548
45549 spin_lock(&init_fs.lock);
45550 - init_fs.users++;
45551 + atomic_inc(&init_fs.users);
45552 spin_unlock(&init_fs.lock);
45553
45554 spin_lock(&fs->lock);
45555 current->fs = &init_fs;
45556 - kill = !--fs->users;
45557 + gr_set_chroot_entries(current, &current->fs->root);
45558 + kill = !atomic_dec_return(&fs->users);
45559 spin_unlock(&fs->lock);
45560
45561 task_unlock(current);
45562 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
45563 index 9905350..02eaec4 100644
45564 --- a/fs/fscache/cookie.c
45565 +++ b/fs/fscache/cookie.c
45566 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
45567 parent ? (char *) parent->def->name : "<no-parent>",
45568 def->name, netfs_data);
45569
45570 - fscache_stat(&fscache_n_acquires);
45571 + fscache_stat_unchecked(&fscache_n_acquires);
45572
45573 /* if there's no parent cookie, then we don't create one here either */
45574 if (!parent) {
45575 - fscache_stat(&fscache_n_acquires_null);
45576 + fscache_stat_unchecked(&fscache_n_acquires_null);
45577 _leave(" [no parent]");
45578 return NULL;
45579 }
45580 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
45581 /* allocate and initialise a cookie */
45582 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45583 if (!cookie) {
45584 - fscache_stat(&fscache_n_acquires_oom);
45585 + fscache_stat_unchecked(&fscache_n_acquires_oom);
45586 _leave(" [ENOMEM]");
45587 return NULL;
45588 }
45589 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45590
45591 switch (cookie->def->type) {
45592 case FSCACHE_COOKIE_TYPE_INDEX:
45593 - fscache_stat(&fscache_n_cookie_index);
45594 + fscache_stat_unchecked(&fscache_n_cookie_index);
45595 break;
45596 case FSCACHE_COOKIE_TYPE_DATAFILE:
45597 - fscache_stat(&fscache_n_cookie_data);
45598 + fscache_stat_unchecked(&fscache_n_cookie_data);
45599 break;
45600 default:
45601 - fscache_stat(&fscache_n_cookie_special);
45602 + fscache_stat_unchecked(&fscache_n_cookie_special);
45603 break;
45604 }
45605
45606 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
45607 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45608 atomic_dec(&parent->n_children);
45609 __fscache_cookie_put(cookie);
45610 - fscache_stat(&fscache_n_acquires_nobufs);
45611 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45612 _leave(" = NULL");
45613 return NULL;
45614 }
45615 }
45616
45617 - fscache_stat(&fscache_n_acquires_ok);
45618 + fscache_stat_unchecked(&fscache_n_acquires_ok);
45619 _leave(" = %p", cookie);
45620 return cookie;
45621 }
45622 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45623 cache = fscache_select_cache_for_object(cookie->parent);
45624 if (!cache) {
45625 up_read(&fscache_addremove_sem);
45626 - fscache_stat(&fscache_n_acquires_no_cache);
45627 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45628 _leave(" = -ENOMEDIUM [no cache]");
45629 return -ENOMEDIUM;
45630 }
45631 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45632 object = cache->ops->alloc_object(cache, cookie);
45633 fscache_stat_d(&fscache_n_cop_alloc_object);
45634 if (IS_ERR(object)) {
45635 - fscache_stat(&fscache_n_object_no_alloc);
45636 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
45637 ret = PTR_ERR(object);
45638 goto error;
45639 }
45640
45641 - fscache_stat(&fscache_n_object_alloc);
45642 + fscache_stat_unchecked(&fscache_n_object_alloc);
45643
45644 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45645
45646 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45647 struct fscache_object *object;
45648 struct hlist_node *_p;
45649
45650 - fscache_stat(&fscache_n_updates);
45651 + fscache_stat_unchecked(&fscache_n_updates);
45652
45653 if (!cookie) {
45654 - fscache_stat(&fscache_n_updates_null);
45655 + fscache_stat_unchecked(&fscache_n_updates_null);
45656 _leave(" [no cookie]");
45657 return;
45658 }
45659 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45660 struct fscache_object *object;
45661 unsigned long event;
45662
45663 - fscache_stat(&fscache_n_relinquishes);
45664 + fscache_stat_unchecked(&fscache_n_relinquishes);
45665 if (retire)
45666 - fscache_stat(&fscache_n_relinquishes_retire);
45667 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45668
45669 if (!cookie) {
45670 - fscache_stat(&fscache_n_relinquishes_null);
45671 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
45672 _leave(" [no cookie]");
45673 return;
45674 }
45675 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45676
45677 /* wait for the cookie to finish being instantiated (or to fail) */
45678 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45679 - fscache_stat(&fscache_n_relinquishes_waitcrt);
45680 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45681 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45682 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45683 }
45684 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45685 index f6aad48..88dcf26 100644
45686 --- a/fs/fscache/internal.h
45687 +++ b/fs/fscache/internal.h
45688 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45689 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45690 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45691
45692 -extern atomic_t fscache_n_op_pend;
45693 -extern atomic_t fscache_n_op_run;
45694 -extern atomic_t fscache_n_op_enqueue;
45695 -extern atomic_t fscache_n_op_deferred_release;
45696 -extern atomic_t fscache_n_op_release;
45697 -extern atomic_t fscache_n_op_gc;
45698 -extern atomic_t fscache_n_op_cancelled;
45699 -extern atomic_t fscache_n_op_rejected;
45700 +extern atomic_unchecked_t fscache_n_op_pend;
45701 +extern atomic_unchecked_t fscache_n_op_run;
45702 +extern atomic_unchecked_t fscache_n_op_enqueue;
45703 +extern atomic_unchecked_t fscache_n_op_deferred_release;
45704 +extern atomic_unchecked_t fscache_n_op_release;
45705 +extern atomic_unchecked_t fscache_n_op_gc;
45706 +extern atomic_unchecked_t fscache_n_op_cancelled;
45707 +extern atomic_unchecked_t fscache_n_op_rejected;
45708
45709 -extern atomic_t fscache_n_attr_changed;
45710 -extern atomic_t fscache_n_attr_changed_ok;
45711 -extern atomic_t fscache_n_attr_changed_nobufs;
45712 -extern atomic_t fscache_n_attr_changed_nomem;
45713 -extern atomic_t fscache_n_attr_changed_calls;
45714 +extern atomic_unchecked_t fscache_n_attr_changed;
45715 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
45716 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45717 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45718 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
45719
45720 -extern atomic_t fscache_n_allocs;
45721 -extern atomic_t fscache_n_allocs_ok;
45722 -extern atomic_t fscache_n_allocs_wait;
45723 -extern atomic_t fscache_n_allocs_nobufs;
45724 -extern atomic_t fscache_n_allocs_intr;
45725 -extern atomic_t fscache_n_allocs_object_dead;
45726 -extern atomic_t fscache_n_alloc_ops;
45727 -extern atomic_t fscache_n_alloc_op_waits;
45728 +extern atomic_unchecked_t fscache_n_allocs;
45729 +extern atomic_unchecked_t fscache_n_allocs_ok;
45730 +extern atomic_unchecked_t fscache_n_allocs_wait;
45731 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
45732 +extern atomic_unchecked_t fscache_n_allocs_intr;
45733 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
45734 +extern atomic_unchecked_t fscache_n_alloc_ops;
45735 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
45736
45737 -extern atomic_t fscache_n_retrievals;
45738 -extern atomic_t fscache_n_retrievals_ok;
45739 -extern atomic_t fscache_n_retrievals_wait;
45740 -extern atomic_t fscache_n_retrievals_nodata;
45741 -extern atomic_t fscache_n_retrievals_nobufs;
45742 -extern atomic_t fscache_n_retrievals_intr;
45743 -extern atomic_t fscache_n_retrievals_nomem;
45744 -extern atomic_t fscache_n_retrievals_object_dead;
45745 -extern atomic_t fscache_n_retrieval_ops;
45746 -extern atomic_t fscache_n_retrieval_op_waits;
45747 +extern atomic_unchecked_t fscache_n_retrievals;
45748 +extern atomic_unchecked_t fscache_n_retrievals_ok;
45749 +extern atomic_unchecked_t fscache_n_retrievals_wait;
45750 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
45751 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45752 +extern atomic_unchecked_t fscache_n_retrievals_intr;
45753 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
45754 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45755 +extern atomic_unchecked_t fscache_n_retrieval_ops;
45756 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45757
45758 -extern atomic_t fscache_n_stores;
45759 -extern atomic_t fscache_n_stores_ok;
45760 -extern atomic_t fscache_n_stores_again;
45761 -extern atomic_t fscache_n_stores_nobufs;
45762 -extern atomic_t fscache_n_stores_oom;
45763 -extern atomic_t fscache_n_store_ops;
45764 -extern atomic_t fscache_n_store_calls;
45765 -extern atomic_t fscache_n_store_pages;
45766 -extern atomic_t fscache_n_store_radix_deletes;
45767 -extern atomic_t fscache_n_store_pages_over_limit;
45768 +extern atomic_unchecked_t fscache_n_stores;
45769 +extern atomic_unchecked_t fscache_n_stores_ok;
45770 +extern atomic_unchecked_t fscache_n_stores_again;
45771 +extern atomic_unchecked_t fscache_n_stores_nobufs;
45772 +extern atomic_unchecked_t fscache_n_stores_oom;
45773 +extern atomic_unchecked_t fscache_n_store_ops;
45774 +extern atomic_unchecked_t fscache_n_store_calls;
45775 +extern atomic_unchecked_t fscache_n_store_pages;
45776 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
45777 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45778
45779 -extern atomic_t fscache_n_store_vmscan_not_storing;
45780 -extern atomic_t fscache_n_store_vmscan_gone;
45781 -extern atomic_t fscache_n_store_vmscan_busy;
45782 -extern atomic_t fscache_n_store_vmscan_cancelled;
45783 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45784 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45785 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45786 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45787
45788 -extern atomic_t fscache_n_marks;
45789 -extern atomic_t fscache_n_uncaches;
45790 +extern atomic_unchecked_t fscache_n_marks;
45791 +extern atomic_unchecked_t fscache_n_uncaches;
45792
45793 -extern atomic_t fscache_n_acquires;
45794 -extern atomic_t fscache_n_acquires_null;
45795 -extern atomic_t fscache_n_acquires_no_cache;
45796 -extern atomic_t fscache_n_acquires_ok;
45797 -extern atomic_t fscache_n_acquires_nobufs;
45798 -extern atomic_t fscache_n_acquires_oom;
45799 +extern atomic_unchecked_t fscache_n_acquires;
45800 +extern atomic_unchecked_t fscache_n_acquires_null;
45801 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
45802 +extern atomic_unchecked_t fscache_n_acquires_ok;
45803 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
45804 +extern atomic_unchecked_t fscache_n_acquires_oom;
45805
45806 -extern atomic_t fscache_n_updates;
45807 -extern atomic_t fscache_n_updates_null;
45808 -extern atomic_t fscache_n_updates_run;
45809 +extern atomic_unchecked_t fscache_n_updates;
45810 +extern atomic_unchecked_t fscache_n_updates_null;
45811 +extern atomic_unchecked_t fscache_n_updates_run;
45812
45813 -extern atomic_t fscache_n_relinquishes;
45814 -extern atomic_t fscache_n_relinquishes_null;
45815 -extern atomic_t fscache_n_relinquishes_waitcrt;
45816 -extern atomic_t fscache_n_relinquishes_retire;
45817 +extern atomic_unchecked_t fscache_n_relinquishes;
45818 +extern atomic_unchecked_t fscache_n_relinquishes_null;
45819 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45820 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
45821
45822 -extern atomic_t fscache_n_cookie_index;
45823 -extern atomic_t fscache_n_cookie_data;
45824 -extern atomic_t fscache_n_cookie_special;
45825 +extern atomic_unchecked_t fscache_n_cookie_index;
45826 +extern atomic_unchecked_t fscache_n_cookie_data;
45827 +extern atomic_unchecked_t fscache_n_cookie_special;
45828
45829 -extern atomic_t fscache_n_object_alloc;
45830 -extern atomic_t fscache_n_object_no_alloc;
45831 -extern atomic_t fscache_n_object_lookups;
45832 -extern atomic_t fscache_n_object_lookups_negative;
45833 -extern atomic_t fscache_n_object_lookups_positive;
45834 -extern atomic_t fscache_n_object_lookups_timed_out;
45835 -extern atomic_t fscache_n_object_created;
45836 -extern atomic_t fscache_n_object_avail;
45837 -extern atomic_t fscache_n_object_dead;
45838 +extern atomic_unchecked_t fscache_n_object_alloc;
45839 +extern atomic_unchecked_t fscache_n_object_no_alloc;
45840 +extern atomic_unchecked_t fscache_n_object_lookups;
45841 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
45842 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
45843 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45844 +extern atomic_unchecked_t fscache_n_object_created;
45845 +extern atomic_unchecked_t fscache_n_object_avail;
45846 +extern atomic_unchecked_t fscache_n_object_dead;
45847
45848 -extern atomic_t fscache_n_checkaux_none;
45849 -extern atomic_t fscache_n_checkaux_okay;
45850 -extern atomic_t fscache_n_checkaux_update;
45851 -extern atomic_t fscache_n_checkaux_obsolete;
45852 +extern atomic_unchecked_t fscache_n_checkaux_none;
45853 +extern atomic_unchecked_t fscache_n_checkaux_okay;
45854 +extern atomic_unchecked_t fscache_n_checkaux_update;
45855 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45856
45857 extern atomic_t fscache_n_cop_alloc_object;
45858 extern atomic_t fscache_n_cop_lookup_object;
45859 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45860 atomic_inc(stat);
45861 }
45862
45863 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45864 +{
45865 + atomic_inc_unchecked(stat);
45866 +}
45867 +
45868 static inline void fscache_stat_d(atomic_t *stat)
45869 {
45870 atomic_dec(stat);
45871 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45872
45873 #define __fscache_stat(stat) (NULL)
45874 #define fscache_stat(stat) do {} while (0)
45875 +#define fscache_stat_unchecked(stat) do {} while (0)
45876 #define fscache_stat_d(stat) do {} while (0)
45877 #endif
45878
45879 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45880 index b6b897c..0ffff9c 100644
45881 --- a/fs/fscache/object.c
45882 +++ b/fs/fscache/object.c
45883 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45884 /* update the object metadata on disk */
45885 case FSCACHE_OBJECT_UPDATING:
45886 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45887 - fscache_stat(&fscache_n_updates_run);
45888 + fscache_stat_unchecked(&fscache_n_updates_run);
45889 fscache_stat(&fscache_n_cop_update_object);
45890 object->cache->ops->update_object(object);
45891 fscache_stat_d(&fscache_n_cop_update_object);
45892 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45893 spin_lock(&object->lock);
45894 object->state = FSCACHE_OBJECT_DEAD;
45895 spin_unlock(&object->lock);
45896 - fscache_stat(&fscache_n_object_dead);
45897 + fscache_stat_unchecked(&fscache_n_object_dead);
45898 goto terminal_transit;
45899
45900 /* handle the parent cache of this object being withdrawn from
45901 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45902 spin_lock(&object->lock);
45903 object->state = FSCACHE_OBJECT_DEAD;
45904 spin_unlock(&object->lock);
45905 - fscache_stat(&fscache_n_object_dead);
45906 + fscache_stat_unchecked(&fscache_n_object_dead);
45907 goto terminal_transit;
45908
45909 /* complain about the object being woken up once it is
45910 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45911 parent->cookie->def->name, cookie->def->name,
45912 object->cache->tag->name);
45913
45914 - fscache_stat(&fscache_n_object_lookups);
45915 + fscache_stat_unchecked(&fscache_n_object_lookups);
45916 fscache_stat(&fscache_n_cop_lookup_object);
45917 ret = object->cache->ops->lookup_object(object);
45918 fscache_stat_d(&fscache_n_cop_lookup_object);
45919 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45920 if (ret == -ETIMEDOUT) {
45921 /* probably stuck behind another object, so move this one to
45922 * the back of the queue */
45923 - fscache_stat(&fscache_n_object_lookups_timed_out);
45924 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45925 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45926 }
45927
45928 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45929
45930 spin_lock(&object->lock);
45931 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45932 - fscache_stat(&fscache_n_object_lookups_negative);
45933 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45934
45935 /* transit here to allow write requests to begin stacking up
45936 * and read requests to begin returning ENODATA */
45937 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45938 * result, in which case there may be data available */
45939 spin_lock(&object->lock);
45940 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45941 - fscache_stat(&fscache_n_object_lookups_positive);
45942 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45943
45944 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45945
45946 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45947 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45948 } else {
45949 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45950 - fscache_stat(&fscache_n_object_created);
45951 + fscache_stat_unchecked(&fscache_n_object_created);
45952
45953 object->state = FSCACHE_OBJECT_AVAILABLE;
45954 spin_unlock(&object->lock);
45955 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45956 fscache_enqueue_dependents(object);
45957
45958 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45959 - fscache_stat(&fscache_n_object_avail);
45960 + fscache_stat_unchecked(&fscache_n_object_avail);
45961
45962 _leave("");
45963 }
45964 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45965 enum fscache_checkaux result;
45966
45967 if (!object->cookie->def->check_aux) {
45968 - fscache_stat(&fscache_n_checkaux_none);
45969 + fscache_stat_unchecked(&fscache_n_checkaux_none);
45970 return FSCACHE_CHECKAUX_OKAY;
45971 }
45972
45973 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45974 switch (result) {
45975 /* entry okay as is */
45976 case FSCACHE_CHECKAUX_OKAY:
45977 - fscache_stat(&fscache_n_checkaux_okay);
45978 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45979 break;
45980
45981 /* entry requires update */
45982 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45983 - fscache_stat(&fscache_n_checkaux_update);
45984 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45985 break;
45986
45987 /* entry requires deletion */
45988 case FSCACHE_CHECKAUX_OBSOLETE:
45989 - fscache_stat(&fscache_n_checkaux_obsolete);
45990 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45991 break;
45992
45993 default:
45994 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45995 index 30afdfa..2256596 100644
45996 --- a/fs/fscache/operation.c
45997 +++ b/fs/fscache/operation.c
45998 @@ -17,7 +17,7 @@
45999 #include <linux/slab.h>
46000 #include "internal.h"
46001
46002 -atomic_t fscache_op_debug_id;
46003 +atomic_unchecked_t fscache_op_debug_id;
46004 EXPORT_SYMBOL(fscache_op_debug_id);
46005
46006 /**
46007 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
46008 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
46009 ASSERTCMP(atomic_read(&op->usage), >, 0);
46010
46011 - fscache_stat(&fscache_n_op_enqueue);
46012 + fscache_stat_unchecked(&fscache_n_op_enqueue);
46013 switch (op->flags & FSCACHE_OP_TYPE) {
46014 case FSCACHE_OP_ASYNC:
46015 _debug("queue async");
46016 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
46017 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
46018 if (op->processor)
46019 fscache_enqueue_operation(op);
46020 - fscache_stat(&fscache_n_op_run);
46021 + fscache_stat_unchecked(&fscache_n_op_run);
46022 }
46023
46024 /*
46025 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46026 if (object->n_ops > 1) {
46027 atomic_inc(&op->usage);
46028 list_add_tail(&op->pend_link, &object->pending_ops);
46029 - fscache_stat(&fscache_n_op_pend);
46030 + fscache_stat_unchecked(&fscache_n_op_pend);
46031 } else if (!list_empty(&object->pending_ops)) {
46032 atomic_inc(&op->usage);
46033 list_add_tail(&op->pend_link, &object->pending_ops);
46034 - fscache_stat(&fscache_n_op_pend);
46035 + fscache_stat_unchecked(&fscache_n_op_pend);
46036 fscache_start_operations(object);
46037 } else {
46038 ASSERTCMP(object->n_in_progress, ==, 0);
46039 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
46040 object->n_exclusive++; /* reads and writes must wait */
46041 atomic_inc(&op->usage);
46042 list_add_tail(&op->pend_link, &object->pending_ops);
46043 - fscache_stat(&fscache_n_op_pend);
46044 + fscache_stat_unchecked(&fscache_n_op_pend);
46045 ret = 0;
46046 } else {
46047 /* not allowed to submit ops in any other state */
46048 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
46049 if (object->n_exclusive > 0) {
46050 atomic_inc(&op->usage);
46051 list_add_tail(&op->pend_link, &object->pending_ops);
46052 - fscache_stat(&fscache_n_op_pend);
46053 + fscache_stat_unchecked(&fscache_n_op_pend);
46054 } else if (!list_empty(&object->pending_ops)) {
46055 atomic_inc(&op->usage);
46056 list_add_tail(&op->pend_link, &object->pending_ops);
46057 - fscache_stat(&fscache_n_op_pend);
46058 + fscache_stat_unchecked(&fscache_n_op_pend);
46059 fscache_start_operations(object);
46060 } else {
46061 ASSERTCMP(object->n_exclusive, ==, 0);
46062 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
46063 object->n_ops++;
46064 atomic_inc(&op->usage);
46065 list_add_tail(&op->pend_link, &object->pending_ops);
46066 - fscache_stat(&fscache_n_op_pend);
46067 + fscache_stat_unchecked(&fscache_n_op_pend);
46068 ret = 0;
46069 } else if (object->state == FSCACHE_OBJECT_DYING ||
46070 object->state == FSCACHE_OBJECT_LC_DYING ||
46071 object->state == FSCACHE_OBJECT_WITHDRAWING) {
46072 - fscache_stat(&fscache_n_op_rejected);
46073 + fscache_stat_unchecked(&fscache_n_op_rejected);
46074 ret = -ENOBUFS;
46075 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
46076 fscache_report_unexpected_submission(object, op, ostate);
46077 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
46078
46079 ret = -EBUSY;
46080 if (!list_empty(&op->pend_link)) {
46081 - fscache_stat(&fscache_n_op_cancelled);
46082 + fscache_stat_unchecked(&fscache_n_op_cancelled);
46083 list_del_init(&op->pend_link);
46084 object->n_ops--;
46085 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
46086 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
46087 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
46088 BUG();
46089
46090 - fscache_stat(&fscache_n_op_release);
46091 + fscache_stat_unchecked(&fscache_n_op_release);
46092
46093 if (op->release) {
46094 op->release(op);
46095 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
46096 * lock, and defer it otherwise */
46097 if (!spin_trylock(&object->lock)) {
46098 _debug("defer put");
46099 - fscache_stat(&fscache_n_op_deferred_release);
46100 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
46101
46102 cache = object->cache;
46103 spin_lock(&cache->op_gc_list_lock);
46104 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
46105
46106 _debug("GC DEFERRED REL OBJ%x OP%x",
46107 object->debug_id, op->debug_id);
46108 - fscache_stat(&fscache_n_op_gc);
46109 + fscache_stat_unchecked(&fscache_n_op_gc);
46110
46111 ASSERTCMP(atomic_read(&op->usage), ==, 0);
46112
46113 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
46114 index 3f7a59b..cf196cc 100644
46115 --- a/fs/fscache/page.c
46116 +++ b/fs/fscache/page.c
46117 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46118 val = radix_tree_lookup(&cookie->stores, page->index);
46119 if (!val) {
46120 rcu_read_unlock();
46121 - fscache_stat(&fscache_n_store_vmscan_not_storing);
46122 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
46123 __fscache_uncache_page(cookie, page);
46124 return true;
46125 }
46126 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
46127 spin_unlock(&cookie->stores_lock);
46128
46129 if (xpage) {
46130 - fscache_stat(&fscache_n_store_vmscan_cancelled);
46131 - fscache_stat(&fscache_n_store_radix_deletes);
46132 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
46133 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46134 ASSERTCMP(xpage, ==, page);
46135 } else {
46136 - fscache_stat(&fscache_n_store_vmscan_gone);
46137 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
46138 }
46139
46140 wake_up_bit(&cookie->flags, 0);
46141 @@ -107,7 +107,7 @@ page_busy:
46142 /* we might want to wait here, but that could deadlock the allocator as
46143 * the work threads writing to the cache may all end up sleeping
46144 * on memory allocation */
46145 - fscache_stat(&fscache_n_store_vmscan_busy);
46146 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
46147 return false;
46148 }
46149 EXPORT_SYMBOL(__fscache_maybe_release_page);
46150 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
46151 FSCACHE_COOKIE_STORING_TAG);
46152 if (!radix_tree_tag_get(&cookie->stores, page->index,
46153 FSCACHE_COOKIE_PENDING_TAG)) {
46154 - fscache_stat(&fscache_n_store_radix_deletes);
46155 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
46156 xpage = radix_tree_delete(&cookie->stores, page->index);
46157 }
46158 spin_unlock(&cookie->stores_lock);
46159 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
46160
46161 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
46162
46163 - fscache_stat(&fscache_n_attr_changed_calls);
46164 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
46165
46166 if (fscache_object_is_active(object)) {
46167 fscache_stat(&fscache_n_cop_attr_changed);
46168 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46169
46170 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46171
46172 - fscache_stat(&fscache_n_attr_changed);
46173 + fscache_stat_unchecked(&fscache_n_attr_changed);
46174
46175 op = kzalloc(sizeof(*op), GFP_KERNEL);
46176 if (!op) {
46177 - fscache_stat(&fscache_n_attr_changed_nomem);
46178 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
46179 _leave(" = -ENOMEM");
46180 return -ENOMEM;
46181 }
46182 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46183 if (fscache_submit_exclusive_op(object, op) < 0)
46184 goto nobufs;
46185 spin_unlock(&cookie->lock);
46186 - fscache_stat(&fscache_n_attr_changed_ok);
46187 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
46188 fscache_put_operation(op);
46189 _leave(" = 0");
46190 return 0;
46191 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
46192 nobufs:
46193 spin_unlock(&cookie->lock);
46194 kfree(op);
46195 - fscache_stat(&fscache_n_attr_changed_nobufs);
46196 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
46197 _leave(" = %d", -ENOBUFS);
46198 return -ENOBUFS;
46199 }
46200 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
46201 /* allocate a retrieval operation and attempt to submit it */
46202 op = kzalloc(sizeof(*op), GFP_NOIO);
46203 if (!op) {
46204 - fscache_stat(&fscache_n_retrievals_nomem);
46205 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46206 return NULL;
46207 }
46208
46209 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46210 return 0;
46211 }
46212
46213 - fscache_stat(&fscache_n_retrievals_wait);
46214 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
46215
46216 jif = jiffies;
46217 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
46218 fscache_wait_bit_interruptible,
46219 TASK_INTERRUPTIBLE) != 0) {
46220 - fscache_stat(&fscache_n_retrievals_intr);
46221 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46222 _leave(" = -ERESTARTSYS");
46223 return -ERESTARTSYS;
46224 }
46225 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
46226 */
46227 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46228 struct fscache_retrieval *op,
46229 - atomic_t *stat_op_waits,
46230 - atomic_t *stat_object_dead)
46231 + atomic_unchecked_t *stat_op_waits,
46232 + atomic_unchecked_t *stat_object_dead)
46233 {
46234 int ret;
46235
46236 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46237 goto check_if_dead;
46238
46239 _debug(">>> WT");
46240 - fscache_stat(stat_op_waits);
46241 + fscache_stat_unchecked(stat_op_waits);
46242 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
46243 fscache_wait_bit_interruptible,
46244 TASK_INTERRUPTIBLE) < 0) {
46245 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
46246
46247 check_if_dead:
46248 if (unlikely(fscache_object_is_dead(object))) {
46249 - fscache_stat(stat_object_dead);
46250 + fscache_stat_unchecked(stat_object_dead);
46251 return -ENOBUFS;
46252 }
46253 return 0;
46254 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46255
46256 _enter("%p,%p,,,", cookie, page);
46257
46258 - fscache_stat(&fscache_n_retrievals);
46259 + fscache_stat_unchecked(&fscache_n_retrievals);
46260
46261 if (hlist_empty(&cookie->backing_objects))
46262 goto nobufs;
46263 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46264 goto nobufs_unlock;
46265 spin_unlock(&cookie->lock);
46266
46267 - fscache_stat(&fscache_n_retrieval_ops);
46268 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
46269
46270 /* pin the netfs read context in case we need to do the actual netfs
46271 * read because we've encountered a cache read failure */
46272 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
46273
46274 error:
46275 if (ret == -ENOMEM)
46276 - fscache_stat(&fscache_n_retrievals_nomem);
46277 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46278 else if (ret == -ERESTARTSYS)
46279 - fscache_stat(&fscache_n_retrievals_intr);
46280 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46281 else if (ret == -ENODATA)
46282 - fscache_stat(&fscache_n_retrievals_nodata);
46283 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46284 else if (ret < 0)
46285 - fscache_stat(&fscache_n_retrievals_nobufs);
46286 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46287 else
46288 - fscache_stat(&fscache_n_retrievals_ok);
46289 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
46290
46291 fscache_put_retrieval(op);
46292 _leave(" = %d", ret);
46293 @@ -429,7 +429,7 @@ nobufs_unlock:
46294 spin_unlock(&cookie->lock);
46295 kfree(op);
46296 nobufs:
46297 - fscache_stat(&fscache_n_retrievals_nobufs);
46298 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46299 _leave(" = -ENOBUFS");
46300 return -ENOBUFS;
46301 }
46302 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46303
46304 _enter("%p,,%d,,,", cookie, *nr_pages);
46305
46306 - fscache_stat(&fscache_n_retrievals);
46307 + fscache_stat_unchecked(&fscache_n_retrievals);
46308
46309 if (hlist_empty(&cookie->backing_objects))
46310 goto nobufs;
46311 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46312 goto nobufs_unlock;
46313 spin_unlock(&cookie->lock);
46314
46315 - fscache_stat(&fscache_n_retrieval_ops);
46316 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
46317
46318 /* pin the netfs read context in case we need to do the actual netfs
46319 * read because we've encountered a cache read failure */
46320 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
46321
46322 error:
46323 if (ret == -ENOMEM)
46324 - fscache_stat(&fscache_n_retrievals_nomem);
46325 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
46326 else if (ret == -ERESTARTSYS)
46327 - fscache_stat(&fscache_n_retrievals_intr);
46328 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
46329 else if (ret == -ENODATA)
46330 - fscache_stat(&fscache_n_retrievals_nodata);
46331 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
46332 else if (ret < 0)
46333 - fscache_stat(&fscache_n_retrievals_nobufs);
46334 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46335 else
46336 - fscache_stat(&fscache_n_retrievals_ok);
46337 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
46338
46339 fscache_put_retrieval(op);
46340 _leave(" = %d", ret);
46341 @@ -545,7 +545,7 @@ nobufs_unlock:
46342 spin_unlock(&cookie->lock);
46343 kfree(op);
46344 nobufs:
46345 - fscache_stat(&fscache_n_retrievals_nobufs);
46346 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
46347 _leave(" = -ENOBUFS");
46348 return -ENOBUFS;
46349 }
46350 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46351
46352 _enter("%p,%p,,,", cookie, page);
46353
46354 - fscache_stat(&fscache_n_allocs);
46355 + fscache_stat_unchecked(&fscache_n_allocs);
46356
46357 if (hlist_empty(&cookie->backing_objects))
46358 goto nobufs;
46359 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46360 goto nobufs_unlock;
46361 spin_unlock(&cookie->lock);
46362
46363 - fscache_stat(&fscache_n_alloc_ops);
46364 + fscache_stat_unchecked(&fscache_n_alloc_ops);
46365
46366 ret = fscache_wait_for_retrieval_activation(
46367 object, op,
46368 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
46369
46370 error:
46371 if (ret == -ERESTARTSYS)
46372 - fscache_stat(&fscache_n_allocs_intr);
46373 + fscache_stat_unchecked(&fscache_n_allocs_intr);
46374 else if (ret < 0)
46375 - fscache_stat(&fscache_n_allocs_nobufs);
46376 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46377 else
46378 - fscache_stat(&fscache_n_allocs_ok);
46379 + fscache_stat_unchecked(&fscache_n_allocs_ok);
46380
46381 fscache_put_retrieval(op);
46382 _leave(" = %d", ret);
46383 @@ -625,7 +625,7 @@ nobufs_unlock:
46384 spin_unlock(&cookie->lock);
46385 kfree(op);
46386 nobufs:
46387 - fscache_stat(&fscache_n_allocs_nobufs);
46388 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
46389 _leave(" = -ENOBUFS");
46390 return -ENOBUFS;
46391 }
46392 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46393
46394 spin_lock(&cookie->stores_lock);
46395
46396 - fscache_stat(&fscache_n_store_calls);
46397 + fscache_stat_unchecked(&fscache_n_store_calls);
46398
46399 /* find a page to store */
46400 page = NULL;
46401 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46402 page = results[0];
46403 _debug("gang %d [%lx]", n, page->index);
46404 if (page->index > op->store_limit) {
46405 - fscache_stat(&fscache_n_store_pages_over_limit);
46406 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
46407 goto superseded;
46408 }
46409
46410 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
46411 spin_unlock(&cookie->stores_lock);
46412 spin_unlock(&object->lock);
46413
46414 - fscache_stat(&fscache_n_store_pages);
46415 + fscache_stat_unchecked(&fscache_n_store_pages);
46416 fscache_stat(&fscache_n_cop_write_page);
46417 ret = object->cache->ops->write_page(op, page);
46418 fscache_stat_d(&fscache_n_cop_write_page);
46419 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46420 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46421 ASSERT(PageFsCache(page));
46422
46423 - fscache_stat(&fscache_n_stores);
46424 + fscache_stat_unchecked(&fscache_n_stores);
46425
46426 op = kzalloc(sizeof(*op), GFP_NOIO);
46427 if (!op)
46428 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46429 spin_unlock(&cookie->stores_lock);
46430 spin_unlock(&object->lock);
46431
46432 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
46433 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
46434 op->store_limit = object->store_limit;
46435
46436 if (fscache_submit_op(object, &op->op) < 0)
46437 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46438
46439 spin_unlock(&cookie->lock);
46440 radix_tree_preload_end();
46441 - fscache_stat(&fscache_n_store_ops);
46442 - fscache_stat(&fscache_n_stores_ok);
46443 + fscache_stat_unchecked(&fscache_n_store_ops);
46444 + fscache_stat_unchecked(&fscache_n_stores_ok);
46445
46446 /* the work queue now carries its own ref on the object */
46447 fscache_put_operation(&op->op);
46448 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
46449 return 0;
46450
46451 already_queued:
46452 - fscache_stat(&fscache_n_stores_again);
46453 + fscache_stat_unchecked(&fscache_n_stores_again);
46454 already_pending:
46455 spin_unlock(&cookie->stores_lock);
46456 spin_unlock(&object->lock);
46457 spin_unlock(&cookie->lock);
46458 radix_tree_preload_end();
46459 kfree(op);
46460 - fscache_stat(&fscache_n_stores_ok);
46461 + fscache_stat_unchecked(&fscache_n_stores_ok);
46462 _leave(" = 0");
46463 return 0;
46464
46465 @@ -851,14 +851,14 @@ nobufs:
46466 spin_unlock(&cookie->lock);
46467 radix_tree_preload_end();
46468 kfree(op);
46469 - fscache_stat(&fscache_n_stores_nobufs);
46470 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
46471 _leave(" = -ENOBUFS");
46472 return -ENOBUFS;
46473
46474 nomem_free:
46475 kfree(op);
46476 nomem:
46477 - fscache_stat(&fscache_n_stores_oom);
46478 + fscache_stat_unchecked(&fscache_n_stores_oom);
46479 _leave(" = -ENOMEM");
46480 return -ENOMEM;
46481 }
46482 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
46483 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
46484 ASSERTCMP(page, !=, NULL);
46485
46486 - fscache_stat(&fscache_n_uncaches);
46487 + fscache_stat_unchecked(&fscache_n_uncaches);
46488
46489 /* cache withdrawal may beat us to it */
46490 if (!PageFsCache(page))
46491 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
46492 unsigned long loop;
46493
46494 #ifdef CONFIG_FSCACHE_STATS
46495 - atomic_add(pagevec->nr, &fscache_n_marks);
46496 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
46497 #endif
46498
46499 for (loop = 0; loop < pagevec->nr; loop++) {
46500 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
46501 index 4765190..2a067f2 100644
46502 --- a/fs/fscache/stats.c
46503 +++ b/fs/fscache/stats.c
46504 @@ -18,95 +18,95 @@
46505 /*
46506 * operation counters
46507 */
46508 -atomic_t fscache_n_op_pend;
46509 -atomic_t fscache_n_op_run;
46510 -atomic_t fscache_n_op_enqueue;
46511 -atomic_t fscache_n_op_requeue;
46512 -atomic_t fscache_n_op_deferred_release;
46513 -atomic_t fscache_n_op_release;
46514 -atomic_t fscache_n_op_gc;
46515 -atomic_t fscache_n_op_cancelled;
46516 -atomic_t fscache_n_op_rejected;
46517 +atomic_unchecked_t fscache_n_op_pend;
46518 +atomic_unchecked_t fscache_n_op_run;
46519 +atomic_unchecked_t fscache_n_op_enqueue;
46520 +atomic_unchecked_t fscache_n_op_requeue;
46521 +atomic_unchecked_t fscache_n_op_deferred_release;
46522 +atomic_unchecked_t fscache_n_op_release;
46523 +atomic_unchecked_t fscache_n_op_gc;
46524 +atomic_unchecked_t fscache_n_op_cancelled;
46525 +atomic_unchecked_t fscache_n_op_rejected;
46526
46527 -atomic_t fscache_n_attr_changed;
46528 -atomic_t fscache_n_attr_changed_ok;
46529 -atomic_t fscache_n_attr_changed_nobufs;
46530 -atomic_t fscache_n_attr_changed_nomem;
46531 -atomic_t fscache_n_attr_changed_calls;
46532 +atomic_unchecked_t fscache_n_attr_changed;
46533 +atomic_unchecked_t fscache_n_attr_changed_ok;
46534 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
46535 +atomic_unchecked_t fscache_n_attr_changed_nomem;
46536 +atomic_unchecked_t fscache_n_attr_changed_calls;
46537
46538 -atomic_t fscache_n_allocs;
46539 -atomic_t fscache_n_allocs_ok;
46540 -atomic_t fscache_n_allocs_wait;
46541 -atomic_t fscache_n_allocs_nobufs;
46542 -atomic_t fscache_n_allocs_intr;
46543 -atomic_t fscache_n_allocs_object_dead;
46544 -atomic_t fscache_n_alloc_ops;
46545 -atomic_t fscache_n_alloc_op_waits;
46546 +atomic_unchecked_t fscache_n_allocs;
46547 +atomic_unchecked_t fscache_n_allocs_ok;
46548 +atomic_unchecked_t fscache_n_allocs_wait;
46549 +atomic_unchecked_t fscache_n_allocs_nobufs;
46550 +atomic_unchecked_t fscache_n_allocs_intr;
46551 +atomic_unchecked_t fscache_n_allocs_object_dead;
46552 +atomic_unchecked_t fscache_n_alloc_ops;
46553 +atomic_unchecked_t fscache_n_alloc_op_waits;
46554
46555 -atomic_t fscache_n_retrievals;
46556 -atomic_t fscache_n_retrievals_ok;
46557 -atomic_t fscache_n_retrievals_wait;
46558 -atomic_t fscache_n_retrievals_nodata;
46559 -atomic_t fscache_n_retrievals_nobufs;
46560 -atomic_t fscache_n_retrievals_intr;
46561 -atomic_t fscache_n_retrievals_nomem;
46562 -atomic_t fscache_n_retrievals_object_dead;
46563 -atomic_t fscache_n_retrieval_ops;
46564 -atomic_t fscache_n_retrieval_op_waits;
46565 +atomic_unchecked_t fscache_n_retrievals;
46566 +atomic_unchecked_t fscache_n_retrievals_ok;
46567 +atomic_unchecked_t fscache_n_retrievals_wait;
46568 +atomic_unchecked_t fscache_n_retrievals_nodata;
46569 +atomic_unchecked_t fscache_n_retrievals_nobufs;
46570 +atomic_unchecked_t fscache_n_retrievals_intr;
46571 +atomic_unchecked_t fscache_n_retrievals_nomem;
46572 +atomic_unchecked_t fscache_n_retrievals_object_dead;
46573 +atomic_unchecked_t fscache_n_retrieval_ops;
46574 +atomic_unchecked_t fscache_n_retrieval_op_waits;
46575
46576 -atomic_t fscache_n_stores;
46577 -atomic_t fscache_n_stores_ok;
46578 -atomic_t fscache_n_stores_again;
46579 -atomic_t fscache_n_stores_nobufs;
46580 -atomic_t fscache_n_stores_oom;
46581 -atomic_t fscache_n_store_ops;
46582 -atomic_t fscache_n_store_calls;
46583 -atomic_t fscache_n_store_pages;
46584 -atomic_t fscache_n_store_radix_deletes;
46585 -atomic_t fscache_n_store_pages_over_limit;
46586 +atomic_unchecked_t fscache_n_stores;
46587 +atomic_unchecked_t fscache_n_stores_ok;
46588 +atomic_unchecked_t fscache_n_stores_again;
46589 +atomic_unchecked_t fscache_n_stores_nobufs;
46590 +atomic_unchecked_t fscache_n_stores_oom;
46591 +atomic_unchecked_t fscache_n_store_ops;
46592 +atomic_unchecked_t fscache_n_store_calls;
46593 +atomic_unchecked_t fscache_n_store_pages;
46594 +atomic_unchecked_t fscache_n_store_radix_deletes;
46595 +atomic_unchecked_t fscache_n_store_pages_over_limit;
46596
46597 -atomic_t fscache_n_store_vmscan_not_storing;
46598 -atomic_t fscache_n_store_vmscan_gone;
46599 -atomic_t fscache_n_store_vmscan_busy;
46600 -atomic_t fscache_n_store_vmscan_cancelled;
46601 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46602 +atomic_unchecked_t fscache_n_store_vmscan_gone;
46603 +atomic_unchecked_t fscache_n_store_vmscan_busy;
46604 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
46605
46606 -atomic_t fscache_n_marks;
46607 -atomic_t fscache_n_uncaches;
46608 +atomic_unchecked_t fscache_n_marks;
46609 +atomic_unchecked_t fscache_n_uncaches;
46610
46611 -atomic_t fscache_n_acquires;
46612 -atomic_t fscache_n_acquires_null;
46613 -atomic_t fscache_n_acquires_no_cache;
46614 -atomic_t fscache_n_acquires_ok;
46615 -atomic_t fscache_n_acquires_nobufs;
46616 -atomic_t fscache_n_acquires_oom;
46617 +atomic_unchecked_t fscache_n_acquires;
46618 +atomic_unchecked_t fscache_n_acquires_null;
46619 +atomic_unchecked_t fscache_n_acquires_no_cache;
46620 +atomic_unchecked_t fscache_n_acquires_ok;
46621 +atomic_unchecked_t fscache_n_acquires_nobufs;
46622 +atomic_unchecked_t fscache_n_acquires_oom;
46623
46624 -atomic_t fscache_n_updates;
46625 -atomic_t fscache_n_updates_null;
46626 -atomic_t fscache_n_updates_run;
46627 +atomic_unchecked_t fscache_n_updates;
46628 +atomic_unchecked_t fscache_n_updates_null;
46629 +atomic_unchecked_t fscache_n_updates_run;
46630
46631 -atomic_t fscache_n_relinquishes;
46632 -atomic_t fscache_n_relinquishes_null;
46633 -atomic_t fscache_n_relinquishes_waitcrt;
46634 -atomic_t fscache_n_relinquishes_retire;
46635 +atomic_unchecked_t fscache_n_relinquishes;
46636 +atomic_unchecked_t fscache_n_relinquishes_null;
46637 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46638 +atomic_unchecked_t fscache_n_relinquishes_retire;
46639
46640 -atomic_t fscache_n_cookie_index;
46641 -atomic_t fscache_n_cookie_data;
46642 -atomic_t fscache_n_cookie_special;
46643 +atomic_unchecked_t fscache_n_cookie_index;
46644 +atomic_unchecked_t fscache_n_cookie_data;
46645 +atomic_unchecked_t fscache_n_cookie_special;
46646
46647 -atomic_t fscache_n_object_alloc;
46648 -atomic_t fscache_n_object_no_alloc;
46649 -atomic_t fscache_n_object_lookups;
46650 -atomic_t fscache_n_object_lookups_negative;
46651 -atomic_t fscache_n_object_lookups_positive;
46652 -atomic_t fscache_n_object_lookups_timed_out;
46653 -atomic_t fscache_n_object_created;
46654 -atomic_t fscache_n_object_avail;
46655 -atomic_t fscache_n_object_dead;
46656 +atomic_unchecked_t fscache_n_object_alloc;
46657 +atomic_unchecked_t fscache_n_object_no_alloc;
46658 +atomic_unchecked_t fscache_n_object_lookups;
46659 +atomic_unchecked_t fscache_n_object_lookups_negative;
46660 +atomic_unchecked_t fscache_n_object_lookups_positive;
46661 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
46662 +atomic_unchecked_t fscache_n_object_created;
46663 +atomic_unchecked_t fscache_n_object_avail;
46664 +atomic_unchecked_t fscache_n_object_dead;
46665
46666 -atomic_t fscache_n_checkaux_none;
46667 -atomic_t fscache_n_checkaux_okay;
46668 -atomic_t fscache_n_checkaux_update;
46669 -atomic_t fscache_n_checkaux_obsolete;
46670 +atomic_unchecked_t fscache_n_checkaux_none;
46671 +atomic_unchecked_t fscache_n_checkaux_okay;
46672 +atomic_unchecked_t fscache_n_checkaux_update;
46673 +atomic_unchecked_t fscache_n_checkaux_obsolete;
46674
46675 atomic_t fscache_n_cop_alloc_object;
46676 atomic_t fscache_n_cop_lookup_object;
46677 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46678 seq_puts(m, "FS-Cache statistics\n");
46679
46680 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46681 - atomic_read(&fscache_n_cookie_index),
46682 - atomic_read(&fscache_n_cookie_data),
46683 - atomic_read(&fscache_n_cookie_special));
46684 + atomic_read_unchecked(&fscache_n_cookie_index),
46685 + atomic_read_unchecked(&fscache_n_cookie_data),
46686 + atomic_read_unchecked(&fscache_n_cookie_special));
46687
46688 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46689 - atomic_read(&fscache_n_object_alloc),
46690 - atomic_read(&fscache_n_object_no_alloc),
46691 - atomic_read(&fscache_n_object_avail),
46692 - atomic_read(&fscache_n_object_dead));
46693 + atomic_read_unchecked(&fscache_n_object_alloc),
46694 + atomic_read_unchecked(&fscache_n_object_no_alloc),
46695 + atomic_read_unchecked(&fscache_n_object_avail),
46696 + atomic_read_unchecked(&fscache_n_object_dead));
46697 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46698 - atomic_read(&fscache_n_checkaux_none),
46699 - atomic_read(&fscache_n_checkaux_okay),
46700 - atomic_read(&fscache_n_checkaux_update),
46701 - atomic_read(&fscache_n_checkaux_obsolete));
46702 + atomic_read_unchecked(&fscache_n_checkaux_none),
46703 + atomic_read_unchecked(&fscache_n_checkaux_okay),
46704 + atomic_read_unchecked(&fscache_n_checkaux_update),
46705 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46706
46707 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46708 - atomic_read(&fscache_n_marks),
46709 - atomic_read(&fscache_n_uncaches));
46710 + atomic_read_unchecked(&fscache_n_marks),
46711 + atomic_read_unchecked(&fscache_n_uncaches));
46712
46713 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46714 " oom=%u\n",
46715 - atomic_read(&fscache_n_acquires),
46716 - atomic_read(&fscache_n_acquires_null),
46717 - atomic_read(&fscache_n_acquires_no_cache),
46718 - atomic_read(&fscache_n_acquires_ok),
46719 - atomic_read(&fscache_n_acquires_nobufs),
46720 - atomic_read(&fscache_n_acquires_oom));
46721 + atomic_read_unchecked(&fscache_n_acquires),
46722 + atomic_read_unchecked(&fscache_n_acquires_null),
46723 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
46724 + atomic_read_unchecked(&fscache_n_acquires_ok),
46725 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
46726 + atomic_read_unchecked(&fscache_n_acquires_oom));
46727
46728 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46729 - atomic_read(&fscache_n_object_lookups),
46730 - atomic_read(&fscache_n_object_lookups_negative),
46731 - atomic_read(&fscache_n_object_lookups_positive),
46732 - atomic_read(&fscache_n_object_created),
46733 - atomic_read(&fscache_n_object_lookups_timed_out));
46734 + atomic_read_unchecked(&fscache_n_object_lookups),
46735 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
46736 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
46737 + atomic_read_unchecked(&fscache_n_object_created),
46738 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46739
46740 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46741 - atomic_read(&fscache_n_updates),
46742 - atomic_read(&fscache_n_updates_null),
46743 - atomic_read(&fscache_n_updates_run));
46744 + atomic_read_unchecked(&fscache_n_updates),
46745 + atomic_read_unchecked(&fscache_n_updates_null),
46746 + atomic_read_unchecked(&fscache_n_updates_run));
46747
46748 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46749 - atomic_read(&fscache_n_relinquishes),
46750 - atomic_read(&fscache_n_relinquishes_null),
46751 - atomic_read(&fscache_n_relinquishes_waitcrt),
46752 - atomic_read(&fscache_n_relinquishes_retire));
46753 + atomic_read_unchecked(&fscache_n_relinquishes),
46754 + atomic_read_unchecked(&fscache_n_relinquishes_null),
46755 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46756 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
46757
46758 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46759 - atomic_read(&fscache_n_attr_changed),
46760 - atomic_read(&fscache_n_attr_changed_ok),
46761 - atomic_read(&fscache_n_attr_changed_nobufs),
46762 - atomic_read(&fscache_n_attr_changed_nomem),
46763 - atomic_read(&fscache_n_attr_changed_calls));
46764 + atomic_read_unchecked(&fscache_n_attr_changed),
46765 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
46766 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46767 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46768 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
46769
46770 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46771 - atomic_read(&fscache_n_allocs),
46772 - atomic_read(&fscache_n_allocs_ok),
46773 - atomic_read(&fscache_n_allocs_wait),
46774 - atomic_read(&fscache_n_allocs_nobufs),
46775 - atomic_read(&fscache_n_allocs_intr));
46776 + atomic_read_unchecked(&fscache_n_allocs),
46777 + atomic_read_unchecked(&fscache_n_allocs_ok),
46778 + atomic_read_unchecked(&fscache_n_allocs_wait),
46779 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
46780 + atomic_read_unchecked(&fscache_n_allocs_intr));
46781 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46782 - atomic_read(&fscache_n_alloc_ops),
46783 - atomic_read(&fscache_n_alloc_op_waits),
46784 - atomic_read(&fscache_n_allocs_object_dead));
46785 + atomic_read_unchecked(&fscache_n_alloc_ops),
46786 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
46787 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
46788
46789 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46790 " int=%u oom=%u\n",
46791 - atomic_read(&fscache_n_retrievals),
46792 - atomic_read(&fscache_n_retrievals_ok),
46793 - atomic_read(&fscache_n_retrievals_wait),
46794 - atomic_read(&fscache_n_retrievals_nodata),
46795 - atomic_read(&fscache_n_retrievals_nobufs),
46796 - atomic_read(&fscache_n_retrievals_intr),
46797 - atomic_read(&fscache_n_retrievals_nomem));
46798 + atomic_read_unchecked(&fscache_n_retrievals),
46799 + atomic_read_unchecked(&fscache_n_retrievals_ok),
46800 + atomic_read_unchecked(&fscache_n_retrievals_wait),
46801 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
46802 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46803 + atomic_read_unchecked(&fscache_n_retrievals_intr),
46804 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
46805 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46806 - atomic_read(&fscache_n_retrieval_ops),
46807 - atomic_read(&fscache_n_retrieval_op_waits),
46808 - atomic_read(&fscache_n_retrievals_object_dead));
46809 + atomic_read_unchecked(&fscache_n_retrieval_ops),
46810 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46811 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46812
46813 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46814 - atomic_read(&fscache_n_stores),
46815 - atomic_read(&fscache_n_stores_ok),
46816 - atomic_read(&fscache_n_stores_again),
46817 - atomic_read(&fscache_n_stores_nobufs),
46818 - atomic_read(&fscache_n_stores_oom));
46819 + atomic_read_unchecked(&fscache_n_stores),
46820 + atomic_read_unchecked(&fscache_n_stores_ok),
46821 + atomic_read_unchecked(&fscache_n_stores_again),
46822 + atomic_read_unchecked(&fscache_n_stores_nobufs),
46823 + atomic_read_unchecked(&fscache_n_stores_oom));
46824 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46825 - atomic_read(&fscache_n_store_ops),
46826 - atomic_read(&fscache_n_store_calls),
46827 - atomic_read(&fscache_n_store_pages),
46828 - atomic_read(&fscache_n_store_radix_deletes),
46829 - atomic_read(&fscache_n_store_pages_over_limit));
46830 + atomic_read_unchecked(&fscache_n_store_ops),
46831 + atomic_read_unchecked(&fscache_n_store_calls),
46832 + atomic_read_unchecked(&fscache_n_store_pages),
46833 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
46834 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46835
46836 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46837 - atomic_read(&fscache_n_store_vmscan_not_storing),
46838 - atomic_read(&fscache_n_store_vmscan_gone),
46839 - atomic_read(&fscache_n_store_vmscan_busy),
46840 - atomic_read(&fscache_n_store_vmscan_cancelled));
46841 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46842 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46843 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46844 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46845
46846 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46847 - atomic_read(&fscache_n_op_pend),
46848 - atomic_read(&fscache_n_op_run),
46849 - atomic_read(&fscache_n_op_enqueue),
46850 - atomic_read(&fscache_n_op_cancelled),
46851 - atomic_read(&fscache_n_op_rejected));
46852 + atomic_read_unchecked(&fscache_n_op_pend),
46853 + atomic_read_unchecked(&fscache_n_op_run),
46854 + atomic_read_unchecked(&fscache_n_op_enqueue),
46855 + atomic_read_unchecked(&fscache_n_op_cancelled),
46856 + atomic_read_unchecked(&fscache_n_op_rejected));
46857 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46858 - atomic_read(&fscache_n_op_deferred_release),
46859 - atomic_read(&fscache_n_op_release),
46860 - atomic_read(&fscache_n_op_gc));
46861 + atomic_read_unchecked(&fscache_n_op_deferred_release),
46862 + atomic_read_unchecked(&fscache_n_op_release),
46863 + atomic_read_unchecked(&fscache_n_op_gc));
46864
46865 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46866 atomic_read(&fscache_n_cop_alloc_object),
46867 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46868 index 3426521..3b75162 100644
46869 --- a/fs/fuse/cuse.c
46870 +++ b/fs/fuse/cuse.c
46871 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
46872 INIT_LIST_HEAD(&cuse_conntbl[i]);
46873
46874 /* inherit and extend fuse_dev_operations */
46875 - cuse_channel_fops = fuse_dev_operations;
46876 - cuse_channel_fops.owner = THIS_MODULE;
46877 - cuse_channel_fops.open = cuse_channel_open;
46878 - cuse_channel_fops.release = cuse_channel_release;
46879 + pax_open_kernel();
46880 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46881 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46882 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
46883 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
46884 + pax_close_kernel();
46885
46886 cuse_class = class_create(THIS_MODULE, "cuse");
46887 if (IS_ERR(cuse_class))
46888 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46889 index 5f3368a..8306426 100644
46890 --- a/fs/fuse/dev.c
46891 +++ b/fs/fuse/dev.c
46892 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46893 ret = 0;
46894 pipe_lock(pipe);
46895
46896 - if (!pipe->readers) {
46897 + if (!atomic_read(&pipe->readers)) {
46898 send_sig(SIGPIPE, current, 0);
46899 if (!ret)
46900 ret = -EPIPE;
46901 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46902 index 2066328..f5add3b 100644
46903 --- a/fs/fuse/dir.c
46904 +++ b/fs/fuse/dir.c
46905 @@ -1175,7 +1175,7 @@ static char *read_link(struct dentry *dentry)
46906 return link;
46907 }
46908
46909 -static void free_link(char *link)
46910 +static void free_link(const char *link)
46911 {
46912 if (!IS_ERR(link))
46913 free_page((unsigned long) link);
46914 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46915 index 5698746..6086012 100644
46916 --- a/fs/gfs2/inode.c
46917 +++ b/fs/gfs2/inode.c
46918 @@ -1487,7 +1487,7 @@ out:
46919
46920 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46921 {
46922 - char *s = nd_get_link(nd);
46923 + const char *s = nd_get_link(nd);
46924 if (!IS_ERR(s))
46925 kfree(s);
46926 }
46927 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46928 index c60267e..193d9e4 100644
46929 --- a/fs/hugetlbfs/inode.c
46930 +++ b/fs/hugetlbfs/inode.c
46931 @@ -902,7 +902,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46932 .kill_sb = kill_litter_super,
46933 };
46934
46935 -static struct vfsmount *hugetlbfs_vfsmount;
46936 +struct vfsmount *hugetlbfs_vfsmount;
46937
46938 static int can_do_hugetlb_shm(void)
46939 {
46940 diff --git a/fs/inode.c b/fs/inode.c
46941 index 83ab215..8842101 100644
46942 --- a/fs/inode.c
46943 +++ b/fs/inode.c
46944 @@ -870,8 +870,8 @@ unsigned int get_next_ino(void)
46945
46946 #ifdef CONFIG_SMP
46947 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46948 - static atomic_t shared_last_ino;
46949 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46950 + static atomic_unchecked_t shared_last_ino;
46951 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46952
46953 res = next - LAST_INO_BATCH;
46954 }
46955 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46956 index eafb8d3..f423d37 100644
46957 --- a/fs/jffs2/erase.c
46958 +++ b/fs/jffs2/erase.c
46959 @@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46960 struct jffs2_unknown_node marker = {
46961 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46962 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46963 - .totlen = cpu_to_je32(c->cleanmarker_size)
46964 + .totlen = cpu_to_je32(c->cleanmarker_size),
46965 + .hdr_crc = cpu_to_je32(0)
46966 };
46967
46968 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46969 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46970 index 30e8f47..21f600c 100644
46971 --- a/fs/jffs2/wbuf.c
46972 +++ b/fs/jffs2/wbuf.c
46973 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46974 {
46975 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46976 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46977 - .totlen = constant_cpu_to_je32(8)
46978 + .totlen = constant_cpu_to_je32(8),
46979 + .hdr_crc = constant_cpu_to_je32(0)
46980 };
46981
46982 /*
46983 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46984 index 682bca6..86b8e6e 100644
46985 --- a/fs/jfs/super.c
46986 +++ b/fs/jfs/super.c
46987 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46988
46989 jfs_inode_cachep =
46990 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46991 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46992 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46993 init_once);
46994 if (jfs_inode_cachep == NULL)
46995 return -ENOMEM;
46996 diff --git a/fs/libfs.c b/fs/libfs.c
46997 index 5b2dbb3..7442d54 100644
46998 --- a/fs/libfs.c
46999 +++ b/fs/libfs.c
47000 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47001
47002 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
47003 struct dentry *next;
47004 + char d_name[sizeof(next->d_iname)];
47005 + const unsigned char *name;
47006 +
47007 next = list_entry(p, struct dentry, d_u.d_child);
47008 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
47009 if (!simple_positive(next)) {
47010 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
47011
47012 spin_unlock(&next->d_lock);
47013 spin_unlock(&dentry->d_lock);
47014 - if (filldir(dirent, next->d_name.name,
47015 + name = next->d_name.name;
47016 + if (name == next->d_iname) {
47017 + memcpy(d_name, name, next->d_name.len);
47018 + name = d_name;
47019 + }
47020 + if (filldir(dirent, name,
47021 next->d_name.len, filp->f_pos,
47022 next->d_inode->i_ino,
47023 dt_type(next->d_inode)) < 0)
47024 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
47025 index 8392cb8..80d6193 100644
47026 --- a/fs/lockd/clntproc.c
47027 +++ b/fs/lockd/clntproc.c
47028 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
47029 /*
47030 * Cookie counter for NLM requests
47031 */
47032 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
47033 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
47034
47035 void nlmclnt_next_cookie(struct nlm_cookie *c)
47036 {
47037 - u32 cookie = atomic_inc_return(&nlm_cookie);
47038 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
47039
47040 memcpy(c->data, &cookie, 4);
47041 c->len=4;
47042 diff --git a/fs/locks.c b/fs/locks.c
47043 index 0d68f1f..f216b79 100644
47044 --- a/fs/locks.c
47045 +++ b/fs/locks.c
47046 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
47047 return;
47048
47049 if (filp->f_op && filp->f_op->flock) {
47050 - struct file_lock fl = {
47051 + struct file_lock flock = {
47052 .fl_pid = current->tgid,
47053 .fl_file = filp,
47054 .fl_flags = FL_FLOCK,
47055 .fl_type = F_UNLCK,
47056 .fl_end = OFFSET_MAX,
47057 };
47058 - filp->f_op->flock(filp, F_SETLKW, &fl);
47059 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
47060 - fl.fl_ops->fl_release_private(&fl);
47061 + filp->f_op->flock(filp, F_SETLKW, &flock);
47062 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
47063 + flock.fl_ops->fl_release_private(&flock);
47064 }
47065
47066 lock_flocks();
47067 diff --git a/fs/namei.c b/fs/namei.c
47068 index 46ea9cc..c7cf3a3 100644
47069 --- a/fs/namei.c
47070 +++ b/fs/namei.c
47071 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
47072 if (ret != -EACCES)
47073 return ret;
47074
47075 +#ifdef CONFIG_GRKERNSEC
47076 + /* we'll block if we have to log due to a denied capability use */
47077 + if (mask & MAY_NOT_BLOCK)
47078 + return -ECHILD;
47079 +#endif
47080 +
47081 if (S_ISDIR(inode->i_mode)) {
47082 /* DACs are overridable for directories */
47083 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47084 - return 0;
47085 if (!(mask & MAY_WRITE))
47086 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47087 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
47088 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47089 return 0;
47090 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47091 + return 0;
47092 return -EACCES;
47093 }
47094 /*
47095 + * Searching includes executable on directories, else just read.
47096 + */
47097 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47098 + if (mask == MAY_READ)
47099 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
47100 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47101 + return 0;
47102 +
47103 + /*
47104 * Read/write DACs are always overridable.
47105 * Executable DACs are overridable when there is
47106 * at least one exec bit set.
47107 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
47108 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
47109 return 0;
47110
47111 - /*
47112 - * Searching includes executable on directories, else just read.
47113 - */
47114 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
47115 - if (mask == MAY_READ)
47116 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
47117 - return 0;
47118 -
47119 return -EACCES;
47120 }
47121
47122 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
47123 return error;
47124 }
47125
47126 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
47127 + dentry->d_inode, dentry, nd->path.mnt)) {
47128 + error = -EACCES;
47129 + *p = ERR_PTR(error); /* no ->put_link(), please */
47130 + path_put(&nd->path);
47131 + return error;
47132 + }
47133 +
47134 nd->last_type = LAST_BIND;
47135 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
47136 error = PTR_ERR(*p);
47137 if (!IS_ERR(*p)) {
47138 - char *s = nd_get_link(nd);
47139 + const char *s = nd_get_link(nd);
47140 error = 0;
47141 if (s)
47142 error = __vfs_follow_link(nd, s);
47143 @@ -1650,6 +1666,21 @@ static int path_lookupat(int dfd, const char *name,
47144 if (!err)
47145 err = complete_walk(nd);
47146
47147 + if (!(nd->flags & LOOKUP_PARENT)) {
47148 +#ifdef CONFIG_GRKERNSEC
47149 + if (flags & LOOKUP_RCU) {
47150 + if (!err)
47151 + path_put(&nd->path);
47152 + err = -ECHILD;
47153 + } else
47154 +#endif
47155 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47156 + if (!err)
47157 + path_put(&nd->path);
47158 + err = -ENOENT;
47159 + }
47160 + }
47161 +
47162 if (!err && nd->flags & LOOKUP_DIRECTORY) {
47163 if (!nd->inode->i_op->lookup) {
47164 path_put(&nd->path);
47165 @@ -1677,6 +1708,15 @@ static int do_path_lookup(int dfd, const char *name,
47166 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
47167
47168 if (likely(!retval)) {
47169 + if (*name != '/' && nd->path.dentry && nd->inode) {
47170 +#ifdef CONFIG_GRKERNSEC
47171 + if (flags & LOOKUP_RCU)
47172 + return -ECHILD;
47173 +#endif
47174 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
47175 + return -ENOENT;
47176 + }
47177 +
47178 if (unlikely(!audit_dummy_context())) {
47179 if (nd->path.dentry && nd->inode)
47180 audit_inode(name, nd->path.dentry);
47181 @@ -2071,6 +2111,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
47182 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
47183 return -EPERM;
47184
47185 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
47186 + return -EPERM;
47187 + if (gr_handle_rawio(inode))
47188 + return -EPERM;
47189 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
47190 + return -EACCES;
47191 +
47192 return 0;
47193 }
47194
47195 @@ -2132,6 +2179,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47196 error = complete_walk(nd);
47197 if (error)
47198 return ERR_PTR(error);
47199 +#ifdef CONFIG_GRKERNSEC
47200 + if (nd->flags & LOOKUP_RCU) {
47201 + error = -ECHILD;
47202 + goto exit;
47203 + }
47204 +#endif
47205 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47206 + error = -ENOENT;
47207 + goto exit;
47208 + }
47209 audit_inode(pathname, nd->path.dentry);
47210 if (open_flag & O_CREAT) {
47211 error = -EISDIR;
47212 @@ -2142,6 +2199,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47213 error = complete_walk(nd);
47214 if (error)
47215 return ERR_PTR(error);
47216 +#ifdef CONFIG_GRKERNSEC
47217 + if (nd->flags & LOOKUP_RCU) {
47218 + error = -ECHILD;
47219 + goto exit;
47220 + }
47221 +#endif
47222 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
47223 + error = -ENOENT;
47224 + goto exit;
47225 + }
47226 audit_inode(pathname, dir);
47227 goto ok;
47228 }
47229 @@ -2163,6 +2230,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47230 error = complete_walk(nd);
47231 if (error)
47232 return ERR_PTR(error);
47233 +#ifdef CONFIG_GRKERNSEC
47234 + if (nd->flags & LOOKUP_RCU) {
47235 + error = -ECHILD;
47236 + goto exit;
47237 + }
47238 +#endif
47239 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
47240 + error = -ENOENT;
47241 + goto exit;
47242 + }
47243
47244 error = -ENOTDIR;
47245 if (nd->flags & LOOKUP_DIRECTORY) {
47246 @@ -2203,6 +2280,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47247 /* Negative dentry, just create the file */
47248 if (!dentry->d_inode) {
47249 umode_t mode = op->mode;
47250 +
47251 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
47252 + error = -EACCES;
47253 + goto exit_mutex_unlock;
47254 + }
47255 +
47256 if (!IS_POSIXACL(dir->d_inode))
47257 mode &= ~current_umask();
47258 /*
47259 @@ -2226,6 +2309,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47260 error = vfs_create(dir->d_inode, dentry, mode, nd);
47261 if (error)
47262 goto exit_mutex_unlock;
47263 + else
47264 + gr_handle_create(path->dentry, path->mnt);
47265 mutex_unlock(&dir->d_inode->i_mutex);
47266 dput(nd->path.dentry);
47267 nd->path.dentry = dentry;
47268 @@ -2235,6 +2320,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
47269 /*
47270 * It already exists.
47271 */
47272 +
47273 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
47274 + error = -ENOENT;
47275 + goto exit_mutex_unlock;
47276 + }
47277 +
47278 + /* only check if O_CREAT is specified, all other checks need to go
47279 + into may_open */
47280 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
47281 + error = -EACCES;
47282 + goto exit_mutex_unlock;
47283 + }
47284 +
47285 mutex_unlock(&dir->d_inode->i_mutex);
47286 audit_inode(pathname, path->dentry);
47287
47288 @@ -2447,6 +2545,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
47289 *path = nd.path;
47290 return dentry;
47291 eexist:
47292 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
47293 + dput(dentry);
47294 + dentry = ERR_PTR(-ENOENT);
47295 + goto fail;
47296 + }
47297 dput(dentry);
47298 dentry = ERR_PTR(-EEXIST);
47299 fail:
47300 @@ -2469,6 +2572,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
47301 }
47302 EXPORT_SYMBOL(user_path_create);
47303
47304 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
47305 +{
47306 + char *tmp = getname(pathname);
47307 + struct dentry *res;
47308 + if (IS_ERR(tmp))
47309 + return ERR_CAST(tmp);
47310 + res = kern_path_create(dfd, tmp, path, is_dir);
47311 + if (IS_ERR(res))
47312 + putname(tmp);
47313 + else
47314 + *to = tmp;
47315 + return res;
47316 +}
47317 +
47318 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
47319 {
47320 int error = may_create(dir, dentry);
47321 @@ -2536,6 +2653,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47322 error = mnt_want_write(path.mnt);
47323 if (error)
47324 goto out_dput;
47325 +
47326 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
47327 + error = -EPERM;
47328 + goto out_drop_write;
47329 + }
47330 +
47331 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
47332 + error = -EACCES;
47333 + goto out_drop_write;
47334 + }
47335 +
47336 error = security_path_mknod(&path, dentry, mode, dev);
47337 if (error)
47338 goto out_drop_write;
47339 @@ -2553,6 +2681,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
47340 }
47341 out_drop_write:
47342 mnt_drop_write(path.mnt);
47343 +
47344 + if (!error)
47345 + gr_handle_create(dentry, path.mnt);
47346 out_dput:
47347 dput(dentry);
47348 mutex_unlock(&path.dentry->d_inode->i_mutex);
47349 @@ -2602,12 +2733,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
47350 error = mnt_want_write(path.mnt);
47351 if (error)
47352 goto out_dput;
47353 +
47354 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
47355 + error = -EACCES;
47356 + goto out_drop_write;
47357 + }
47358 +
47359 error = security_path_mkdir(&path, dentry, mode);
47360 if (error)
47361 goto out_drop_write;
47362 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
47363 out_drop_write:
47364 mnt_drop_write(path.mnt);
47365 +
47366 + if (!error)
47367 + gr_handle_create(dentry, path.mnt);
47368 out_dput:
47369 dput(dentry);
47370 mutex_unlock(&path.dentry->d_inode->i_mutex);
47371 @@ -2687,6 +2827,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47372 char * name;
47373 struct dentry *dentry;
47374 struct nameidata nd;
47375 + ino_t saved_ino = 0;
47376 + dev_t saved_dev = 0;
47377
47378 error = user_path_parent(dfd, pathname, &nd, &name);
47379 if (error)
47380 @@ -2715,6 +2857,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
47381 error = -ENOENT;
47382 goto exit3;
47383 }
47384 +
47385 + saved_ino = dentry->d_inode->i_ino;
47386 + saved_dev = gr_get_dev_from_dentry(dentry);
47387 +
47388 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
47389 + error = -EACCES;
47390 + goto exit3;
47391 + }
47392 +
47393 error = mnt_want_write(nd.path.mnt);
47394 if (error)
47395 goto exit3;
47396 @@ -2722,6 +2873,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
47397 if (error)
47398 goto exit4;
47399 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
47400 + if (!error && (saved_dev || saved_ino))
47401 + gr_handle_delete(saved_ino, saved_dev);
47402 exit4:
47403 mnt_drop_write(nd.path.mnt);
47404 exit3:
47405 @@ -2784,6 +2937,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47406 struct dentry *dentry;
47407 struct nameidata nd;
47408 struct inode *inode = NULL;
47409 + ino_t saved_ino = 0;
47410 + dev_t saved_dev = 0;
47411
47412 error = user_path_parent(dfd, pathname, &nd, &name);
47413 if (error)
47414 @@ -2806,6 +2961,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47415 if (!inode)
47416 goto slashes;
47417 ihold(inode);
47418 +
47419 + if (inode->i_nlink <= 1) {
47420 + saved_ino = inode->i_ino;
47421 + saved_dev = gr_get_dev_from_dentry(dentry);
47422 + }
47423 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
47424 + error = -EACCES;
47425 + goto exit2;
47426 + }
47427 +
47428 error = mnt_want_write(nd.path.mnt);
47429 if (error)
47430 goto exit2;
47431 @@ -2813,6 +2978,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
47432 if (error)
47433 goto exit3;
47434 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
47435 + if (!error && (saved_ino || saved_dev))
47436 + gr_handle_delete(saved_ino, saved_dev);
47437 exit3:
47438 mnt_drop_write(nd.path.mnt);
47439 exit2:
47440 @@ -2888,10 +3055,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
47441 error = mnt_want_write(path.mnt);
47442 if (error)
47443 goto out_dput;
47444 +
47445 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
47446 + error = -EACCES;
47447 + goto out_drop_write;
47448 + }
47449 +
47450 error = security_path_symlink(&path, dentry, from);
47451 if (error)
47452 goto out_drop_write;
47453 error = vfs_symlink(path.dentry->d_inode, dentry, from);
47454 + if (!error)
47455 + gr_handle_create(dentry, path.mnt);
47456 out_drop_write:
47457 mnt_drop_write(path.mnt);
47458 out_dput:
47459 @@ -2963,6 +3138,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47460 {
47461 struct dentry *new_dentry;
47462 struct path old_path, new_path;
47463 + char *to = NULL;
47464 int how = 0;
47465 int error;
47466
47467 @@ -2986,7 +3162,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47468 if (error)
47469 return error;
47470
47471 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
47472 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
47473 error = PTR_ERR(new_dentry);
47474 if (IS_ERR(new_dentry))
47475 goto out;
47476 @@ -2997,13 +3173,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
47477 error = mnt_want_write(new_path.mnt);
47478 if (error)
47479 goto out_dput;
47480 +
47481 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
47482 + old_path.dentry->d_inode,
47483 + old_path.dentry->d_inode->i_mode, to)) {
47484 + error = -EACCES;
47485 + goto out_drop_write;
47486 + }
47487 +
47488 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
47489 + old_path.dentry, old_path.mnt, to)) {
47490 + error = -EACCES;
47491 + goto out_drop_write;
47492 + }
47493 +
47494 error = security_path_link(old_path.dentry, &new_path, new_dentry);
47495 if (error)
47496 goto out_drop_write;
47497 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
47498 + if (!error)
47499 + gr_handle_create(new_dentry, new_path.mnt);
47500 out_drop_write:
47501 mnt_drop_write(new_path.mnt);
47502 out_dput:
47503 + putname(to);
47504 dput(new_dentry);
47505 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
47506 path_put(&new_path);
47507 @@ -3231,6 +3424,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47508 if (new_dentry == trap)
47509 goto exit5;
47510
47511 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47512 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
47513 + to);
47514 + if (error)
47515 + goto exit5;
47516 +
47517 error = mnt_want_write(oldnd.path.mnt);
47518 if (error)
47519 goto exit5;
47520 @@ -3240,6 +3439,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47521 goto exit6;
47522 error = vfs_rename(old_dir->d_inode, old_dentry,
47523 new_dir->d_inode, new_dentry);
47524 + if (!error)
47525 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47526 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47527 exit6:
47528 mnt_drop_write(oldnd.path.mnt);
47529 exit5:
47530 @@ -3265,6 +3467,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47531
47532 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47533 {
47534 + char tmpbuf[64];
47535 + const char *newlink;
47536 int len;
47537
47538 len = PTR_ERR(link);
47539 @@ -3274,7 +3478,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47540 len = strlen(link);
47541 if (len > (unsigned) buflen)
47542 len = buflen;
47543 - if (copy_to_user(buffer, link, len))
47544 +
47545 + if (len < sizeof(tmpbuf)) {
47546 + memcpy(tmpbuf, link, len);
47547 + newlink = tmpbuf;
47548 + } else
47549 + newlink = link;
47550 +
47551 + if (copy_to_user(buffer, newlink, len))
47552 len = -EFAULT;
47553 out:
47554 return len;
47555 diff --git a/fs/namespace.c b/fs/namespace.c
47556 index e608199..9609cb9 100644
47557 --- a/fs/namespace.c
47558 +++ b/fs/namespace.c
47559 @@ -1155,6 +1155,9 @@ static int do_umount(struct mount *mnt, int flags)
47560 if (!(sb->s_flags & MS_RDONLY))
47561 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47562 up_write(&sb->s_umount);
47563 +
47564 + gr_log_remount(mnt->mnt_devname, retval);
47565 +
47566 return retval;
47567 }
47568
47569 @@ -1174,6 +1177,9 @@ static int do_umount(struct mount *mnt, int flags)
47570 br_write_unlock(vfsmount_lock);
47571 up_write(&namespace_sem);
47572 release_mounts(&umount_list);
47573 +
47574 + gr_log_unmount(mnt->mnt_devname, retval);
47575 +
47576 return retval;
47577 }
47578
47579 @@ -2175,6 +2181,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47580 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47581 MS_STRICTATIME);
47582
47583 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47584 + retval = -EPERM;
47585 + goto dput_out;
47586 + }
47587 +
47588 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47589 + retval = -EPERM;
47590 + goto dput_out;
47591 + }
47592 +
47593 if (flags & MS_REMOUNT)
47594 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47595 data_page);
47596 @@ -2189,6 +2205,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47597 dev_name, data_page);
47598 dput_out:
47599 path_put(&path);
47600 +
47601 + gr_log_mount(dev_name, dir_name, retval);
47602 +
47603 return retval;
47604 }
47605
47606 @@ -2470,6 +2489,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47607 if (error)
47608 goto out2;
47609
47610 + if (gr_handle_chroot_pivot()) {
47611 + error = -EPERM;
47612 + goto out2;
47613 + }
47614 +
47615 get_fs_root(current->fs, &root);
47616 error = lock_mount(&old);
47617 if (error)
47618 diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
47619 index 32c0658..b1c2045e 100644
47620 --- a/fs/ncpfs/ncplib_kernel.h
47621 +++ b/fs/ncpfs/ncplib_kernel.h
47622 @@ -130,7 +130,7 @@ static inline int ncp_is_nfs_extras(struct ncp_server* server, unsigned int voln
47623 int ncp__io2vol(struct ncp_server *, unsigned char *, unsigned int *,
47624 const unsigned char *, unsigned int, int);
47625 int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47626 - const unsigned char *, unsigned int, int);
47627 + const unsigned char *, unsigned int, int) __size_overflow(5);
47628
47629 #define NCP_ESC ':'
47630 #define NCP_IO_TABLE(sb) (NCP_SBP(sb)->nls_io)
47631 @@ -146,7 +146,7 @@ int ncp__vol2io(struct ncp_server *, unsigned char *, unsigned int *,
47632 int ncp__io2vol(unsigned char *, unsigned int *,
47633 const unsigned char *, unsigned int, int);
47634 int ncp__vol2io(unsigned char *, unsigned int *,
47635 - const unsigned char *, unsigned int, int);
47636 + const unsigned char *, unsigned int, int) __size_overflow(5);
47637
47638 #define NCP_IO_TABLE(sb) NULL
47639 #define ncp_tolower(t, c) tolower(c)
47640 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47641 index f649fba..236bf92 100644
47642 --- a/fs/nfs/inode.c
47643 +++ b/fs/nfs/inode.c
47644 @@ -151,7 +151,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47645 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47646 nfsi->attrtimeo_timestamp = jiffies;
47647
47648 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47649 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47650 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47651 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47652 else
47653 @@ -1003,16 +1003,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47654 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47655 }
47656
47657 -static atomic_long_t nfs_attr_generation_counter;
47658 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47659
47660 static unsigned long nfs_read_attr_generation_counter(void)
47661 {
47662 - return atomic_long_read(&nfs_attr_generation_counter);
47663 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47664 }
47665
47666 unsigned long nfs_inc_attr_generation_counter(void)
47667 {
47668 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47669 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47670 }
47671
47672 void nfs_fattr_init(struct nfs_fattr *fattr)
47673 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47674 index b96fe94..a4dbece 100644
47675 --- a/fs/nfsd/vfs.c
47676 +++ b/fs/nfsd/vfs.c
47677 @@ -925,7 +925,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47678 } else {
47679 oldfs = get_fs();
47680 set_fs(KERNEL_DS);
47681 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47682 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47683 set_fs(oldfs);
47684 }
47685
47686 @@ -1029,7 +1029,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47687
47688 /* Write the data. */
47689 oldfs = get_fs(); set_fs(KERNEL_DS);
47690 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47691 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47692 set_fs(oldfs);
47693 if (host_err < 0)
47694 goto out_nfserr;
47695 @@ -1564,7 +1564,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47696 */
47697
47698 oldfs = get_fs(); set_fs(KERNEL_DS);
47699 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
47700 + host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
47701 set_fs(oldfs);
47702
47703 if (host_err < 0)
47704 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47705 index 3568c8a..e0240d8 100644
47706 --- a/fs/notify/fanotify/fanotify_user.c
47707 +++ b/fs/notify/fanotify/fanotify_user.c
47708 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47709 goto out_close_fd;
47710
47711 ret = -EFAULT;
47712 - if (copy_to_user(buf, &fanotify_event_metadata,
47713 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47714 + copy_to_user(buf, &fanotify_event_metadata,
47715 fanotify_event_metadata.event_len))
47716 goto out_kill_access_response;
47717
47718 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47719 index ee18815..7aa5d01 100644
47720 --- a/fs/notify/notification.c
47721 +++ b/fs/notify/notification.c
47722 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47723 * get set to 0 so it will never get 'freed'
47724 */
47725 static struct fsnotify_event *q_overflow_event;
47726 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47727 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47728
47729 /**
47730 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47731 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47732 */
47733 u32 fsnotify_get_cookie(void)
47734 {
47735 - return atomic_inc_return(&fsnotify_sync_cookie);
47736 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47737 }
47738 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47739
47740 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47741 index 99e3610..02c1068 100644
47742 --- a/fs/ntfs/dir.c
47743 +++ b/fs/ntfs/dir.c
47744 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47745 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47746 ~(s64)(ndir->itype.index.block_size - 1)));
47747 /* Bounds checks. */
47748 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47749 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47750 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47751 "inode 0x%lx or driver bug.", vdir->i_ino);
47752 goto err_out;
47753 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47754 index c587e2d..3641eaa 100644
47755 --- a/fs/ntfs/file.c
47756 +++ b/fs/ntfs/file.c
47757 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47758 #endif /* NTFS_RW */
47759 };
47760
47761 -const struct file_operations ntfs_empty_file_ops = {};
47762 +const struct file_operations ntfs_empty_file_ops __read_only;
47763
47764 -const struct inode_operations ntfs_empty_inode_ops = {};
47765 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47766 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47767 index 210c352..a174f83 100644
47768 --- a/fs/ocfs2/localalloc.c
47769 +++ b/fs/ocfs2/localalloc.c
47770 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47771 goto bail;
47772 }
47773
47774 - atomic_inc(&osb->alloc_stats.moves);
47775 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47776
47777 bail:
47778 if (handle)
47779 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47780 index d355e6e..578d905 100644
47781 --- a/fs/ocfs2/ocfs2.h
47782 +++ b/fs/ocfs2/ocfs2.h
47783 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47784
47785 struct ocfs2_alloc_stats
47786 {
47787 - atomic_t moves;
47788 - atomic_t local_data;
47789 - atomic_t bitmap_data;
47790 - atomic_t bg_allocs;
47791 - atomic_t bg_extends;
47792 + atomic_unchecked_t moves;
47793 + atomic_unchecked_t local_data;
47794 + atomic_unchecked_t bitmap_data;
47795 + atomic_unchecked_t bg_allocs;
47796 + atomic_unchecked_t bg_extends;
47797 };
47798
47799 enum ocfs2_local_alloc_state
47800 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47801 index f169da4..9112253 100644
47802 --- a/fs/ocfs2/suballoc.c
47803 +++ b/fs/ocfs2/suballoc.c
47804 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47805 mlog_errno(status);
47806 goto bail;
47807 }
47808 - atomic_inc(&osb->alloc_stats.bg_extends);
47809 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47810
47811 /* You should never ask for this much metadata */
47812 BUG_ON(bits_wanted >
47813 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47814 mlog_errno(status);
47815 goto bail;
47816 }
47817 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47818 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47819
47820 *suballoc_loc = res.sr_bg_blkno;
47821 *suballoc_bit_start = res.sr_bit_offset;
47822 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47823 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47824 res->sr_bits);
47825
47826 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47827 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47828
47829 BUG_ON(res->sr_bits != 1);
47830
47831 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47832 mlog_errno(status);
47833 goto bail;
47834 }
47835 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47836 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47837
47838 BUG_ON(res.sr_bits != 1);
47839
47840 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47841 cluster_start,
47842 num_clusters);
47843 if (!status)
47844 - atomic_inc(&osb->alloc_stats.local_data);
47845 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47846 } else {
47847 if (min_clusters > (osb->bitmap_cpg - 1)) {
47848 /* The only paths asking for contiguousness
47849 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47850 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47851 res.sr_bg_blkno,
47852 res.sr_bit_offset);
47853 - atomic_inc(&osb->alloc_stats.bitmap_data);
47854 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47855 *num_clusters = res.sr_bits;
47856 }
47857 }
47858 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47859 index 604e12c..8426483 100644
47860 --- a/fs/ocfs2/super.c
47861 +++ b/fs/ocfs2/super.c
47862 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47863 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47864 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47865 "Stats",
47866 - atomic_read(&osb->alloc_stats.bitmap_data),
47867 - atomic_read(&osb->alloc_stats.local_data),
47868 - atomic_read(&osb->alloc_stats.bg_allocs),
47869 - atomic_read(&osb->alloc_stats.moves),
47870 - atomic_read(&osb->alloc_stats.bg_extends));
47871 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47872 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47873 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47874 + atomic_read_unchecked(&osb->alloc_stats.moves),
47875 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47876
47877 out += snprintf(buf + out, len - out,
47878 "%10s => State: %u Descriptor: %llu Size: %u bits "
47879 @@ -2117,11 +2117,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47880 spin_lock_init(&osb->osb_xattr_lock);
47881 ocfs2_init_steal_slots(osb);
47882
47883 - atomic_set(&osb->alloc_stats.moves, 0);
47884 - atomic_set(&osb->alloc_stats.local_data, 0);
47885 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47886 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47887 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47888 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47889 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47890 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47891 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47892 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47893
47894 /* Copy the blockcheck stats from the superblock probe */
47895 osb->osb_ecc_stats = *stats;
47896 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47897 index 5d22872..523db20 100644
47898 --- a/fs/ocfs2/symlink.c
47899 +++ b/fs/ocfs2/symlink.c
47900 @@ -142,7 +142,7 @@ bail:
47901
47902 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47903 {
47904 - char *link = nd_get_link(nd);
47905 + const char *link = nd_get_link(nd);
47906 if (!IS_ERR(link))
47907 kfree(link);
47908 }
47909 diff --git a/fs/open.c b/fs/open.c
47910 index 77becc0..aad7bd9 100644
47911 --- a/fs/open.c
47912 +++ b/fs/open.c
47913 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47914 error = locks_verify_truncate(inode, NULL, length);
47915 if (!error)
47916 error = security_path_truncate(&path);
47917 +
47918 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47919 + error = -EACCES;
47920 +
47921 if (!error)
47922 error = do_truncate(path.dentry, length, 0, NULL);
47923
47924 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47925 if (__mnt_is_readonly(path.mnt))
47926 res = -EROFS;
47927
47928 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47929 + res = -EACCES;
47930 +
47931 out_path_release:
47932 path_put(&path);
47933 out:
47934 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47935 if (error)
47936 goto dput_and_out;
47937
47938 + gr_log_chdir(path.dentry, path.mnt);
47939 +
47940 set_fs_pwd(current->fs, &path);
47941
47942 dput_and_out:
47943 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47944 goto out_putf;
47945
47946 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47947 +
47948 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47949 + error = -EPERM;
47950 +
47951 + if (!error)
47952 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47953 +
47954 if (!error)
47955 set_fs_pwd(current->fs, &file->f_path);
47956 out_putf:
47957 @@ -438,7 +454,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47958 if (error)
47959 goto dput_and_out;
47960
47961 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47962 + goto dput_and_out;
47963 +
47964 set_fs_root(current->fs, &path);
47965 +
47966 + gr_handle_chroot_chdir(&path);
47967 +
47968 error = 0;
47969 dput_and_out:
47970 path_put(&path);
47971 @@ -456,6 +478,16 @@ static int chmod_common(struct path *path, umode_t mode)
47972 if (error)
47973 return error;
47974 mutex_lock(&inode->i_mutex);
47975 +
47976 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47977 + error = -EACCES;
47978 + goto out_unlock;
47979 + }
47980 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47981 + error = -EACCES;
47982 + goto out_unlock;
47983 + }
47984 +
47985 error = security_path_chmod(path, mode);
47986 if (error)
47987 goto out_unlock;
47988 @@ -506,6 +538,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47989 int error;
47990 struct iattr newattrs;
47991
47992 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47993 + return -EACCES;
47994 +
47995 newattrs.ia_valid = ATTR_CTIME;
47996 if (user != (uid_t) -1) {
47997 newattrs.ia_valid |= ATTR_UID;
47998 diff --git a/fs/pipe.c b/fs/pipe.c
47999 index 82e651b..8a68573 100644
48000 --- a/fs/pipe.c
48001 +++ b/fs/pipe.c
48002 @@ -437,9 +437,9 @@ redo:
48003 }
48004 if (bufs) /* More to do? */
48005 continue;
48006 - if (!pipe->writers)
48007 + if (!atomic_read(&pipe->writers))
48008 break;
48009 - if (!pipe->waiting_writers) {
48010 + if (!atomic_read(&pipe->waiting_writers)) {
48011 /* syscall merging: Usually we must not sleep
48012 * if O_NONBLOCK is set, or if we got some data.
48013 * But if a writer sleeps in kernel space, then
48014 @@ -503,7 +503,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
48015 mutex_lock(&inode->i_mutex);
48016 pipe = inode->i_pipe;
48017
48018 - if (!pipe->readers) {
48019 + if (!atomic_read(&pipe->readers)) {
48020 send_sig(SIGPIPE, current, 0);
48021 ret = -EPIPE;
48022 goto out;
48023 @@ -552,7 +552,7 @@ redo1:
48024 for (;;) {
48025 int bufs;
48026
48027 - if (!pipe->readers) {
48028 + if (!atomic_read(&pipe->readers)) {
48029 send_sig(SIGPIPE, current, 0);
48030 if (!ret)
48031 ret = -EPIPE;
48032 @@ -643,9 +643,9 @@ redo2:
48033 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48034 do_wakeup = 0;
48035 }
48036 - pipe->waiting_writers++;
48037 + atomic_inc(&pipe->waiting_writers);
48038 pipe_wait(pipe);
48039 - pipe->waiting_writers--;
48040 + atomic_dec(&pipe->waiting_writers);
48041 }
48042 out:
48043 mutex_unlock(&inode->i_mutex);
48044 @@ -712,7 +712,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48045 mask = 0;
48046 if (filp->f_mode & FMODE_READ) {
48047 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
48048 - if (!pipe->writers && filp->f_version != pipe->w_counter)
48049 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
48050 mask |= POLLHUP;
48051 }
48052
48053 @@ -722,7 +722,7 @@ pipe_poll(struct file *filp, poll_table *wait)
48054 * Most Unices do not set POLLERR for FIFOs but on Linux they
48055 * behave exactly like pipes for poll().
48056 */
48057 - if (!pipe->readers)
48058 + if (!atomic_read(&pipe->readers))
48059 mask |= POLLERR;
48060 }
48061
48062 @@ -736,10 +736,10 @@ pipe_release(struct inode *inode, int decr, int decw)
48063
48064 mutex_lock(&inode->i_mutex);
48065 pipe = inode->i_pipe;
48066 - pipe->readers -= decr;
48067 - pipe->writers -= decw;
48068 + atomic_sub(decr, &pipe->readers);
48069 + atomic_sub(decw, &pipe->writers);
48070
48071 - if (!pipe->readers && !pipe->writers) {
48072 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
48073 free_pipe_info(inode);
48074 } else {
48075 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
48076 @@ -829,7 +829,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
48077
48078 if (inode->i_pipe) {
48079 ret = 0;
48080 - inode->i_pipe->readers++;
48081 + atomic_inc(&inode->i_pipe->readers);
48082 }
48083
48084 mutex_unlock(&inode->i_mutex);
48085 @@ -846,7 +846,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
48086
48087 if (inode->i_pipe) {
48088 ret = 0;
48089 - inode->i_pipe->writers++;
48090 + atomic_inc(&inode->i_pipe->writers);
48091 }
48092
48093 mutex_unlock(&inode->i_mutex);
48094 @@ -864,9 +864,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
48095 if (inode->i_pipe) {
48096 ret = 0;
48097 if (filp->f_mode & FMODE_READ)
48098 - inode->i_pipe->readers++;
48099 + atomic_inc(&inode->i_pipe->readers);
48100 if (filp->f_mode & FMODE_WRITE)
48101 - inode->i_pipe->writers++;
48102 + atomic_inc(&inode->i_pipe->writers);
48103 }
48104
48105 mutex_unlock(&inode->i_mutex);
48106 @@ -958,7 +958,7 @@ void free_pipe_info(struct inode *inode)
48107 inode->i_pipe = NULL;
48108 }
48109
48110 -static struct vfsmount *pipe_mnt __read_mostly;
48111 +struct vfsmount *pipe_mnt __read_mostly;
48112
48113 /*
48114 * pipefs_dname() is called from d_path().
48115 @@ -988,7 +988,8 @@ static struct inode * get_pipe_inode(void)
48116 goto fail_iput;
48117 inode->i_pipe = pipe;
48118
48119 - pipe->readers = pipe->writers = 1;
48120 + atomic_set(&pipe->readers, 1);
48121 + atomic_set(&pipe->writers, 1);
48122 inode->i_fop = &rdwr_pipefifo_fops;
48123
48124 /*
48125 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
48126 index 15af622..0e9f4467 100644
48127 --- a/fs/proc/Kconfig
48128 +++ b/fs/proc/Kconfig
48129 @@ -30,12 +30,12 @@ config PROC_FS
48130
48131 config PROC_KCORE
48132 bool "/proc/kcore support" if !ARM
48133 - depends on PROC_FS && MMU
48134 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
48135
48136 config PROC_VMCORE
48137 bool "/proc/vmcore support"
48138 - depends on PROC_FS && CRASH_DUMP
48139 - default y
48140 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
48141 + default n
48142 help
48143 Exports the dump image of crashed kernel in ELF format.
48144
48145 @@ -59,8 +59,8 @@ config PROC_SYSCTL
48146 limited in memory.
48147
48148 config PROC_PAGE_MONITOR
48149 - default y
48150 - depends on PROC_FS && MMU
48151 + default n
48152 + depends on PROC_FS && MMU && !GRKERNSEC
48153 bool "Enable /proc page monitoring" if EXPERT
48154 help
48155 Various /proc files exist to monitor process memory utilization:
48156 diff --git a/fs/proc/array.c b/fs/proc/array.c
48157 index c602b8d..a7de642 100644
48158 --- a/fs/proc/array.c
48159 +++ b/fs/proc/array.c
48160 @@ -60,6 +60,7 @@
48161 #include <linux/tty.h>
48162 #include <linux/string.h>
48163 #include <linux/mman.h>
48164 +#include <linux/grsecurity.h>
48165 #include <linux/proc_fs.h>
48166 #include <linux/ioport.h>
48167 #include <linux/uaccess.h>
48168 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
48169 seq_putc(m, '\n');
48170 }
48171
48172 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48173 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
48174 +{
48175 + if (p->mm)
48176 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
48177 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
48178 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
48179 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
48180 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
48181 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
48182 + else
48183 + seq_printf(m, "PaX:\t-----\n");
48184 +}
48185 +#endif
48186 +
48187 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48188 struct pid *pid, struct task_struct *task)
48189 {
48190 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48191 task_cpus_allowed(m, task);
48192 cpuset_task_status_allowed(m, task);
48193 task_context_switch_counts(m, task);
48194 +
48195 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48196 + task_pax(m, task);
48197 +#endif
48198 +
48199 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
48200 + task_grsec_rbac(m, task);
48201 +#endif
48202 +
48203 return 0;
48204 }
48205
48206 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48207 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48208 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48209 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48210 +#endif
48211 +
48212 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48213 struct pid *pid, struct task_struct *task, int whole)
48214 {
48215 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48216 char tcomm[sizeof(task->comm)];
48217 unsigned long flags;
48218
48219 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48220 + if (current->exec_id != m->exec_id) {
48221 + gr_log_badprocpid("stat");
48222 + return 0;
48223 + }
48224 +#endif
48225 +
48226 state = *get_task_state(task);
48227 vsize = eip = esp = 0;
48228 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48229 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48230 gtime = task->gtime;
48231 }
48232
48233 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48234 + if (PAX_RAND_FLAGS(mm)) {
48235 + eip = 0;
48236 + esp = 0;
48237 + wchan = 0;
48238 + }
48239 +#endif
48240 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48241 + wchan = 0;
48242 + eip =0;
48243 + esp =0;
48244 +#endif
48245 +
48246 /* scale priority and nice values from timeslices to -20..20 */
48247 /* to make it look like a "normal" Unix priority/nice value */
48248 priority = task_prio(task);
48249 @@ -489,9 +540,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
48250 vsize,
48251 mm ? get_mm_rss(mm) : 0,
48252 rsslim,
48253 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48254 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
48255 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
48256 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
48257 +#else
48258 mm ? (permitted ? mm->start_code : 1) : 0,
48259 mm ? (permitted ? mm->end_code : 1) : 0,
48260 (permitted && mm) ? mm->start_stack : 0,
48261 +#endif
48262 esp,
48263 eip,
48264 /* The signal information here is obsolete.
48265 @@ -536,8 +593,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48266 struct pid *pid, struct task_struct *task)
48267 {
48268 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
48269 - struct mm_struct *mm = get_task_mm(task);
48270 + struct mm_struct *mm;
48271
48272 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48273 + if (current->exec_id != m->exec_id) {
48274 + gr_log_badprocpid("statm");
48275 + return 0;
48276 + }
48277 +#endif
48278 + mm = get_task_mm(task);
48279 if (mm) {
48280 size = task_statm(mm, &shared, &text, &data, &resident);
48281 mmput(mm);
48282 @@ -547,3 +611,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48283
48284 return 0;
48285 }
48286 +
48287 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48288 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
48289 +{
48290 + u32 curr_ip = 0;
48291 + unsigned long flags;
48292 +
48293 + if (lock_task_sighand(task, &flags)) {
48294 + curr_ip = task->signal->curr_ip;
48295 + unlock_task_sighand(task, &flags);
48296 + }
48297 +
48298 + return sprintf(buffer, "%pI4\n", &curr_ip);
48299 +}
48300 +#endif
48301 diff --git a/fs/proc/base.c b/fs/proc/base.c
48302 index d4548dd..d101f84 100644
48303 --- a/fs/proc/base.c
48304 +++ b/fs/proc/base.c
48305 @@ -109,6 +109,14 @@ struct pid_entry {
48306 union proc_op op;
48307 };
48308
48309 +struct getdents_callback {
48310 + struct linux_dirent __user * current_dir;
48311 + struct linux_dirent __user * previous;
48312 + struct file * file;
48313 + int count;
48314 + int error;
48315 +};
48316 +
48317 #define NOD(NAME, MODE, IOP, FOP, OP) { \
48318 .name = (NAME), \
48319 .len = sizeof(NAME) - 1, \
48320 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
48321 if (!mm->arg_end)
48322 goto out_mm; /* Shh! No looking before we're done */
48323
48324 + if (gr_acl_handle_procpidmem(task))
48325 + goto out_mm;
48326 +
48327 len = mm->arg_end - mm->arg_start;
48328
48329 if (len > PAGE_SIZE)
48330 @@ -240,12 +251,28 @@ out:
48331 return res;
48332 }
48333
48334 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48335 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48336 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48337 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48338 +#endif
48339 +
48340 static int proc_pid_auxv(struct task_struct *task, char *buffer)
48341 {
48342 struct mm_struct *mm = mm_for_maps(task);
48343 int res = PTR_ERR(mm);
48344 if (mm && !IS_ERR(mm)) {
48345 unsigned int nwords = 0;
48346 +
48347 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48348 + /* allow if we're currently ptracing this task */
48349 + if (PAX_RAND_FLAGS(mm) &&
48350 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
48351 + mmput(mm);
48352 + return 0;
48353 + }
48354 +#endif
48355 +
48356 do {
48357 nwords += 2;
48358 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
48359 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
48360 }
48361
48362
48363 -#ifdef CONFIG_KALLSYMS
48364 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48365 /*
48366 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
48367 * Returns the resolved symbol. If that fails, simply return the address.
48368 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
48369 mutex_unlock(&task->signal->cred_guard_mutex);
48370 }
48371
48372 -#ifdef CONFIG_STACKTRACE
48373 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48374
48375 #define MAX_STACK_TRACE_DEPTH 64
48376
48377 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
48378 return count;
48379 }
48380
48381 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48382 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48383 static int proc_pid_syscall(struct task_struct *task, char *buffer)
48384 {
48385 long nr;
48386 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
48387 /************************************************************************/
48388
48389 /* permission checks */
48390 -static int proc_fd_access_allowed(struct inode *inode)
48391 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
48392 {
48393 struct task_struct *task;
48394 int allowed = 0;
48395 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
48396 */
48397 task = get_proc_task(inode);
48398 if (task) {
48399 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48400 + if (log)
48401 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
48402 + else
48403 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
48404 put_task_struct(task);
48405 }
48406 return allowed;
48407 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
48408 struct task_struct *task,
48409 int hide_pid_min)
48410 {
48411 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48412 + return false;
48413 +
48414 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48415 + rcu_read_lock();
48416 + {
48417 + const struct cred *tmpcred = current_cred();
48418 + const struct cred *cred = __task_cred(task);
48419 +
48420 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
48421 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48422 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
48423 +#endif
48424 + ) {
48425 + rcu_read_unlock();
48426 + return true;
48427 + }
48428 + }
48429 + rcu_read_unlock();
48430 +
48431 + if (!pid->hide_pid)
48432 + return false;
48433 +#endif
48434 +
48435 if (pid->hide_pid < hide_pid_min)
48436 return true;
48437 if (in_group_p(pid->pid_gid))
48438 return true;
48439 +
48440 return ptrace_may_access(task, PTRACE_MODE_READ);
48441 }
48442
48443 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
48444 put_task_struct(task);
48445
48446 if (!has_perms) {
48447 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48448 + {
48449 +#else
48450 if (pid->hide_pid == 2) {
48451 +#endif
48452 /*
48453 * Let's make getdents(), stat(), and open()
48454 * consistent with each other. If a process
48455 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
48456 file->f_mode |= FMODE_UNSIGNED_OFFSET;
48457 file->private_data = mm;
48458
48459 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48460 + file->f_version = current->exec_id;
48461 +#endif
48462 +
48463 return 0;
48464 }
48465
48466 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
48467 ssize_t copied;
48468 char *page;
48469
48470 +#ifdef CONFIG_GRKERNSEC
48471 + if (write)
48472 + return -EPERM;
48473 +#endif
48474 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48475 + if (file->f_version != current->exec_id) {
48476 + gr_log_badprocpid("mem");
48477 + return 0;
48478 + }
48479 +#endif
48480 +
48481 if (!mm)
48482 return 0;
48483
48484 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48485 if (!task)
48486 goto out_no_task;
48487
48488 + if (gr_acl_handle_procpidmem(task))
48489 + goto out;
48490 +
48491 ret = -ENOMEM;
48492 page = (char *)__get_free_page(GFP_TEMPORARY);
48493 if (!page)
48494 @@ -1434,7 +1511,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
48495 path_put(&nd->path);
48496
48497 /* Are we allowed to snoop on the tasks file descriptors? */
48498 - if (!proc_fd_access_allowed(inode))
48499 + if (!proc_fd_access_allowed(inode, 0))
48500 goto out;
48501
48502 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
48503 @@ -1473,8 +1550,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
48504 struct path path;
48505
48506 /* Are we allowed to snoop on the tasks file descriptors? */
48507 - if (!proc_fd_access_allowed(inode))
48508 - goto out;
48509 + /* logging this is needed for learning on chromium to work properly,
48510 + but we don't want to flood the logs from 'ps' which does a readlink
48511 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48512 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
48513 + */
48514 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48515 + if (!proc_fd_access_allowed(inode,0))
48516 + goto out;
48517 + } else {
48518 + if (!proc_fd_access_allowed(inode,1))
48519 + goto out;
48520 + }
48521
48522 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
48523 if (error)
48524 @@ -1539,7 +1626,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
48525 rcu_read_lock();
48526 cred = __task_cred(task);
48527 inode->i_uid = cred->euid;
48528 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48529 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48530 +#else
48531 inode->i_gid = cred->egid;
48532 +#endif
48533 rcu_read_unlock();
48534 }
48535 security_task_to_inode(task, inode);
48536 @@ -1575,10 +1666,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
48537 return -ENOENT;
48538 }
48539 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48540 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48541 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48542 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48543 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48544 +#endif
48545 task_dumpable(task)) {
48546 cred = __task_cred(task);
48547 stat->uid = cred->euid;
48548 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48549 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48550 +#else
48551 stat->gid = cred->egid;
48552 +#endif
48553 }
48554 }
48555 rcu_read_unlock();
48556 @@ -1616,11 +1716,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
48557
48558 if (task) {
48559 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48560 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48561 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48562 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48563 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48564 +#endif
48565 task_dumpable(task)) {
48566 rcu_read_lock();
48567 cred = __task_cred(task);
48568 inode->i_uid = cred->euid;
48569 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48570 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48571 +#else
48572 inode->i_gid = cred->egid;
48573 +#endif
48574 rcu_read_unlock();
48575 } else {
48576 inode->i_uid = 0;
48577 @@ -1738,7 +1847,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48578 int fd = proc_fd(inode);
48579
48580 if (task) {
48581 - files = get_files_struct(task);
48582 + if (!gr_acl_handle_procpidmem(task))
48583 + files = get_files_struct(task);
48584 put_task_struct(task);
48585 }
48586 if (files) {
48587 @@ -2355,11 +2465,21 @@ static const struct file_operations proc_map_files_operations = {
48588 */
48589 static int proc_fd_permission(struct inode *inode, int mask)
48590 {
48591 + struct task_struct *task;
48592 int rv = generic_permission(inode, mask);
48593 - if (rv == 0)
48594 - return 0;
48595 +
48596 if (task_pid(current) == proc_pid(inode))
48597 rv = 0;
48598 +
48599 + task = get_proc_task(inode);
48600 + if (task == NULL)
48601 + return rv;
48602 +
48603 + if (gr_acl_handle_procpidmem(task))
48604 + rv = -EACCES;
48605 +
48606 + put_task_struct(task);
48607 +
48608 return rv;
48609 }
48610
48611 @@ -2469,6 +2589,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48612 if (!task)
48613 goto out_no_task;
48614
48615 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48616 + goto out;
48617 +
48618 /*
48619 * Yes, it does not scale. And it should not. Don't add
48620 * new entries into /proc/<tgid>/ without very good reasons.
48621 @@ -2513,6 +2636,9 @@ static int proc_pident_readdir(struct file *filp,
48622 if (!task)
48623 goto out_no_task;
48624
48625 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48626 + goto out;
48627 +
48628 ret = 0;
48629 i = filp->f_pos;
48630 switch (i) {
48631 @@ -2783,7 +2909,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48632 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48633 void *cookie)
48634 {
48635 - char *s = nd_get_link(nd);
48636 + const char *s = nd_get_link(nd);
48637 if (!IS_ERR(s))
48638 __putname(s);
48639 }
48640 @@ -2984,7 +3110,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48641 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48642 #endif
48643 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48644 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48645 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48646 INF("syscall", S_IRUGO, proc_pid_syscall),
48647 #endif
48648 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48649 @@ -3009,10 +3135,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48650 #ifdef CONFIG_SECURITY
48651 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48652 #endif
48653 -#ifdef CONFIG_KALLSYMS
48654 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48655 INF("wchan", S_IRUGO, proc_pid_wchan),
48656 #endif
48657 -#ifdef CONFIG_STACKTRACE
48658 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48659 ONE("stack", S_IRUGO, proc_pid_stack),
48660 #endif
48661 #ifdef CONFIG_SCHEDSTATS
48662 @@ -3046,6 +3172,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48663 #ifdef CONFIG_HARDWALL
48664 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48665 #endif
48666 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48667 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48668 +#endif
48669 };
48670
48671 static int proc_tgid_base_readdir(struct file * filp,
48672 @@ -3172,7 +3301,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48673 if (!inode)
48674 goto out;
48675
48676 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48677 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48678 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48679 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48680 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48681 +#else
48682 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48683 +#endif
48684 inode->i_op = &proc_tgid_base_inode_operations;
48685 inode->i_fop = &proc_tgid_base_operations;
48686 inode->i_flags|=S_IMMUTABLE;
48687 @@ -3214,7 +3350,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48688 if (!task)
48689 goto out;
48690
48691 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48692 + goto out_put_task;
48693 +
48694 result = proc_pid_instantiate(dir, dentry, task, NULL);
48695 +out_put_task:
48696 put_task_struct(task);
48697 out:
48698 return result;
48699 @@ -3277,6 +3417,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
48700 static int fake_filldir(void *buf, const char *name, int namelen,
48701 loff_t offset, u64 ino, unsigned d_type)
48702 {
48703 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
48704 + __buf->error = -EINVAL;
48705 return 0;
48706 }
48707
48708 @@ -3343,7 +3485,7 @@ static const struct pid_entry tid_base_stuff[] = {
48709 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48710 #endif
48711 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48712 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48713 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48714 INF("syscall", S_IRUGO, proc_pid_syscall),
48715 #endif
48716 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48717 @@ -3367,10 +3509,10 @@ static const struct pid_entry tid_base_stuff[] = {
48718 #ifdef CONFIG_SECURITY
48719 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48720 #endif
48721 -#ifdef CONFIG_KALLSYMS
48722 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48723 INF("wchan", S_IRUGO, proc_pid_wchan),
48724 #endif
48725 -#ifdef CONFIG_STACKTRACE
48726 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48727 ONE("stack", S_IRUGO, proc_pid_stack),
48728 #endif
48729 #ifdef CONFIG_SCHEDSTATS
48730 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48731 index 82676e3..5f8518a 100644
48732 --- a/fs/proc/cmdline.c
48733 +++ b/fs/proc/cmdline.c
48734 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48735
48736 static int __init proc_cmdline_init(void)
48737 {
48738 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48739 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48740 +#else
48741 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48742 +#endif
48743 return 0;
48744 }
48745 module_init(proc_cmdline_init);
48746 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48747 index b143471..bb105e5 100644
48748 --- a/fs/proc/devices.c
48749 +++ b/fs/proc/devices.c
48750 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48751
48752 static int __init proc_devices_init(void)
48753 {
48754 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48755 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48756 +#else
48757 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48758 +#endif
48759 return 0;
48760 }
48761 module_init(proc_devices_init);
48762 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48763 index 84fd323..f698a32 100644
48764 --- a/fs/proc/inode.c
48765 +++ b/fs/proc/inode.c
48766 @@ -21,12 +21,18 @@
48767 #include <linux/seq_file.h>
48768 #include <linux/slab.h>
48769 #include <linux/mount.h>
48770 +#include <linux/grsecurity.h>
48771
48772 #include <asm/system.h>
48773 #include <asm/uaccess.h>
48774
48775 #include "internal.h"
48776
48777 +#ifdef CONFIG_PROC_SYSCTL
48778 +extern const struct inode_operations proc_sys_inode_operations;
48779 +extern const struct inode_operations proc_sys_dir_operations;
48780 +#endif
48781 +
48782 static void proc_evict_inode(struct inode *inode)
48783 {
48784 struct proc_dir_entry *de;
48785 @@ -52,6 +58,13 @@ static void proc_evict_inode(struct inode *inode)
48786 ns_ops = PROC_I(inode)->ns_ops;
48787 if (ns_ops && ns_ops->put)
48788 ns_ops->put(PROC_I(inode)->ns);
48789 +
48790 +#ifdef CONFIG_PROC_SYSCTL
48791 + if (inode->i_op == &proc_sys_inode_operations ||
48792 + inode->i_op == &proc_sys_dir_operations)
48793 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48794 +#endif
48795 +
48796 }
48797
48798 static struct kmem_cache * proc_inode_cachep;
48799 @@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48800 if (de->mode) {
48801 inode->i_mode = de->mode;
48802 inode->i_uid = de->uid;
48803 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48804 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48805 +#else
48806 inode->i_gid = de->gid;
48807 +#endif
48808 }
48809 if (de->size)
48810 inode->i_size = de->size;
48811 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48812 index 2925775..4f08fae 100644
48813 --- a/fs/proc/internal.h
48814 +++ b/fs/proc/internal.h
48815 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48816 struct pid *pid, struct task_struct *task);
48817 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48818 struct pid *pid, struct task_struct *task);
48819 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48820 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48821 +#endif
48822 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48823
48824 extern const struct file_operations proc_maps_operations;
48825 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48826 index d245cb2..f4e8498 100644
48827 --- a/fs/proc/kcore.c
48828 +++ b/fs/proc/kcore.c
48829 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48830 * the addresses in the elf_phdr on our list.
48831 */
48832 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48833 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48834 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48835 + if (tsz > buflen)
48836 tsz = buflen;
48837 -
48838 +
48839 while (buflen) {
48840 struct kcore_list *m;
48841
48842 @@ -509,20 +510,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48843 kfree(elf_buf);
48844 } else {
48845 if (kern_addr_valid(start)) {
48846 - unsigned long n;
48847 + char *elf_buf;
48848 + mm_segment_t oldfs;
48849
48850 - n = copy_to_user(buffer, (char *)start, tsz);
48851 - /*
48852 - * We cannot distingush between fault on source
48853 - * and fault on destination. When this happens
48854 - * we clear too and hope it will trigger the
48855 - * EFAULT again.
48856 - */
48857 - if (n) {
48858 - if (clear_user(buffer + tsz - n,
48859 - n))
48860 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48861 + if (!elf_buf)
48862 + return -ENOMEM;
48863 + oldfs = get_fs();
48864 + set_fs(KERNEL_DS);
48865 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48866 + set_fs(oldfs);
48867 + if (copy_to_user(buffer, elf_buf, tsz)) {
48868 + kfree(elf_buf);
48869 return -EFAULT;
48870 + }
48871 }
48872 + set_fs(oldfs);
48873 + kfree(elf_buf);
48874 } else {
48875 if (clear_user(buffer, tsz))
48876 return -EFAULT;
48877 @@ -542,6 +546,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48878
48879 static int open_kcore(struct inode *inode, struct file *filp)
48880 {
48881 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48882 + return -EPERM;
48883 +#endif
48884 if (!capable(CAP_SYS_RAWIO))
48885 return -EPERM;
48886 if (kcore_need_update)
48887 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48888 index 80e4645..53e5fcf 100644
48889 --- a/fs/proc/meminfo.c
48890 +++ b/fs/proc/meminfo.c
48891 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48892 vmi.used >> 10,
48893 vmi.largest_chunk >> 10
48894 #ifdef CONFIG_MEMORY_FAILURE
48895 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48896 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48897 #endif
48898 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48899 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48900 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48901 index b1822dd..df622cb 100644
48902 --- a/fs/proc/nommu.c
48903 +++ b/fs/proc/nommu.c
48904 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48905 if (len < 1)
48906 len = 1;
48907 seq_printf(m, "%*c", len, ' ');
48908 - seq_path(m, &file->f_path, "");
48909 + seq_path(m, &file->f_path, "\n\\");
48910 }
48911
48912 seq_putc(m, '\n');
48913 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48914 index 06e1cc1..177cd98 100644
48915 --- a/fs/proc/proc_net.c
48916 +++ b/fs/proc/proc_net.c
48917 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48918 struct task_struct *task;
48919 struct nsproxy *ns;
48920 struct net *net = NULL;
48921 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48922 + const struct cred *cred = current_cred();
48923 +#endif
48924 +
48925 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48926 + if (cred->fsuid)
48927 + return net;
48928 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48929 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48930 + return net;
48931 +#endif
48932
48933 rcu_read_lock();
48934 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48935 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48936 index 53c3bce..10ad159 100644
48937 --- a/fs/proc/proc_sysctl.c
48938 +++ b/fs/proc/proc_sysctl.c
48939 @@ -9,11 +9,13 @@
48940 #include <linux/namei.h>
48941 #include "internal.h"
48942
48943 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
48944 +
48945 static const struct dentry_operations proc_sys_dentry_operations;
48946 static const struct file_operations proc_sys_file_operations;
48947 -static const struct inode_operations proc_sys_inode_operations;
48948 +const struct inode_operations proc_sys_inode_operations;
48949 static const struct file_operations proc_sys_dir_file_operations;
48950 -static const struct inode_operations proc_sys_dir_operations;
48951 +const struct inode_operations proc_sys_dir_operations;
48952
48953 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48954 {
48955 @@ -131,8 +133,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48956
48957 err = NULL;
48958 d_set_d_op(dentry, &proc_sys_dentry_operations);
48959 +
48960 + gr_handle_proc_create(dentry, inode);
48961 +
48962 d_add(dentry, inode);
48963
48964 + if (gr_handle_sysctl(p, MAY_EXEC))
48965 + err = ERR_PTR(-ENOENT);
48966 +
48967 out:
48968 sysctl_head_finish(head);
48969 return err;
48970 @@ -163,6 +171,12 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48971 if (!table->proc_handler)
48972 goto out;
48973
48974 +#ifdef CONFIG_GRKERNSEC
48975 + error = -EPERM;
48976 + if (write && !capable(CAP_SYS_ADMIN))
48977 + goto out;
48978 +#endif
48979 +
48980 /* careful: calling conventions are nasty here */
48981 res = count;
48982 error = table->proc_handler(table, write, buf, &res, ppos);
48983 @@ -260,6 +274,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48984 return -ENOMEM;
48985 } else {
48986 d_set_d_op(child, &proc_sys_dentry_operations);
48987 +
48988 + gr_handle_proc_create(child, inode);
48989 +
48990 d_add(child, inode);
48991 }
48992 } else {
48993 @@ -288,6 +305,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48994 if (*pos < file->f_pos)
48995 continue;
48996
48997 + if (gr_handle_sysctl(table, 0))
48998 + continue;
48999 +
49000 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
49001 if (res)
49002 return res;
49003 @@ -413,6 +433,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
49004 if (IS_ERR(head))
49005 return PTR_ERR(head);
49006
49007 + if (table && gr_handle_sysctl(table, MAY_EXEC))
49008 + return -ENOENT;
49009 +
49010 generic_fillattr(inode, stat);
49011 if (table)
49012 stat->mode = (stat->mode & S_IFMT) | table->mode;
49013 @@ -435,13 +458,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
49014 .llseek = generic_file_llseek,
49015 };
49016
49017 -static const struct inode_operations proc_sys_inode_operations = {
49018 +const struct inode_operations proc_sys_inode_operations = {
49019 .permission = proc_sys_permission,
49020 .setattr = proc_sys_setattr,
49021 .getattr = proc_sys_getattr,
49022 };
49023
49024 -static const struct inode_operations proc_sys_dir_operations = {
49025 +const struct inode_operations proc_sys_dir_operations = {
49026 .lookup = proc_sys_lookup,
49027 .permission = proc_sys_permission,
49028 .setattr = proc_sys_setattr,
49029 diff --git a/fs/proc/root.c b/fs/proc/root.c
49030 index 46a15d8..335631a 100644
49031 --- a/fs/proc/root.c
49032 +++ b/fs/proc/root.c
49033 @@ -187,7 +187,15 @@ void __init proc_root_init(void)
49034 #ifdef CONFIG_PROC_DEVICETREE
49035 proc_device_tree_init();
49036 #endif
49037 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49038 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49039 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
49040 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49041 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
49042 +#endif
49043 +#else
49044 proc_mkdir("bus", NULL);
49045 +#endif
49046 proc_sys_init();
49047 }
49048
49049 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
49050 index 3efa725..23c925b 100644
49051 --- a/fs/proc/task_mmu.c
49052 +++ b/fs/proc/task_mmu.c
49053 @@ -11,6 +11,7 @@
49054 #include <linux/rmap.h>
49055 #include <linux/swap.h>
49056 #include <linux/swapops.h>
49057 +#include <linux/grsecurity.h>
49058
49059 #include <asm/elf.h>
49060 #include <asm/uaccess.h>
49061 @@ -52,8 +53,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49062 "VmExe:\t%8lu kB\n"
49063 "VmLib:\t%8lu kB\n"
49064 "VmPTE:\t%8lu kB\n"
49065 - "VmSwap:\t%8lu kB\n",
49066 - hiwater_vm << (PAGE_SHIFT-10),
49067 + "VmSwap:\t%8lu kB\n"
49068 +
49069 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49070 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
49071 +#endif
49072 +
49073 + ,hiwater_vm << (PAGE_SHIFT-10),
49074 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
49075 mm->locked_vm << (PAGE_SHIFT-10),
49076 mm->pinned_vm << (PAGE_SHIFT-10),
49077 @@ -62,7 +68,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49078 data << (PAGE_SHIFT-10),
49079 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
49080 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
49081 - swap << (PAGE_SHIFT-10));
49082 + swap << (PAGE_SHIFT-10)
49083 +
49084 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49085 + , mm->context.user_cs_base, mm->context.user_cs_limit
49086 +#endif
49087 +
49088 + );
49089 }
49090
49091 unsigned long task_vsize(struct mm_struct *mm)
49092 @@ -209,6 +221,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
49093 return ret;
49094 }
49095
49096 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49097 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
49098 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
49099 + _mm->pax_flags & MF_PAX_SEGMEXEC))
49100 +#endif
49101 +
49102 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49103 {
49104 struct mm_struct *mm = vma->vm_mm;
49105 @@ -227,13 +245,13 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49106 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
49107 }
49108
49109 - /* We don't show the stack guard page in /proc/maps */
49110 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49111 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
49112 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
49113 +#else
49114 start = vma->vm_start;
49115 - if (stack_guard_page_start(vma, start))
49116 - start += PAGE_SIZE;
49117 end = vma->vm_end;
49118 - if (stack_guard_page_end(vma, end))
49119 - end -= PAGE_SIZE;
49120 +#endif
49121
49122 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
49123 start,
49124 @@ -242,7 +260,11 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49125 flags & VM_WRITE ? 'w' : '-',
49126 flags & VM_EXEC ? 'x' : '-',
49127 flags & VM_MAYSHARE ? 's' : 'p',
49128 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49129 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
49130 +#else
49131 pgoff,
49132 +#endif
49133 MAJOR(dev), MINOR(dev), ino, &len);
49134
49135 /*
49136 @@ -251,7 +273,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49137 */
49138 if (file) {
49139 pad_len_spaces(m, len);
49140 - seq_path(m, &file->f_path, "\n");
49141 + seq_path(m, &file->f_path, "\n\\");
49142 } else {
49143 const char *name = arch_vma_name(vma);
49144 if (!name) {
49145 @@ -259,8 +281,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
49146 if (vma->vm_start <= mm->brk &&
49147 vma->vm_end >= mm->start_brk) {
49148 name = "[heap]";
49149 - } else if (vma->vm_start <= mm->start_stack &&
49150 - vma->vm_end >= mm->start_stack) {
49151 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
49152 + (vma->vm_start <= mm->start_stack &&
49153 + vma->vm_end >= mm->start_stack)) {
49154 name = "[stack]";
49155 }
49156 } else {
49157 @@ -281,6 +304,13 @@ static int show_map(struct seq_file *m, void *v)
49158 struct proc_maps_private *priv = m->private;
49159 struct task_struct *task = priv->task;
49160
49161 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49162 + if (current->exec_id != m->exec_id) {
49163 + gr_log_badprocpid("maps");
49164 + return 0;
49165 + }
49166 +#endif
49167 +
49168 show_map_vma(m, vma);
49169
49170 if (m->count < m->size) /* vma is copied successfully */
49171 @@ -437,12 +467,23 @@ static int show_smap(struct seq_file *m, void *v)
49172 .private = &mss,
49173 };
49174
49175 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49176 + if (current->exec_id != m->exec_id) {
49177 + gr_log_badprocpid("smaps");
49178 + return 0;
49179 + }
49180 +#endif
49181 memset(&mss, 0, sizeof mss);
49182 - mss.vma = vma;
49183 - /* mmap_sem is held in m_start */
49184 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49185 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49186 -
49187 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49188 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
49189 +#endif
49190 + mss.vma = vma;
49191 + /* mmap_sem is held in m_start */
49192 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
49193 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
49194 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49195 + }
49196 +#endif
49197 show_map_vma(m, vma);
49198
49199 seq_printf(m,
49200 @@ -460,7 +501,11 @@ static int show_smap(struct seq_file *m, void *v)
49201 "KernelPageSize: %8lu kB\n"
49202 "MMUPageSize: %8lu kB\n"
49203 "Locked: %8lu kB\n",
49204 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49205 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
49206 +#else
49207 (vma->vm_end - vma->vm_start) >> 10,
49208 +#endif
49209 mss.resident >> 10,
49210 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
49211 mss.shared_clean >> 10,
49212 @@ -1024,6 +1069,13 @@ static int show_numa_map(struct seq_file *m, void *v)
49213 int n;
49214 char buffer[50];
49215
49216 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49217 + if (current->exec_id != m->exec_id) {
49218 + gr_log_badprocpid("numa_maps");
49219 + return 0;
49220 + }
49221 +#endif
49222 +
49223 if (!mm)
49224 return 0;
49225
49226 @@ -1041,11 +1093,15 @@ static int show_numa_map(struct seq_file *m, void *v)
49227 mpol_to_str(buffer, sizeof(buffer), pol, 0);
49228 mpol_cond_put(pol);
49229
49230 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49231 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
49232 +#else
49233 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
49234 +#endif
49235
49236 if (file) {
49237 seq_printf(m, " file=");
49238 - seq_path(m, &file->f_path, "\n\t= ");
49239 + seq_path(m, &file->f_path, "\n\t\\= ");
49240 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
49241 seq_printf(m, " heap");
49242 } else if (vma->vm_start <= mm->start_stack &&
49243 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
49244 index 980de54..2a4db5f 100644
49245 --- a/fs/proc/task_nommu.c
49246 +++ b/fs/proc/task_nommu.c
49247 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
49248 else
49249 bytes += kobjsize(mm);
49250
49251 - if (current->fs && current->fs->users > 1)
49252 + if (current->fs && atomic_read(&current->fs->users) > 1)
49253 sbytes += kobjsize(current->fs);
49254 else
49255 bytes += kobjsize(current->fs);
49256 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
49257
49258 if (file) {
49259 pad_len_spaces(m, len);
49260 - seq_path(m, &file->f_path, "");
49261 + seq_path(m, &file->f_path, "\n\\");
49262 } else if (mm) {
49263 if (vma->vm_start <= mm->start_stack &&
49264 vma->vm_end >= mm->start_stack) {
49265 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
49266 index d67908b..d13f6a6 100644
49267 --- a/fs/quota/netlink.c
49268 +++ b/fs/quota/netlink.c
49269 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
49270 void quota_send_warning(short type, unsigned int id, dev_t dev,
49271 const char warntype)
49272 {
49273 - static atomic_t seq;
49274 + static atomic_unchecked_t seq;
49275 struct sk_buff *skb;
49276 void *msg_head;
49277 int ret;
49278 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
49279 "VFS: Not enough memory to send quota warning.\n");
49280 return;
49281 }
49282 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49283 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49284 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49285 if (!msg_head) {
49286 printk(KERN_ERR
49287 diff --git a/fs/readdir.c b/fs/readdir.c
49288 index 356f715..c918d38 100644
49289 --- a/fs/readdir.c
49290 +++ b/fs/readdir.c
49291 @@ -17,6 +17,7 @@
49292 #include <linux/security.h>
49293 #include <linux/syscalls.h>
49294 #include <linux/unistd.h>
49295 +#include <linux/namei.h>
49296
49297 #include <asm/uaccess.h>
49298
49299 @@ -67,6 +68,7 @@ struct old_linux_dirent {
49300
49301 struct readdir_callback {
49302 struct old_linux_dirent __user * dirent;
49303 + struct file * file;
49304 int result;
49305 };
49306
49307 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
49308 buf->result = -EOVERFLOW;
49309 return -EOVERFLOW;
49310 }
49311 +
49312 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49313 + return 0;
49314 +
49315 buf->result++;
49316 dirent = buf->dirent;
49317 if (!access_ok(VERIFY_WRITE, dirent,
49318 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
49319
49320 buf.result = 0;
49321 buf.dirent = dirent;
49322 + buf.file = file;
49323
49324 error = vfs_readdir(file, fillonedir, &buf);
49325 if (buf.result)
49326 @@ -142,6 +149,7 @@ struct linux_dirent {
49327 struct getdents_callback {
49328 struct linux_dirent __user * current_dir;
49329 struct linux_dirent __user * previous;
49330 + struct file * file;
49331 int count;
49332 int error;
49333 };
49334 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
49335 buf->error = -EOVERFLOW;
49336 return -EOVERFLOW;
49337 }
49338 +
49339 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49340 + return 0;
49341 +
49342 dirent = buf->previous;
49343 if (dirent) {
49344 if (__put_user(offset, &dirent->d_off))
49345 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
49346 buf.previous = NULL;
49347 buf.count = count;
49348 buf.error = 0;
49349 + buf.file = file;
49350
49351 error = vfs_readdir(file, filldir, &buf);
49352 if (error >= 0)
49353 @@ -229,6 +242,7 @@ out:
49354 struct getdents_callback64 {
49355 struct linux_dirent64 __user * current_dir;
49356 struct linux_dirent64 __user * previous;
49357 + struct file *file;
49358 int count;
49359 int error;
49360 };
49361 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
49362 buf->error = -EINVAL; /* only used if we fail.. */
49363 if (reclen > buf->count)
49364 return -EINVAL;
49365 +
49366 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49367 + return 0;
49368 +
49369 dirent = buf->previous;
49370 if (dirent) {
49371 if (__put_user(offset, &dirent->d_off))
49372 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49373
49374 buf.current_dir = dirent;
49375 buf.previous = NULL;
49376 + buf.file = file;
49377 buf.count = count;
49378 buf.error = 0;
49379
49380 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
49381 error = buf.error;
49382 lastdirent = buf.previous;
49383 if (lastdirent) {
49384 - typeof(lastdirent->d_off) d_off = file->f_pos;
49385 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49386 if (__put_user(d_off, &lastdirent->d_off))
49387 error = -EFAULT;
49388 else
49389 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
49390 index 60c0804..d814f98 100644
49391 --- a/fs/reiserfs/do_balan.c
49392 +++ b/fs/reiserfs/do_balan.c
49393 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
49394 return;
49395 }
49396
49397 - atomic_inc(&(fs_generation(tb->tb_sb)));
49398 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49399 do_balance_starts(tb);
49400
49401 /* balance leaf returns 0 except if combining L R and S into
49402 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
49403 index 7a99811..a7c96c4 100644
49404 --- a/fs/reiserfs/procfs.c
49405 +++ b/fs/reiserfs/procfs.c
49406 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
49407 "SMALL_TAILS " : "NO_TAILS ",
49408 replay_only(sb) ? "REPLAY_ONLY " : "",
49409 convert_reiserfs(sb) ? "CONV " : "",
49410 - atomic_read(&r->s_generation_counter),
49411 + atomic_read_unchecked(&r->s_generation_counter),
49412 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49413 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49414 SF(s_good_search_by_key_reada), SF(s_bmaps),
49415 diff --git a/fs/select.c b/fs/select.c
49416 index e782258..3b4b44c 100644
49417 --- a/fs/select.c
49418 +++ b/fs/select.c
49419 @@ -20,6 +20,7 @@
49420 #include <linux/module.h>
49421 #include <linux/slab.h>
49422 #include <linux/poll.h>
49423 +#include <linux/security.h>
49424 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49425 #include <linux/file.h>
49426 #include <linux/fdtable.h>
49427 @@ -837,6 +838,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
49428 struct poll_list *walk = head;
49429 unsigned long todo = nfds;
49430
49431 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
49432 if (nfds > rlimit(RLIMIT_NOFILE))
49433 return -EINVAL;
49434
49435 diff --git a/fs/seq_file.c b/fs/seq_file.c
49436 index 4023d6b..ab46c6a 100644
49437 --- a/fs/seq_file.c
49438 +++ b/fs/seq_file.c
49439 @@ -9,6 +9,7 @@
49440 #include <linux/module.h>
49441 #include <linux/seq_file.h>
49442 #include <linux/slab.h>
49443 +#include <linux/sched.h>
49444
49445 #include <asm/uaccess.h>
49446 #include <asm/page.h>
49447 @@ -40,6 +41,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
49448 memset(p, 0, sizeof(*p));
49449 mutex_init(&p->lock);
49450 p->op = op;
49451 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49452 + p->exec_id = current->exec_id;
49453 +#endif
49454
49455 /*
49456 * Wrappers around seq_open(e.g. swaps_open) need to be
49457 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file *p, void *v)
49458 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49459 void *data)
49460 {
49461 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49462 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49463 int res = -ENOMEM;
49464
49465 if (op) {
49466 diff --git a/fs/splice.c b/fs/splice.c
49467 index 96d7b28..fd465ac 100644
49468 --- a/fs/splice.c
49469 +++ b/fs/splice.c
49470 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49471 pipe_lock(pipe);
49472
49473 for (;;) {
49474 - if (!pipe->readers) {
49475 + if (!atomic_read(&pipe->readers)) {
49476 send_sig(SIGPIPE, current, 0);
49477 if (!ret)
49478 ret = -EPIPE;
49479 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49480 do_wakeup = 0;
49481 }
49482
49483 - pipe->waiting_writers++;
49484 + atomic_inc(&pipe->waiting_writers);
49485 pipe_wait(pipe);
49486 - pipe->waiting_writers--;
49487 + atomic_dec(&pipe->waiting_writers);
49488 }
49489
49490 pipe_unlock(pipe);
49491 @@ -560,7 +560,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49492 old_fs = get_fs();
49493 set_fs(get_ds());
49494 /* The cast to a user pointer is valid due to the set_fs() */
49495 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49496 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49497 set_fs(old_fs);
49498
49499 return res;
49500 @@ -575,7 +575,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49501 old_fs = get_fs();
49502 set_fs(get_ds());
49503 /* The cast to a user pointer is valid due to the set_fs() */
49504 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49505 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49506 set_fs(old_fs);
49507
49508 return res;
49509 @@ -626,7 +626,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49510 goto err;
49511
49512 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49513 - vec[i].iov_base = (void __user *) page_address(page);
49514 + vec[i].iov_base = (void __force_user *) page_address(page);
49515 vec[i].iov_len = this_len;
49516 spd.pages[i] = page;
49517 spd.nr_pages++;
49518 @@ -848,10 +848,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49519 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49520 {
49521 while (!pipe->nrbufs) {
49522 - if (!pipe->writers)
49523 + if (!atomic_read(&pipe->writers))
49524 return 0;
49525
49526 - if (!pipe->waiting_writers && sd->num_spliced)
49527 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49528 return 0;
49529
49530 if (sd->flags & SPLICE_F_NONBLOCK)
49531 @@ -1184,7 +1184,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49532 * out of the pipe right after the splice_to_pipe(). So set
49533 * PIPE_READERS appropriately.
49534 */
49535 - pipe->readers = 1;
49536 + atomic_set(&pipe->readers, 1);
49537
49538 current->splice_pipe = pipe;
49539 }
49540 @@ -1736,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49541 ret = -ERESTARTSYS;
49542 break;
49543 }
49544 - if (!pipe->writers)
49545 + if (!atomic_read(&pipe->writers))
49546 break;
49547 - if (!pipe->waiting_writers) {
49548 + if (!atomic_read(&pipe->waiting_writers)) {
49549 if (flags & SPLICE_F_NONBLOCK) {
49550 ret = -EAGAIN;
49551 break;
49552 @@ -1770,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49553 pipe_lock(pipe);
49554
49555 while (pipe->nrbufs >= pipe->buffers) {
49556 - if (!pipe->readers) {
49557 + if (!atomic_read(&pipe->readers)) {
49558 send_sig(SIGPIPE, current, 0);
49559 ret = -EPIPE;
49560 break;
49561 @@ -1783,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49562 ret = -ERESTARTSYS;
49563 break;
49564 }
49565 - pipe->waiting_writers++;
49566 + atomic_inc(&pipe->waiting_writers);
49567 pipe_wait(pipe);
49568 - pipe->waiting_writers--;
49569 + atomic_dec(&pipe->waiting_writers);
49570 }
49571
49572 pipe_unlock(pipe);
49573 @@ -1821,14 +1821,14 @@ retry:
49574 pipe_double_lock(ipipe, opipe);
49575
49576 do {
49577 - if (!opipe->readers) {
49578 + if (!atomic_read(&opipe->readers)) {
49579 send_sig(SIGPIPE, current, 0);
49580 if (!ret)
49581 ret = -EPIPE;
49582 break;
49583 }
49584
49585 - if (!ipipe->nrbufs && !ipipe->writers)
49586 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49587 break;
49588
49589 /*
49590 @@ -1925,7 +1925,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49591 pipe_double_lock(ipipe, opipe);
49592
49593 do {
49594 - if (!opipe->readers) {
49595 + if (!atomic_read(&opipe->readers)) {
49596 send_sig(SIGPIPE, current, 0);
49597 if (!ret)
49598 ret = -EPIPE;
49599 @@ -1970,7 +1970,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49600 * return EAGAIN if we have the potential of some data in the
49601 * future, otherwise just return 0
49602 */
49603 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49604 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49605 ret = -EAGAIN;
49606
49607 pipe_unlock(ipipe);
49608 diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
49609 index a475983..9c6a1f0 100644
49610 --- a/fs/sysfs/bin.c
49611 +++ b/fs/sysfs/bin.c
49612 @@ -67,6 +67,8 @@ fill_read(struct file *file, char *buffer, loff_t off, size_t count)
49613 }
49614
49615 static ssize_t
49616 +read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off) __size_overflow(3);
49617 +static ssize_t
49618 read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
49619 {
49620 struct bin_buffer *bb = file->private_data;
49621 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49622 index 7fdf6a7..e6cd8ad 100644
49623 --- a/fs/sysfs/dir.c
49624 +++ b/fs/sysfs/dir.c
49625 @@ -642,6 +642,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49626 struct sysfs_dirent *sd;
49627 int rc;
49628
49629 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49630 + const char *parent_name = parent_sd->s_name;
49631 +
49632 + mode = S_IFDIR | S_IRWXU;
49633 +
49634 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49635 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49636 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49637 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49638 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49639 +#endif
49640 +
49641 /* allocate */
49642 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49643 if (!sd)
49644 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49645 index 00012e3..8392349 100644
49646 --- a/fs/sysfs/file.c
49647 +++ b/fs/sysfs/file.c
49648 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49649
49650 struct sysfs_open_dirent {
49651 atomic_t refcnt;
49652 - atomic_t event;
49653 + atomic_unchecked_t event;
49654 wait_queue_head_t poll;
49655 struct list_head buffers; /* goes through sysfs_buffer.list */
49656 };
49657 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49658 if (!sysfs_get_active(attr_sd))
49659 return -ENODEV;
49660
49661 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49662 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49663 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49664
49665 sysfs_put_active(attr_sd);
49666 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49667 return -ENOMEM;
49668
49669 atomic_set(&new_od->refcnt, 0);
49670 - atomic_set(&new_od->event, 1);
49671 + atomic_set_unchecked(&new_od->event, 1);
49672 init_waitqueue_head(&new_od->poll);
49673 INIT_LIST_HEAD(&new_od->buffers);
49674 goto retry;
49675 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49676
49677 sysfs_put_active(attr_sd);
49678
49679 - if (buffer->event != atomic_read(&od->event))
49680 + if (buffer->event != atomic_read_unchecked(&od->event))
49681 goto trigger;
49682
49683 return DEFAULT_POLLMASK;
49684 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49685
49686 od = sd->s_attr.open;
49687 if (od) {
49688 - atomic_inc(&od->event);
49689 + atomic_inc_unchecked(&od->event);
49690 wake_up_interruptible(&od->poll);
49691 }
49692
49693 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49694 index a7ac78f..02158e1 100644
49695 --- a/fs/sysfs/symlink.c
49696 +++ b/fs/sysfs/symlink.c
49697 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49698
49699 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49700 {
49701 - char *page = nd_get_link(nd);
49702 + const char *page = nd_get_link(nd);
49703 if (!IS_ERR(page))
49704 free_page((unsigned long)page);
49705 }
49706 diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
49707 index f922cba..062fb02 100644
49708 --- a/fs/ubifs/debug.c
49709 +++ b/fs/ubifs/debug.c
49710 @@ -2819,6 +2819,7 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
49711 * debugfs file. Returns %0 or %1 in case of success and a negative error code
49712 * in case of failure.
49713 */
49714 +static int interpret_user_input(const char __user *u, size_t count) __size_overflow(2);
49715 static int interpret_user_input(const char __user *u, size_t count)
49716 {
49717 size_t buf_size;
49718 @@ -2837,6 +2838,8 @@ static int interpret_user_input(const char __user *u, size_t count)
49719 }
49720
49721 static ssize_t dfs_file_write(struct file *file, const char __user *u,
49722 + size_t count, loff_t *ppos) __size_overflow(3);
49723 +static ssize_t dfs_file_write(struct file *file, const char __user *u,
49724 size_t count, loff_t *ppos)
49725 {
49726 struct ubifs_info *c = file->private_data;
49727 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49728 index c175b4d..8f36a16 100644
49729 --- a/fs/udf/misc.c
49730 +++ b/fs/udf/misc.c
49731 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49732
49733 u8 udf_tag_checksum(const struct tag *t)
49734 {
49735 - u8 *data = (u8 *)t;
49736 + const u8 *data = (const u8 *)t;
49737 u8 checksum = 0;
49738 int i;
49739 for (i = 0; i < sizeof(struct tag); ++i)
49740 diff --git a/fs/utimes.c b/fs/utimes.c
49741 index ba653f3..06ea4b1 100644
49742 --- a/fs/utimes.c
49743 +++ b/fs/utimes.c
49744 @@ -1,6 +1,7 @@
49745 #include <linux/compiler.h>
49746 #include <linux/file.h>
49747 #include <linux/fs.h>
49748 +#include <linux/security.h>
49749 #include <linux/linkage.h>
49750 #include <linux/mount.h>
49751 #include <linux/namei.h>
49752 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49753 goto mnt_drop_write_and_out;
49754 }
49755 }
49756 +
49757 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49758 + error = -EACCES;
49759 + goto mnt_drop_write_and_out;
49760 + }
49761 +
49762 mutex_lock(&inode->i_mutex);
49763 error = notify_change(path->dentry, &newattrs);
49764 mutex_unlock(&inode->i_mutex);
49765 diff --git a/fs/xattr.c b/fs/xattr.c
49766 index 82f4337..236473c 100644
49767 --- a/fs/xattr.c
49768 +++ b/fs/xattr.c
49769 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49770 * Extended attribute SET operations
49771 */
49772 static long
49773 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49774 +setxattr(struct path *path, const char __user *name, const void __user *value,
49775 size_t size, int flags)
49776 {
49777 int error;
49778 @@ -339,7 +339,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49779 return PTR_ERR(kvalue);
49780 }
49781
49782 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49783 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49784 + error = -EACCES;
49785 + goto out;
49786 + }
49787 +
49788 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49789 +out:
49790 kfree(kvalue);
49791 return error;
49792 }
49793 @@ -356,7 +362,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49794 return error;
49795 error = mnt_want_write(path.mnt);
49796 if (!error) {
49797 - error = setxattr(path.dentry, name, value, size, flags);
49798 + error = setxattr(&path, name, value, size, flags);
49799 mnt_drop_write(path.mnt);
49800 }
49801 path_put(&path);
49802 @@ -375,7 +381,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49803 return error;
49804 error = mnt_want_write(path.mnt);
49805 if (!error) {
49806 - error = setxattr(path.dentry, name, value, size, flags);
49807 + error = setxattr(&path, name, value, size, flags);
49808 mnt_drop_write(path.mnt);
49809 }
49810 path_put(&path);
49811 @@ -386,17 +392,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49812 const void __user *,value, size_t, size, int, flags)
49813 {
49814 struct file *f;
49815 - struct dentry *dentry;
49816 int error = -EBADF;
49817
49818 f = fget(fd);
49819 if (!f)
49820 return error;
49821 - dentry = f->f_path.dentry;
49822 - audit_inode(NULL, dentry);
49823 + audit_inode(NULL, f->f_path.dentry);
49824 error = mnt_want_write_file(f);
49825 if (!error) {
49826 - error = setxattr(dentry, name, value, size, flags);
49827 + error = setxattr(&f->f_path, name, value, size, flags);
49828 mnt_drop_write_file(f);
49829 }
49830 fput(f);
49831 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49832 index 8d5a506..7f62712 100644
49833 --- a/fs/xattr_acl.c
49834 +++ b/fs/xattr_acl.c
49835 @@ -17,8 +17,8 @@
49836 struct posix_acl *
49837 posix_acl_from_xattr(const void *value, size_t size)
49838 {
49839 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49840 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49841 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49842 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49843 int count;
49844 struct posix_acl *acl;
49845 struct posix_acl_entry *acl_e;
49846 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49847 index 188ef2f..adcf864 100644
49848 --- a/fs/xfs/xfs_bmap.c
49849 +++ b/fs/xfs/xfs_bmap.c
49850 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49851 int nmap,
49852 int ret_nmap);
49853 #else
49854 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49855 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49856 #endif /* DEBUG */
49857
49858 STATIC int
49859 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49860 index 79d05e8..e3e5861 100644
49861 --- a/fs/xfs/xfs_dir2_sf.c
49862 +++ b/fs/xfs/xfs_dir2_sf.c
49863 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49864 }
49865
49866 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49867 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49868 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49869 + char name[sfep->namelen];
49870 + memcpy(name, sfep->name, sfep->namelen);
49871 + if (filldir(dirent, name, sfep->namelen,
49872 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49873 + *offset = off & 0x7fffffff;
49874 + return 0;
49875 + }
49876 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49877 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49878 *offset = off & 0x7fffffff;
49879 return 0;
49880 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49881 index 76f3ca5..f57f712 100644
49882 --- a/fs/xfs/xfs_ioctl.c
49883 +++ b/fs/xfs/xfs_ioctl.c
49884 @@ -128,7 +128,7 @@ xfs_find_handle(
49885 }
49886
49887 error = -EFAULT;
49888 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49889 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49890 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49891 goto out_put;
49892
49893 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49894 index ab30253..4d86958 100644
49895 --- a/fs/xfs/xfs_iops.c
49896 +++ b/fs/xfs/xfs_iops.c
49897 @@ -447,7 +447,7 @@ xfs_vn_put_link(
49898 struct nameidata *nd,
49899 void *p)
49900 {
49901 - char *s = nd_get_link(nd);
49902 + const char *s = nd_get_link(nd);
49903
49904 if (!IS_ERR(s))
49905 kfree(s);
49906 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49907 new file mode 100644
49908 index 0000000..ba6f598
49909 --- /dev/null
49910 +++ b/grsecurity/Kconfig
49911 @@ -0,0 +1,1079 @@
49912 +#
49913 +# grsecurity configuration
49914 +#
49915 +
49916 +menu "Grsecurity"
49917 +
49918 +config GRKERNSEC
49919 + bool "Grsecurity"
49920 + select CRYPTO
49921 + select CRYPTO_SHA256
49922 + help
49923 + If you say Y here, you will be able to configure many features
49924 + that will enhance the security of your system. It is highly
49925 + recommended that you say Y here and read through the help
49926 + for each option so that you fully understand the features and
49927 + can evaluate their usefulness for your machine.
49928 +
49929 +choice
49930 + prompt "Security Level"
49931 + depends on GRKERNSEC
49932 + default GRKERNSEC_CUSTOM
49933 +
49934 +config GRKERNSEC_LOW
49935 + bool "Low"
49936 + select GRKERNSEC_LINK
49937 + select GRKERNSEC_FIFO
49938 + select GRKERNSEC_RANDNET
49939 + select GRKERNSEC_DMESG
49940 + select GRKERNSEC_CHROOT
49941 + select GRKERNSEC_CHROOT_CHDIR
49942 +
49943 + help
49944 + If you choose this option, several of the grsecurity options will
49945 + be enabled that will give you greater protection against a number
49946 + of attacks, while assuring that none of your software will have any
49947 + conflicts with the additional security measures. If you run a lot
49948 + of unusual software, or you are having problems with the higher
49949 + security levels, you should say Y here. With this option, the
49950 + following features are enabled:
49951 +
49952 + - Linking restrictions
49953 + - FIFO restrictions
49954 + - Restricted dmesg
49955 + - Enforced chdir("/") on chroot
49956 + - Runtime module disabling
49957 +
49958 +config GRKERNSEC_MEDIUM
49959 + bool "Medium"
49960 + select PAX
49961 + select PAX_EI_PAX
49962 + select PAX_PT_PAX_FLAGS
49963 + select PAX_HAVE_ACL_FLAGS
49964 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
49965 + select GRKERNSEC_CHROOT
49966 + select GRKERNSEC_CHROOT_SYSCTL
49967 + select GRKERNSEC_LINK
49968 + select GRKERNSEC_FIFO
49969 + select GRKERNSEC_DMESG
49970 + select GRKERNSEC_RANDNET
49971 + select GRKERNSEC_FORKFAIL
49972 + select GRKERNSEC_TIME
49973 + select GRKERNSEC_SIGNAL
49974 + select GRKERNSEC_CHROOT
49975 + select GRKERNSEC_CHROOT_UNIX
49976 + select GRKERNSEC_CHROOT_MOUNT
49977 + select GRKERNSEC_CHROOT_PIVOT
49978 + select GRKERNSEC_CHROOT_DOUBLE
49979 + select GRKERNSEC_CHROOT_CHDIR
49980 + select GRKERNSEC_CHROOT_MKNOD
49981 + select GRKERNSEC_PROC
49982 + select GRKERNSEC_PROC_USERGROUP
49983 + select PAX_RANDUSTACK
49984 + select PAX_ASLR
49985 + select PAX_RANDMMAP
49986 + select PAX_REFCOUNT if (X86 || SPARC64)
49987 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
49988 +
49989 + help
49990 + If you say Y here, several features in addition to those included
49991 + in the low additional security level will be enabled. These
49992 + features provide even more security to your system, though in rare
49993 + cases they may be incompatible with very old or poorly written
49994 + software. If you enable this option, make sure that your auth
49995 + service (identd) is running as gid 1001. With this option,
49996 + the following features (in addition to those provided in the
49997 + low additional security level) will be enabled:
49998 +
49999 + - Failed fork logging
50000 + - Time change logging
50001 + - Signal logging
50002 + - Deny mounts in chroot
50003 + - Deny double chrooting
50004 + - Deny sysctl writes in chroot
50005 + - Deny mknod in chroot
50006 + - Deny access to abstract AF_UNIX sockets out of chroot
50007 + - Deny pivot_root in chroot
50008 + - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
50009 + - /proc restrictions with special GID set to 10 (usually wheel)
50010 + - Address Space Layout Randomization (ASLR)
50011 + - Prevent exploitation of most refcount overflows
50012 + - Bounds checking of copying between the kernel and userland
50013 +
50014 +config GRKERNSEC_HIGH
50015 + bool "High"
50016 + select GRKERNSEC_LINK
50017 + select GRKERNSEC_FIFO
50018 + select GRKERNSEC_DMESG
50019 + select GRKERNSEC_FORKFAIL
50020 + select GRKERNSEC_TIME
50021 + select GRKERNSEC_SIGNAL
50022 + select GRKERNSEC_CHROOT
50023 + select GRKERNSEC_CHROOT_SHMAT
50024 + select GRKERNSEC_CHROOT_UNIX
50025 + select GRKERNSEC_CHROOT_MOUNT
50026 + select GRKERNSEC_CHROOT_FCHDIR
50027 + select GRKERNSEC_CHROOT_PIVOT
50028 + select GRKERNSEC_CHROOT_DOUBLE
50029 + select GRKERNSEC_CHROOT_CHDIR
50030 + select GRKERNSEC_CHROOT_MKNOD
50031 + select GRKERNSEC_CHROOT_CAPS
50032 + select GRKERNSEC_CHROOT_SYSCTL
50033 + select GRKERNSEC_CHROOT_FINDTASK
50034 + select GRKERNSEC_SYSFS_RESTRICT
50035 + select GRKERNSEC_PROC
50036 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50037 + select GRKERNSEC_HIDESYM
50038 + select GRKERNSEC_BRUTE
50039 + select GRKERNSEC_PROC_USERGROUP
50040 + select GRKERNSEC_KMEM
50041 + select GRKERNSEC_RESLOG
50042 + select GRKERNSEC_RANDNET
50043 + select GRKERNSEC_PROC_ADD
50044 + select GRKERNSEC_CHROOT_CHMOD
50045 + select GRKERNSEC_CHROOT_NICE
50046 + select GRKERNSEC_SETXID if (X86)
50047 + select GRKERNSEC_AUDIT_MOUNT
50048 + select GRKERNSEC_MODHARDEN if (MODULES)
50049 + select GRKERNSEC_HARDEN_PTRACE
50050 + select GRKERNSEC_PTRACE_READEXEC
50051 + select GRKERNSEC_VM86 if (X86_32)
50052 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50053 + select PAX
50054 + select PAX_RANDUSTACK
50055 + select PAX_ASLR
50056 + select PAX_RANDMMAP
50057 + select PAX_NOEXEC
50058 + select PAX_MPROTECT
50059 + select PAX_EI_PAX
50060 + select PAX_PT_PAX_FLAGS
50061 + select PAX_HAVE_ACL_FLAGS
50062 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50063 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50064 + select PAX_RANDKSTACK if (X86_TSC && X86)
50065 + select PAX_SEGMEXEC if (X86_32)
50066 + select PAX_PAGEEXEC
50067 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50068 + select PAX_EMUTRAMP if (PARISC)
50069 + select PAX_EMUSIGRT if (PARISC)
50070 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50071 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50072 + select PAX_REFCOUNT if (X86 || SPARC64)
50073 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50074 + help
50075 + If you say Y here, many of the features of grsecurity will be
50076 + enabled, which will protect you against many kinds of attacks
50077 + against your system. The heightened security comes at a cost
50078 + of an increased chance of incompatibilities with rare software
50079 + on your machine. Since this security level enables PaX, you should
50080 + view <http://pax.grsecurity.net> and read about the PaX
50081 + project. While you are there, download chpax and run it on
50082 + binaries that cause problems with PaX. Also remember that
50083 + since the /proc restrictions are enabled, you must run your
50084 + identd as gid 1001. This security level enables the following
50085 + features in addition to those listed in the low and medium
50086 + security levels:
50087 +
50088 + - Additional /proc restrictions
50089 + - Chmod restrictions in chroot
50090 + - No signals, ptrace, or viewing of processes outside of chroot
50091 + - Capability restrictions in chroot
50092 + - Deny fchdir out of chroot
50093 + - Priority restrictions in chroot
50094 + - Segmentation-based implementation of PaX
50095 + - Mprotect restrictions
50096 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50097 + - Kernel stack randomization
50098 + - Mount/unmount/remount logging
50099 + - Kernel symbol hiding
50100 + - Hardening of module auto-loading
50101 + - Ptrace restrictions
50102 + - Restricted vm86 mode
50103 + - Restricted sysfs/debugfs
50104 + - Active kernel exploit response
50105 +
50106 +config GRKERNSEC_CUSTOM
50107 + bool "Custom"
50108 + help
50109 + If you say Y here, you will be able to configure every grsecurity
50110 + option, which allows you to enable many more features that aren't
50111 + covered in the basic security levels. These additional features
50112 + include TPE, socket restrictions, and the sysctl system for
50113 + grsecurity. It is advised that you read through the help for
50114 + each option to determine its usefulness in your situation.
50115 +
50116 +endchoice
50117 +
50118 +menu "Memory Protections"
50119 +depends on GRKERNSEC
50120 +
50121 +config GRKERNSEC_KMEM
50122 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
50123 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50124 + help
50125 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50126 + be written to or read from to modify or leak the contents of the running
50127 + kernel. /dev/port will also not be allowed to be opened. If you have module
50128 + support disabled, enabling this will close up four ways that are
50129 + currently used to insert malicious code into the running kernel.
50130 + Even with all these features enabled, we still highly recommend that
50131 + you use the RBAC system, as it is still possible for an attacker to
50132 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50133 + If you are not using XFree86, you may be able to stop this additional
50134 + case by enabling the 'Disable privileged I/O' option. Though nothing
50135 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50136 + but only to video memory, which is the only writing we allow in this
50137 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50138 + not be allowed to mprotect it with PROT_WRITE later.
50139 + It is highly recommended that you say Y here if you meet all the
50140 + conditions above.
50141 +
50142 +config GRKERNSEC_VM86
50143 + bool "Restrict VM86 mode"
50144 + depends on X86_32
50145 +
50146 + help
50147 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50148 + make use of a special execution mode on 32bit x86 processors called
50149 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50150 + video cards and will still work with this option enabled. The purpose
50151 + of the option is to prevent exploitation of emulation errors in
50152 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50153 + Nearly all users should be able to enable this option.
50154 +
50155 +config GRKERNSEC_IO
50156 + bool "Disable privileged I/O"
50157 + depends on X86
50158 + select RTC_CLASS
50159 + select RTC_INTF_DEV
50160 + select RTC_DRV_CMOS
50161 +
50162 + help
50163 + If you say Y here, all ioperm and iopl calls will return an error.
50164 + Ioperm and iopl can be used to modify the running kernel.
50165 + Unfortunately, some programs need this access to operate properly,
50166 + the most notable of which are XFree86 and hwclock. hwclock can be
50167 + remedied by having RTC support in the kernel, so real-time
50168 + clock support is enabled if this option is enabled, to ensure
50169 + that hwclock operates correctly. XFree86 still will not
50170 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50171 + IF YOU USE XFree86. If you use XFree86 and you still want to
50172 + protect your kernel against modification, use the RBAC system.
50173 +
50174 +config GRKERNSEC_PROC_MEMMAP
50175 + bool "Harden ASLR against information leaks and entropy reduction"
50176 + default y if (PAX_NOEXEC || PAX_ASLR)
50177 + depends on PAX_NOEXEC || PAX_ASLR
50178 + help
50179 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50180 + give no information about the addresses of its mappings if
50181 + PaX features that rely on random addresses are enabled on the task.
50182 + In addition to sanitizing this information and disabling other
50183 + dangerous sources of information, this option causes reads of sensitive
50184 + /proc/<pid> entries to be denied where the file descriptor was opened in
50185 + a different task than the one performing the read. Such attempts are logged.
50186 + This option also limits argv/env strings for suid/sgid binaries
50187 + to 512KB to prevent a complete exhaustion of the stack entropy provided
50188 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
50189 + binaries to prevent alternative mmap layouts from being abused.
50190 +
50191 + If you use PaX it is essential that you say Y here as it closes up
50192 + several holes that make full ASLR useless locally.
50193 +
50194 +config GRKERNSEC_BRUTE
50195 + bool "Deter exploit bruteforcing"
50196 + help
50197 + If you say Y here, attempts to bruteforce exploits against forking
50198 + daemons such as apache or sshd, as well as against suid/sgid binaries
50199 + will be deterred. When a child of a forking daemon is killed by PaX
50200 + or crashes due to an illegal instruction or other suspicious signal,
50201 + the parent process will be delayed 30 seconds upon every subsequent
50202 + fork until the administrator is able to assess the situation and
50203 + restart the daemon.
50204 + In the suid/sgid case, the attempt is logged, the user has all their
50205 + processes terminated, and they are prevented from executing any further
50206 + processes for 15 minutes.
50207 + It is recommended that you also enable signal logging in the auditing
50208 + section so that logs are generated when a process triggers a suspicious
50209 + signal.
50210 + If the sysctl option is enabled, a sysctl option with name
50211 + "deter_bruteforce" is created.
50212 +
50213 +
50214 +config GRKERNSEC_MODHARDEN
50215 + bool "Harden module auto-loading"
50216 + depends on MODULES
50217 + help
50218 + If you say Y here, module auto-loading in response to use of some
50219 + feature implemented by an unloaded module will be restricted to
50220 + root users. Enabling this option helps defend against attacks
50221 + by unprivileged users who abuse the auto-loading behavior to
50222 + cause a vulnerable module to load that is then exploited.
50223 +
50224 + If this option prevents a legitimate use of auto-loading for a
50225 + non-root user, the administrator can execute modprobe manually
50226 + with the exact name of the module mentioned in the alert log.
50227 + Alternatively, the administrator can add the module to the list
50228 + of modules loaded at boot by modifying init scripts.
50229 +
50230 + Modification of init scripts will most likely be needed on
50231 + Ubuntu servers with encrypted home directory support enabled,
50232 + as the first non-root user logging in will cause the ecb(aes),
50233 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50234 +
50235 +config GRKERNSEC_HIDESYM
50236 + bool "Hide kernel symbols"
50237 + help
50238 + If you say Y here, getting information on loaded modules, and
50239 + displaying all kernel symbols through a syscall will be restricted
50240 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50241 + /proc/kallsyms will be restricted to the root user. The RBAC
50242 + system can hide that entry even from root.
50243 +
50244 + This option also prevents leaking of kernel addresses through
50245 + several /proc entries.
50246 +
50247 + Note that this option is only effective provided the following
50248 + conditions are met:
50249 + 1) The kernel using grsecurity is not precompiled by some distribution
50250 + 2) You have also enabled GRKERNSEC_DMESG
50251 + 3) You are using the RBAC system and hiding other files such as your
50252 + kernel image and System.map. Alternatively, enabling this option
50253 + causes the permissions on /boot, /lib/modules, and the kernel
50254 + source directory to change at compile time to prevent
50255 + reading by non-root users.
50256 + If the above conditions are met, this option will aid in providing a
50257 + useful protection against local kernel exploitation of overflows
50258 + and arbitrary read/write vulnerabilities.
50259 +
50260 +config GRKERNSEC_KERN_LOCKOUT
50261 + bool "Active kernel exploit response"
50262 + depends on X86 || ARM || PPC || SPARC
50263 + help
50264 + If you say Y here, when a PaX alert is triggered due to suspicious
50265 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50266 + or an OOPs occurs due to bad memory accesses, instead of just
50267 + terminating the offending process (and potentially allowing
50268 + a subsequent exploit from the same user), we will take one of two
50269 + actions:
50270 + If the user was root, we will panic the system
50271 + If the user was non-root, we will log the attempt, terminate
50272 + all processes owned by the user, then prevent them from creating
50273 + any new processes until the system is restarted
50274 + This deters repeated kernel exploitation/bruteforcing attempts
50275 + and is useful for later forensics.
50276 +
50277 +endmenu
50278 +menu "Role Based Access Control Options"
50279 +depends on GRKERNSEC
50280 +
50281 +config GRKERNSEC_RBAC_DEBUG
50282 + bool
50283 +
50284 +config GRKERNSEC_NO_RBAC
50285 + bool "Disable RBAC system"
50286 + help
50287 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50288 + preventing the RBAC system from being enabled. You should only say Y
50289 + here if you have no intention of using the RBAC system, so as to prevent
50290 + an attacker with root access from misusing the RBAC system to hide files
50291 + and processes when loadable module support and /dev/[k]mem have been
50292 + locked down.
50293 +
50294 +config GRKERNSEC_ACL_HIDEKERN
50295 + bool "Hide kernel processes"
50296 + help
50297 + If you say Y here, all kernel threads will be hidden to all
50298 + processes but those whose subject has the "view hidden processes"
50299 + flag.
50300 +
50301 +config GRKERNSEC_ACL_MAXTRIES
50302 + int "Maximum tries before password lockout"
50303 + default 3
50304 + help
50305 + This option enforces the maximum number of times a user can attempt
50306 + to authorize themselves with the grsecurity RBAC system before being
50307 + denied the ability to attempt authorization again for a specified time.
50308 + The lower the number, the harder it will be to brute-force a password.
50309 +
50310 +config GRKERNSEC_ACL_TIMEOUT
50311 + int "Time to wait after max password tries, in seconds"
50312 + default 30
50313 + help
50314 + This option specifies the time the user must wait after attempting to
50315 + authorize to the RBAC system with the maximum number of invalid
50316 + passwords. The higher the number, the harder it will be to brute-force
50317 + a password.
50318 +
50319 +endmenu
50320 +menu "Filesystem Protections"
50321 +depends on GRKERNSEC
50322 +
50323 +config GRKERNSEC_PROC
50324 + bool "Proc restrictions"
50325 + help
50326 + If you say Y here, the permissions of the /proc filesystem
50327 + will be altered to enhance system security and privacy. You MUST
50328 + choose either a user only restriction or a user and group restriction.
50329 + Depending upon the option you choose, you can either restrict users to
50330 + see only the processes they themselves run, or choose a group that can
50331 + view all processes and files normally restricted to root if you choose
50332 + the "restrict to user only" option. NOTE: If you're running identd or
50333 + ntpd as a non-root user, you will have to run it as the group you
50334 + specify here.
50335 +
50336 +config GRKERNSEC_PROC_USER
50337 + bool "Restrict /proc to user only"
50338 + depends on GRKERNSEC_PROC
50339 + help
50340 + If you say Y here, non-root users will only be able to view their own
50341 + processes, and restricts them from viewing network-related information,
50342 + and viewing kernel symbol and module information.
50343 +
50344 +config GRKERNSEC_PROC_USERGROUP
50345 + bool "Allow special group"
50346 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50347 + help
50348 + If you say Y here, you will be able to select a group that will be
50349 + able to view all processes and network-related information. If you've
50350 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50351 + remain hidden. This option is useful if you want to run identd as
50352 + a non-root user.
50353 +
50354 +config GRKERNSEC_PROC_GID
50355 + int "GID for special group"
50356 + depends on GRKERNSEC_PROC_USERGROUP
50357 + default 1001
50358 +
50359 +config GRKERNSEC_PROC_ADD
50360 + bool "Additional restrictions"
50361 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50362 + help
50363 + If you say Y here, additional restrictions will be placed on
50364 + /proc that keep normal users from viewing device information and
50365 + slabinfo information that could be useful for exploits.
50366 +
50367 +config GRKERNSEC_LINK
50368 + bool "Linking restrictions"
50369 + help
50370 + If you say Y here, /tmp race exploits will be prevented, since users
50371 + will no longer be able to follow symlinks owned by other users in
50372 + world-writable +t directories (e.g. /tmp), unless the owner of the
50373 + symlink is the owner of the directory. users will also not be
50374 + able to hardlink to files they do not own. If the sysctl option is
50375 + enabled, a sysctl option with name "linking_restrictions" is created.
50376 +
50377 +config GRKERNSEC_FIFO
50378 + bool "FIFO restrictions"
50379 + help
50380 + If you say Y here, users will not be able to write to FIFOs they don't
50381 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50382 + the FIFO is the same owner of the directory it's held in. If the sysctl
50383 + option is enabled, a sysctl option with name "fifo_restrictions" is
50384 + created.
50385 +
50386 +config GRKERNSEC_SYSFS_RESTRICT
50387 + bool "Sysfs/debugfs restriction"
50388 + depends on SYSFS
50389 + help
50390 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50391 + any filesystem normally mounted under it (e.g. debugfs) will be
50392 + mostly accessible only by root. These filesystems generally provide access
50393 + to hardware and debug information that isn't appropriate for unprivileged
50394 + users of the system. Sysfs and debugfs have also become a large source
50395 + of new vulnerabilities, ranging from infoleaks to local compromise.
50396 + There has been very little oversight with an eye toward security involved
50397 + in adding new exporters of information to these filesystems, so their
50398 + use is discouraged.
50399 + For reasons of compatibility, a few directories have been whitelisted
50400 + for access by non-root users:
50401 + /sys/fs/selinux
50402 + /sys/fs/fuse
50403 + /sys/devices/system/cpu
50404 +
50405 +config GRKERNSEC_ROFS
50406 + bool "Runtime read-only mount protection"
50407 + help
50408 + If you say Y here, a sysctl option with name "romount_protect" will
50409 + be created. By setting this option to 1 at runtime, filesystems
50410 + will be protected in the following ways:
50411 + * No new writable mounts will be allowed
50412 + * Existing read-only mounts won't be able to be remounted read/write
50413 + * Write operations will be denied on all block devices
50414 + This option acts independently of grsec_lock: once it is set to 1,
50415 + it cannot be turned off. Therefore, please be mindful of the resulting
50416 + behavior if this option is enabled in an init script on a read-only
50417 + filesystem. This feature is mainly intended for secure embedded systems.
50418 +
50419 +config GRKERNSEC_CHROOT
50420 + bool "Chroot jail restrictions"
50421 + help
50422 + If you say Y here, you will be able to choose several options that will
50423 + make breaking out of a chrooted jail much more difficult. If you
50424 + encounter no software incompatibilities with the following options, it
50425 + is recommended that you enable each one.
50426 +
50427 +config GRKERNSEC_CHROOT_MOUNT
50428 + bool "Deny mounts"
50429 + depends on GRKERNSEC_CHROOT
50430 + help
50431 + If you say Y here, processes inside a chroot will not be able to
50432 + mount or remount filesystems. If the sysctl option is enabled, a
50433 + sysctl option with name "chroot_deny_mount" is created.
50434 +
50435 +config GRKERNSEC_CHROOT_DOUBLE
50436 + bool "Deny double-chroots"
50437 + depends on GRKERNSEC_CHROOT
50438 + help
50439 + If you say Y here, processes inside a chroot will not be able to chroot
50440 + again outside the chroot. This is a widely used method of breaking
50441 + out of a chroot jail and should not be allowed. If the sysctl
50442 + option is enabled, a sysctl option with name
50443 + "chroot_deny_chroot" is created.
50444 +
50445 +config GRKERNSEC_CHROOT_PIVOT
50446 + bool "Deny pivot_root in chroot"
50447 + depends on GRKERNSEC_CHROOT
50448 + help
50449 + If you say Y here, processes inside a chroot will not be able to use
50450 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50451 + works similar to chroot in that it changes the root filesystem. This
50452 + function could be misused in a chrooted process to attempt to break out
50453 + of the chroot, and therefore should not be allowed. If the sysctl
50454 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50455 + created.
50456 +
50457 +config GRKERNSEC_CHROOT_CHDIR
50458 + bool "Enforce chdir(\"/\") on all chroots"
50459 + depends on GRKERNSEC_CHROOT
50460 + help
50461 + If you say Y here, the current working directory of all newly-chrooted
50462 + applications will be set to the root directory of the chroot.
50463 + The man page on chroot(2) states:
50464 + Note that this call does not change the current working
50465 + directory, so that `.' can be outside the tree rooted at
50466 + `/'. In particular, the super-user can escape from a
50467 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50468 +
50469 + It is recommended that you say Y here, since it's not known to break
50470 + any software. If the sysctl option is enabled, a sysctl option with
50471 + name "chroot_enforce_chdir" is created.
50472 +
50473 +config GRKERNSEC_CHROOT_CHMOD
50474 + bool "Deny (f)chmod +s"
50475 + depends on GRKERNSEC_CHROOT
50476 + help
50477 + If you say Y here, processes inside a chroot will not be able to chmod
50478 + or fchmod files to make them have suid or sgid bits. This protects
50479 + against another published method of breaking a chroot. If the sysctl
50480 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50481 + created.
50482 +
50483 +config GRKERNSEC_CHROOT_FCHDIR
50484 + bool "Deny fchdir out of chroot"
50485 + depends on GRKERNSEC_CHROOT
50486 + help
50487 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50488 + to a file descriptor of the chrooting process that points to a directory
50489 + outside the filesystem will be stopped. If the sysctl option
50490 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50491 +
50492 +config GRKERNSEC_CHROOT_MKNOD
50493 + bool "Deny mknod"
50494 + depends on GRKERNSEC_CHROOT
50495 + help
50496 + If you say Y here, processes inside a chroot will not be allowed to
50497 + mknod. The problem with using mknod inside a chroot is that it
50498 + would allow an attacker to create a device entry that is the same
50499 + as one on the physical root of your system, which could range from
50500 + anything from the console device to a device for your harddrive (which
50501 + they could then use to wipe the drive or steal data). It is recommended
50502 + that you say Y here, unless you run into software incompatibilities.
50503 + If the sysctl option is enabled, a sysctl option with name
50504 + "chroot_deny_mknod" is created.
50505 +
50506 +config GRKERNSEC_CHROOT_SHMAT
50507 + bool "Deny shmat() out of chroot"
50508 + depends on GRKERNSEC_CHROOT
50509 + help
50510 + If you say Y here, processes inside a chroot will not be able to attach
50511 + to shared memory segments that were created outside of the chroot jail.
50512 + It is recommended that you say Y here. If the sysctl option is enabled,
50513 + a sysctl option with name "chroot_deny_shmat" is created.
50514 +
50515 +config GRKERNSEC_CHROOT_UNIX
50516 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50517 + depends on GRKERNSEC_CHROOT
50518 + help
50519 + If you say Y here, processes inside a chroot will not be able to
50520 + connect to abstract (meaning not belonging to a filesystem) Unix
50521 + domain sockets that were bound outside of a chroot. It is recommended
50522 + that you say Y here. If the sysctl option is enabled, a sysctl option
50523 + with name "chroot_deny_unix" is created.
50524 +
50525 +config GRKERNSEC_CHROOT_FINDTASK
50526 + bool "Protect outside processes"
50527 + depends on GRKERNSEC_CHROOT
50528 + help
50529 + If you say Y here, processes inside a chroot will not be able to
50530 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50531 + getsid, or view any process outside of the chroot. If the sysctl
50532 + option is enabled, a sysctl option with name "chroot_findtask" is
50533 + created.
50534 +
50535 +config GRKERNSEC_CHROOT_NICE
50536 + bool "Restrict priority changes"
50537 + depends on GRKERNSEC_CHROOT
50538 + help
50539 + If you say Y here, processes inside a chroot will not be able to raise
50540 + the priority of processes in the chroot, or alter the priority of
50541 + processes outside the chroot. This provides more security than simply
50542 + removing CAP_SYS_NICE from the process' capability set. If the
50543 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50544 + is created.
50545 +
50546 +config GRKERNSEC_CHROOT_SYSCTL
50547 + bool "Deny sysctl writes"
50548 + depends on GRKERNSEC_CHROOT
50549 + help
50550 + If you say Y here, an attacker in a chroot will not be able to
50551 + write to sysctl entries, either by sysctl(2) or through a /proc
50552 + interface. It is strongly recommended that you say Y here. If the
50553 + sysctl option is enabled, a sysctl option with name
50554 + "chroot_deny_sysctl" is created.
50555 +
50556 +config GRKERNSEC_CHROOT_CAPS
50557 + bool "Capability restrictions"
50558 + depends on GRKERNSEC_CHROOT
50559 + help
50560 + If you say Y here, the capabilities on all processes within a
50561 + chroot jail will be lowered to stop module insertion, raw i/o,
50562 + system and net admin tasks, rebooting the system, modifying immutable
50563 + files, modifying IPC owned by another, and changing the system time.
50564 + This is left an option because it can break some apps. Disable this
50565 + if your chrooted apps are having problems performing those kinds of
50566 + tasks. If the sysctl option is enabled, a sysctl option with
50567 + name "chroot_caps" is created.
50568 +
50569 +endmenu
50570 +menu "Kernel Auditing"
50571 +depends on GRKERNSEC
50572 +
50573 +config GRKERNSEC_AUDIT_GROUP
50574 + bool "Single group for auditing"
50575 + help
50576 + If you say Y here, the exec, chdir, and (un)mount logging features
50577 + will only operate on a group you specify. This option is recommended
50578 + if you only want to watch certain users instead of having a large
50579 + amount of logs from the entire system. If the sysctl option is enabled,
50580 + a sysctl option with name "audit_group" is created.
50581 +
50582 +config GRKERNSEC_AUDIT_GID
50583 + int "GID for auditing"
50584 + depends on GRKERNSEC_AUDIT_GROUP
50585 + default 1007
50586 +
50587 +config GRKERNSEC_EXECLOG
50588 + bool "Exec logging"
50589 + help
50590 + If you say Y here, all execve() calls will be logged (since the
50591 + other exec*() calls are frontends to execve(), all execution
50592 + will be logged). Useful for shell-servers that like to keep track
50593 + of their users. If the sysctl option is enabled, a sysctl option with
50594 + name "exec_logging" is created.
50595 + WARNING: This option when enabled will produce a LOT of logs, especially
50596 + on an active system.
50597 +
50598 +config GRKERNSEC_RESLOG
50599 + bool "Resource logging"
50600 + help
50601 + If you say Y here, all attempts to overstep resource limits will
50602 + be logged with the resource name, the requested size, and the current
50603 + limit. It is highly recommended that you say Y here. If the sysctl
50604 + option is enabled, a sysctl option with name "resource_logging" is
50605 + created. If the RBAC system is enabled, the sysctl value is ignored.
50606 +
50607 +config GRKERNSEC_CHROOT_EXECLOG
50608 + bool "Log execs within chroot"
50609 + help
50610 + If you say Y here, all executions inside a chroot jail will be logged
50611 + to syslog. This can cause a large amount of logs if certain
50612 + applications (eg. djb's daemontools) are installed on the system, and
50613 + is therefore left as an option. If the sysctl option is enabled, a
50614 + sysctl option with name "chroot_execlog" is created.
50615 +
50616 +config GRKERNSEC_AUDIT_PTRACE
50617 + bool "Ptrace logging"
50618 + help
50619 + If you say Y here, all attempts to attach to a process via ptrace
50620 + will be logged. If the sysctl option is enabled, a sysctl option
50621 + with name "audit_ptrace" is created.
50622 +
50623 +config GRKERNSEC_AUDIT_CHDIR
50624 + bool "Chdir logging"
50625 + help
50626 + If you say Y here, all chdir() calls will be logged. If the sysctl
50627 + option is enabled, a sysctl option with name "audit_chdir" is created.
50628 +
50629 +config GRKERNSEC_AUDIT_MOUNT
50630 + bool "(Un)Mount logging"
50631 + help
50632 + If you say Y here, all mounts and unmounts will be logged. If the
50633 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50634 + created.
50635 +
50636 +config GRKERNSEC_SIGNAL
50637 + bool "Signal logging"
50638 + help
50639 + If you say Y here, certain important signals will be logged, such as
50640 + SIGSEGV, which will as a result inform you of when a error in a program
50641 + occurred, which in some cases could mean a possible exploit attempt.
50642 + If the sysctl option is enabled, a sysctl option with name
50643 + "signal_logging" is created.
50644 +
50645 +config GRKERNSEC_FORKFAIL
50646 + bool "Fork failure logging"
50647 + help
50648 + If you say Y here, all failed fork() attempts will be logged.
50649 + This could suggest a fork bomb, or someone attempting to overstep
50650 + their process limit. If the sysctl option is enabled, a sysctl option
50651 + with name "forkfail_logging" is created.
50652 +
50653 +config GRKERNSEC_TIME
50654 + bool "Time change logging"
50655 + help
50656 + If you say Y here, any changes of the system clock will be logged.
50657 + If the sysctl option is enabled, a sysctl option with name
50658 + "timechange_logging" is created.
50659 +
50660 +config GRKERNSEC_PROC_IPADDR
50661 + bool "/proc/<pid>/ipaddr support"
50662 + help
50663 + If you say Y here, a new entry will be added to each /proc/<pid>
50664 + directory that contains the IP address of the person using the task.
50665 + The IP is carried across local TCP and AF_UNIX stream sockets.
50666 + This information can be useful for IDS/IPSes to perform remote response
50667 + to a local attack. The entry is readable by only the owner of the
50668 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50669 + the RBAC system), and thus does not create privacy concerns.
50670 +
50671 +config GRKERNSEC_RWXMAP_LOG
50672 + bool 'Denied RWX mmap/mprotect logging'
50673 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50674 + help
50675 + If you say Y here, calls to mmap() and mprotect() with explicit
50676 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50677 + denied by the PAX_MPROTECT feature. If the sysctl option is
50678 + enabled, a sysctl option with name "rwxmap_logging" is created.
50679 +
50680 +config GRKERNSEC_AUDIT_TEXTREL
50681 + bool 'ELF text relocations logging (READ HELP)'
50682 + depends on PAX_MPROTECT
50683 + help
50684 + If you say Y here, text relocations will be logged with the filename
50685 + of the offending library or binary. The purpose of the feature is
50686 + to help Linux distribution developers get rid of libraries and
50687 + binaries that need text relocations which hinder the future progress
50688 + of PaX. Only Linux distribution developers should say Y here, and
50689 + never on a production machine, as this option creates an information
50690 + leak that could aid an attacker in defeating the randomization of
50691 + a single memory region. If the sysctl option is enabled, a sysctl
50692 + option with name "audit_textrel" is created.
50693 +
50694 +endmenu
50695 +
50696 +menu "Executable Protections"
50697 +depends on GRKERNSEC
50698 +
50699 +config GRKERNSEC_DMESG
50700 + bool "Dmesg(8) restriction"
50701 + help
50702 + If you say Y here, non-root users will not be able to use dmesg(8)
50703 + to view up to the last 4kb of messages in the kernel's log buffer.
50704 + The kernel's log buffer often contains kernel addresses and other
50705 + identifying information useful to an attacker in fingerprinting a
50706 + system for a targeted exploit.
50707 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50708 + created.
50709 +
50710 +config GRKERNSEC_HARDEN_PTRACE
50711 + bool "Deter ptrace-based process snooping"
50712 + help
50713 + If you say Y here, TTY sniffers and other malicious monitoring
50714 + programs implemented through ptrace will be defeated. If you
50715 + have been using the RBAC system, this option has already been
50716 + enabled for several years for all users, with the ability to make
50717 + fine-grained exceptions.
50718 +
50719 + This option only affects the ability of non-root users to ptrace
50720 + processes that are not a descendent of the ptracing process.
50721 + This means that strace ./binary and gdb ./binary will still work,
50722 + but attaching to arbitrary processes will not. If the sysctl
50723 + option is enabled, a sysctl option with name "harden_ptrace" is
50724 + created.
50725 +
50726 +config GRKERNSEC_PTRACE_READEXEC
50727 + bool "Require read access to ptrace sensitive binaries"
50728 + help
50729 + If you say Y here, unprivileged users will not be able to ptrace unreadable
50730 + binaries. This option is useful in environments that
50731 + remove the read bits (e.g. file mode 4711) from suid binaries to
50732 + prevent infoleaking of their contents. This option adds
50733 + consistency to the use of that file mode, as the binary could normally
50734 + be read out when run without privileges while ptracing.
50735 +
50736 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50737 + is created.
50738 +
50739 +config GRKERNSEC_SETXID
50740 + bool "Enforce consistent multithreaded privileges"
50741 + depends on (X86)
50742 + help
50743 + If you say Y here, a change from a root uid to a non-root uid
50744 + in a multithreaded application will cause the resulting uids,
50745 + gids, supplementary groups, and capabilities in that thread
50746 + to be propagated to the other threads of the process. In most
50747 + cases this is unnecessary, as glibc will emulate this behavior
50748 + on behalf of the application. Other libcs do not act in the
50749 + same way, allowing the other threads of the process to continue
50750 + running with root privileges. If the sysctl option is enabled,
50751 + a sysctl option with name "consistent_setxid" is created.
50752 +
50753 +config GRKERNSEC_TPE
50754 + bool "Trusted Path Execution (TPE)"
50755 + help
50756 + If you say Y here, you will be able to choose a gid to add to the
50757 + supplementary groups of users you want to mark as "untrusted."
50758 + These users will not be able to execute any files that are not in
50759 + root-owned directories writable only by root. If the sysctl option
50760 + is enabled, a sysctl option with name "tpe" is created.
50761 +
50762 +config GRKERNSEC_TPE_ALL
50763 + bool "Partially restrict all non-root users"
50764 + depends on GRKERNSEC_TPE
50765 + help
50766 + If you say Y here, all non-root users will be covered under
50767 + a weaker TPE restriction. This is separate from, and in addition to,
50768 + the main TPE options that you have selected elsewhere. Thus, if a
50769 + "trusted" GID is chosen, this restriction applies to even that GID.
50770 + Under this restriction, all non-root users will only be allowed to
50771 + execute files in directories they own that are not group or
50772 + world-writable, or in directories owned by root and writable only by
50773 + root. If the sysctl option is enabled, a sysctl option with name
50774 + "tpe_restrict_all" is created.
50775 +
50776 +config GRKERNSEC_TPE_INVERT
50777 + bool "Invert GID option"
50778 + depends on GRKERNSEC_TPE
50779 + help
50780 + If you say Y here, the group you specify in the TPE configuration will
50781 + decide what group TPE restrictions will be *disabled* for. This
50782 + option is useful if you want TPE restrictions to be applied to most
50783 + users on the system. If the sysctl option is enabled, a sysctl option
50784 + with name "tpe_invert" is created. Unlike other sysctl options, this
50785 + entry will default to on for backward-compatibility.
50786 +
50787 +config GRKERNSEC_TPE_GID
50788 + int "GID for untrusted users"
50789 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50790 + default 1005
50791 + help
50792 + Setting this GID determines what group TPE restrictions will be
50793 + *enabled* for. If the sysctl option is enabled, a sysctl option
50794 + with name "tpe_gid" is created.
50795 +
50796 +config GRKERNSEC_TPE_GID
50797 + int "GID for trusted users"
50798 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50799 + default 1005
50800 + help
50801 + Setting this GID determines what group TPE restrictions will be
50802 + *disabled* for. If the sysctl option is enabled, a sysctl option
50803 + with name "tpe_gid" is created.
50804 +
50805 +endmenu
50806 +menu "Network Protections"
50807 +depends on GRKERNSEC
50808 +
50809 +config GRKERNSEC_RANDNET
50810 + bool "Larger entropy pools"
50811 + help
50812 + If you say Y here, the entropy pools used for many features of Linux
50813 + and grsecurity will be doubled in size. Since several grsecurity
50814 + features use additional randomness, it is recommended that you say Y
50815 + here. Saying Y here has a similar effect as modifying
50816 + /proc/sys/kernel/random/poolsize.
50817 +
50818 +config GRKERNSEC_BLACKHOLE
50819 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50820 + depends on NET
50821 + help
50822 + If you say Y here, neither TCP resets nor ICMP
50823 + destination-unreachable packets will be sent in response to packets
50824 + sent to ports for which no associated listening process exists.
50825 + This feature supports both IPV4 and IPV6 and exempts the
50826 + loopback interface from blackholing. Enabling this feature
50827 + makes a host more resilient to DoS attacks and reduces network
50828 + visibility against scanners.
50829 +
50830 + The blackhole feature as-implemented is equivalent to the FreeBSD
50831 + blackhole feature, as it prevents RST responses to all packets, not
50832 + just SYNs. Under most application behavior this causes no
50833 + problems, but applications (like haproxy) may not close certain
50834 + connections in a way that cleanly terminates them on the remote
50835 + end, leaving the remote host in LAST_ACK state. Because of this
50836 + side-effect and to prevent intentional LAST_ACK DoSes, this
50837 + feature also adds automatic mitigation against such attacks.
50838 + The mitigation drastically reduces the amount of time a socket
50839 + can spend in LAST_ACK state. If you're using haproxy and not
50840 + all servers it connects to have this option enabled, consider
50841 + disabling this feature on the haproxy host.
50842 +
50843 + If the sysctl option is enabled, two sysctl options with names
50844 + "ip_blackhole" and "lastack_retries" will be created.
50845 + While "ip_blackhole" takes the standard zero/non-zero on/off
50846 + toggle, "lastack_retries" uses the same kinds of values as
50847 + "tcp_retries1" and "tcp_retries2". The default value of 4
50848 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50849 + state.
50850 +
50851 +config GRKERNSEC_SOCKET
50852 + bool "Socket restrictions"
50853 + depends on NET
50854 + help
50855 + If you say Y here, you will be able to choose from several options.
50856 + If you assign a GID on your system and add it to the supplementary
50857 + groups of users you want to restrict socket access to, this patch
50858 + will perform up to three things, based on the option(s) you choose.
50859 +
50860 +config GRKERNSEC_SOCKET_ALL
50861 + bool "Deny any sockets to group"
50862 + depends on GRKERNSEC_SOCKET
50863 + help
50864 + If you say Y here, you will be able to choose a GID of whose users will
50865 + be unable to connect to other hosts from your machine or run server
50866 + applications from your machine. If the sysctl option is enabled, a
50867 + sysctl option with name "socket_all" is created.
50868 +
50869 +config GRKERNSEC_SOCKET_ALL_GID
50870 + int "GID to deny all sockets for"
50871 + depends on GRKERNSEC_SOCKET_ALL
50872 + default 1004
50873 + help
50874 + Here you can choose the GID to disable socket access for. Remember to
50875 + add the users you want socket access disabled for to the GID
50876 + specified here. If the sysctl option is enabled, a sysctl option
50877 + with name "socket_all_gid" is created.
50878 +
50879 +config GRKERNSEC_SOCKET_CLIENT
50880 + bool "Deny client sockets to group"
50881 + depends on GRKERNSEC_SOCKET
50882 + help
50883 + If you say Y here, you will be able to choose a GID of whose users will
50884 + be unable to connect to other hosts from your machine, but will be
50885 + able to run servers. If this option is enabled, all users in the group
50886 + you specify will have to use passive mode when initiating ftp transfers
50887 + from the shell on your machine. If the sysctl option is enabled, a
50888 + sysctl option with name "socket_client" is created.
50889 +
50890 +config GRKERNSEC_SOCKET_CLIENT_GID
50891 + int "GID to deny client sockets for"
50892 + depends on GRKERNSEC_SOCKET_CLIENT
50893 + default 1003
50894 + help
50895 + Here you can choose the GID to disable client socket access for.
50896 + Remember to add the users you want client socket access disabled for to
50897 + the GID specified here. If the sysctl option is enabled, a sysctl
50898 + option with name "socket_client_gid" is created.
50899 +
50900 +config GRKERNSEC_SOCKET_SERVER
50901 + bool "Deny server sockets to group"
50902 + depends on GRKERNSEC_SOCKET
50903 + help
50904 + If you say Y here, you will be able to choose a GID of whose users will
50905 + be unable to run server applications from your machine. If the sysctl
50906 + option is enabled, a sysctl option with name "socket_server" is created.
50907 +
50908 +config GRKERNSEC_SOCKET_SERVER_GID
50909 + int "GID to deny server sockets for"
50910 + depends on GRKERNSEC_SOCKET_SERVER
50911 + default 1002
50912 + help
50913 + Here you can choose the GID to disable server socket access for.
50914 + Remember to add the users you want server socket access disabled for to
50915 + the GID specified here. If the sysctl option is enabled, a sysctl
50916 + option with name "socket_server_gid" is created.
50917 +
50918 +endmenu
50919 +menu "Sysctl support"
50920 +depends on GRKERNSEC && SYSCTL
50921 +
50922 +config GRKERNSEC_SYSCTL
50923 + bool "Sysctl support"
50924 + help
50925 + If you say Y here, you will be able to change the options that
50926 + grsecurity runs with at bootup, without having to recompile your
50927 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50928 + to enable (1) or disable (0) various features. All the sysctl entries
50929 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50930 + All features enabled in the kernel configuration are disabled at boot
50931 + if you do not say Y to the "Turn on features by default" option.
50932 + All options should be set at startup, and the grsec_lock entry should
50933 + be set to a non-zero value after all the options are set.
50934 + *THIS IS EXTREMELY IMPORTANT*
50935 +
50936 +config GRKERNSEC_SYSCTL_DISTRO
50937 + bool "Extra sysctl support for distro makers (READ HELP)"
50938 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50939 + help
50940 + If you say Y here, additional sysctl options will be created
50941 + for features that affect processes running as root. Therefore,
50942 + it is critical when using this option that the grsec_lock entry be
50943 + enabled after boot. Only distros with prebuilt kernel packages
50944 + with this option enabled that can ensure grsec_lock is enabled
50945 + after boot should use this option.
50946 + *Failure to set grsec_lock after boot makes all grsec features
50947 + this option covers useless*
50948 +
50949 + Currently this option creates the following sysctl entries:
50950 + "Disable Privileged I/O": "disable_priv_io"
50951 +
50952 +config GRKERNSEC_SYSCTL_ON
50953 + bool "Turn on features by default"
50954 + depends on GRKERNSEC_SYSCTL
50955 + help
50956 + If you say Y here, instead of having all features enabled in the
50957 + kernel configuration disabled at boot time, the features will be
50958 + enabled at boot time. It is recommended you say Y here unless
50959 + there is some reason you would want all sysctl-tunable features to
50960 + be disabled by default. As mentioned elsewhere, it is important
50961 + to enable the grsec_lock entry once you have finished modifying
50962 + the sysctl entries.
50963 +
50964 +endmenu
50965 +menu "Logging Options"
50966 +depends on GRKERNSEC
50967 +
50968 +config GRKERNSEC_FLOODTIME
50969 + int "Seconds in between log messages (minimum)"
50970 + default 10
50971 + help
50972 + This option allows you to enforce the number of seconds between
50973 + grsecurity log messages. The default should be suitable for most
50974 + people, however, if you choose to change it, choose a value small enough
50975 + to allow informative logs to be produced, but large enough to
50976 + prevent flooding.
50977 +
50978 +config GRKERNSEC_FLOODBURST
50979 + int "Number of messages in a burst (maximum)"
50980 + default 6
50981 + help
50982 + This option allows you to choose the maximum number of messages allowed
50983 + within the flood time interval you chose in a separate option. The
50984 + default should be suitable for most people, however if you find that
50985 + many of your logs are being interpreted as flooding, you may want to
50986 + raise this value.
50987 +
50988 +endmenu
50989 +
50990 +endmenu
50991 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50992 new file mode 100644
50993 index 0000000..1b9afa9
50994 --- /dev/null
50995 +++ b/grsecurity/Makefile
50996 @@ -0,0 +1,38 @@
50997 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50998 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50999 +# into an RBAC system
51000 +#
51001 +# All code in this directory and various hooks inserted throughout the kernel
51002 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51003 +# under the GPL v2 or higher
51004 +
51005 +KBUILD_CFLAGS += -Werror
51006 +
51007 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51008 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
51009 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51010 +
51011 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51012 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51013 + gracl_learn.o grsec_log.o
51014 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51015 +
51016 +ifdef CONFIG_NET
51017 +obj-y += grsec_sock.o
51018 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51019 +endif
51020 +
51021 +ifndef CONFIG_GRKERNSEC
51022 +obj-y += grsec_disabled.o
51023 +endif
51024 +
51025 +ifdef CONFIG_GRKERNSEC_HIDESYM
51026 +extra-y := grsec_hidesym.o
51027 +$(obj)/grsec_hidesym.o:
51028 + @-chmod -f 500 /boot
51029 + @-chmod -f 500 /lib/modules
51030 + @-chmod -f 500 /lib64/modules
51031 + @-chmod -f 500 /lib32/modules
51032 + @-chmod -f 700 .
51033 + @echo ' grsec: protected kernel image paths'
51034 +endif
51035 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
51036 new file mode 100644
51037 index 0000000..a6d83f0
51038 --- /dev/null
51039 +++ b/grsecurity/gracl.c
51040 @@ -0,0 +1,4193 @@
51041 +#include <linux/kernel.h>
51042 +#include <linux/module.h>
51043 +#include <linux/sched.h>
51044 +#include <linux/mm.h>
51045 +#include <linux/file.h>
51046 +#include <linux/fs.h>
51047 +#include <linux/namei.h>
51048 +#include <linux/mount.h>
51049 +#include <linux/tty.h>
51050 +#include <linux/proc_fs.h>
51051 +#include <linux/lglock.h>
51052 +#include <linux/slab.h>
51053 +#include <linux/vmalloc.h>
51054 +#include <linux/types.h>
51055 +#include <linux/sysctl.h>
51056 +#include <linux/netdevice.h>
51057 +#include <linux/ptrace.h>
51058 +#include <linux/gracl.h>
51059 +#include <linux/gralloc.h>
51060 +#include <linux/security.h>
51061 +#include <linux/grinternal.h>
51062 +#include <linux/pid_namespace.h>
51063 +#include <linux/fdtable.h>
51064 +#include <linux/percpu.h>
51065 +#include "../fs/mount.h"
51066 +
51067 +#include <asm/uaccess.h>
51068 +#include <asm/errno.h>
51069 +#include <asm/mman.h>
51070 +
51071 +static struct acl_role_db acl_role_set;
51072 +static struct name_db name_set;
51073 +static struct inodev_db inodev_set;
51074 +
51075 +/* for keeping track of userspace pointers used for subjects, so we
51076 + can share references in the kernel as well
51077 +*/
51078 +
51079 +static struct path real_root;
51080 +
51081 +static struct acl_subj_map_db subj_map_set;
51082 +
51083 +static struct acl_role_label *default_role;
51084 +
51085 +static struct acl_role_label *role_list;
51086 +
51087 +static u16 acl_sp_role_value;
51088 +
51089 +extern char *gr_shared_page[4];
51090 +static DEFINE_MUTEX(gr_dev_mutex);
51091 +DEFINE_RWLOCK(gr_inode_lock);
51092 +
51093 +struct gr_arg *gr_usermode;
51094 +
51095 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
51096 +
51097 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
51098 +extern void gr_clear_learn_entries(void);
51099 +
51100 +#ifdef CONFIG_GRKERNSEC_RESLOG
51101 +extern void gr_log_resource(const struct task_struct *task,
51102 + const int res, const unsigned long wanted, const int gt);
51103 +#endif
51104 +
51105 +unsigned char *gr_system_salt;
51106 +unsigned char *gr_system_sum;
51107 +
51108 +static struct sprole_pw **acl_special_roles = NULL;
51109 +static __u16 num_sprole_pws = 0;
51110 +
51111 +static struct acl_role_label *kernel_role = NULL;
51112 +
51113 +static unsigned int gr_auth_attempts = 0;
51114 +static unsigned long gr_auth_expires = 0UL;
51115 +
51116 +#ifdef CONFIG_NET
51117 +extern struct vfsmount *sock_mnt;
51118 +#endif
51119 +
51120 +extern struct vfsmount *pipe_mnt;
51121 +extern struct vfsmount *shm_mnt;
51122 +#ifdef CONFIG_HUGETLBFS
51123 +extern struct vfsmount *hugetlbfs_vfsmount;
51124 +#endif
51125 +
51126 +static struct acl_object_label *fakefs_obj_rw;
51127 +static struct acl_object_label *fakefs_obj_rwx;
51128 +
51129 +extern int gr_init_uidset(void);
51130 +extern void gr_free_uidset(void);
51131 +extern void gr_remove_uid(uid_t uid);
51132 +extern int gr_find_uid(uid_t uid);
51133 +
51134 +DECLARE_BRLOCK(vfsmount_lock);
51135 +
51136 +__inline__ int
51137 +gr_acl_is_enabled(void)
51138 +{
51139 + return (gr_status & GR_READY);
51140 +}
51141 +
51142 +#ifdef CONFIG_BTRFS_FS
51143 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
51144 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
51145 +#endif
51146 +
51147 +static inline dev_t __get_dev(const struct dentry *dentry)
51148 +{
51149 +#ifdef CONFIG_BTRFS_FS
51150 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
51151 + return get_btrfs_dev_from_inode(dentry->d_inode);
51152 + else
51153 +#endif
51154 + return dentry->d_inode->i_sb->s_dev;
51155 +}
51156 +
51157 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51158 +{
51159 + return __get_dev(dentry);
51160 +}
51161 +
51162 +static char gr_task_roletype_to_char(struct task_struct *task)
51163 +{
51164 + switch (task->role->roletype &
51165 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
51166 + GR_ROLE_SPECIAL)) {
51167 + case GR_ROLE_DEFAULT:
51168 + return 'D';
51169 + case GR_ROLE_USER:
51170 + return 'U';
51171 + case GR_ROLE_GROUP:
51172 + return 'G';
51173 + case GR_ROLE_SPECIAL:
51174 + return 'S';
51175 + }
51176 +
51177 + return 'X';
51178 +}
51179 +
51180 +char gr_roletype_to_char(void)
51181 +{
51182 + return gr_task_roletype_to_char(current);
51183 +}
51184 +
51185 +__inline__ int
51186 +gr_acl_tpe_check(void)
51187 +{
51188 + if (unlikely(!(gr_status & GR_READY)))
51189 + return 0;
51190 + if (current->role->roletype & GR_ROLE_TPE)
51191 + return 1;
51192 + else
51193 + return 0;
51194 +}
51195 +
51196 +int
51197 +gr_handle_rawio(const struct inode *inode)
51198 +{
51199 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51200 + if (inode && S_ISBLK(inode->i_mode) &&
51201 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
51202 + !capable(CAP_SYS_RAWIO))
51203 + return 1;
51204 +#endif
51205 + return 0;
51206 +}
51207 +
51208 +static int
51209 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
51210 +{
51211 + if (likely(lena != lenb))
51212 + return 0;
51213 +
51214 + return !memcmp(a, b, lena);
51215 +}
51216 +
51217 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
51218 +{
51219 + *buflen -= namelen;
51220 + if (*buflen < 0)
51221 + return -ENAMETOOLONG;
51222 + *buffer -= namelen;
51223 + memcpy(*buffer, str, namelen);
51224 + return 0;
51225 +}
51226 +
51227 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
51228 +{
51229 + return prepend(buffer, buflen, name->name, name->len);
51230 +}
51231 +
51232 +static int prepend_path(const struct path *path, struct path *root,
51233 + char **buffer, int *buflen)
51234 +{
51235 + struct dentry *dentry = path->dentry;
51236 + struct vfsmount *vfsmnt = path->mnt;
51237 + struct mount *mnt = real_mount(vfsmnt);
51238 + bool slash = false;
51239 + int error = 0;
51240 +
51241 + while (dentry != root->dentry || vfsmnt != root->mnt) {
51242 + struct dentry * parent;
51243 +
51244 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
51245 + /* Global root? */
51246 + if (!mnt_has_parent(mnt)) {
51247 + goto out;
51248 + }
51249 + dentry = mnt->mnt_mountpoint;
51250 + mnt = mnt->mnt_parent;
51251 + vfsmnt = &mnt->mnt;
51252 + continue;
51253 + }
51254 + parent = dentry->d_parent;
51255 + prefetch(parent);
51256 + spin_lock(&dentry->d_lock);
51257 + error = prepend_name(buffer, buflen, &dentry->d_name);
51258 + spin_unlock(&dentry->d_lock);
51259 + if (!error)
51260 + error = prepend(buffer, buflen, "/", 1);
51261 + if (error)
51262 + break;
51263 +
51264 + slash = true;
51265 + dentry = parent;
51266 + }
51267 +
51268 +out:
51269 + if (!error && !slash)
51270 + error = prepend(buffer, buflen, "/", 1);
51271 +
51272 + return error;
51273 +}
51274 +
51275 +/* this must be called with vfsmount_lock and rename_lock held */
51276 +
51277 +static char *__our_d_path(const struct path *path, struct path *root,
51278 + char *buf, int buflen)
51279 +{
51280 + char *res = buf + buflen;
51281 + int error;
51282 +
51283 + prepend(&res, &buflen, "\0", 1);
51284 + error = prepend_path(path, root, &res, &buflen);
51285 + if (error)
51286 + return ERR_PTR(error);
51287 +
51288 + return res;
51289 +}
51290 +
51291 +static char *
51292 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
51293 +{
51294 + char *retval;
51295 +
51296 + retval = __our_d_path(path, root, buf, buflen);
51297 + if (unlikely(IS_ERR(retval)))
51298 + retval = strcpy(buf, "<path too long>");
51299 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
51300 + retval[1] = '\0';
51301 +
51302 + return retval;
51303 +}
51304 +
51305 +static char *
51306 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51307 + char *buf, int buflen)
51308 +{
51309 + struct path path;
51310 + char *res;
51311 +
51312 + path.dentry = (struct dentry *)dentry;
51313 + path.mnt = (struct vfsmount *)vfsmnt;
51314 +
51315 + /* we can use real_root.dentry, real_root.mnt, because this is only called
51316 + by the RBAC system */
51317 + res = gen_full_path(&path, &real_root, buf, buflen);
51318 +
51319 + return res;
51320 +}
51321 +
51322 +static char *
51323 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
51324 + char *buf, int buflen)
51325 +{
51326 + char *res;
51327 + struct path path;
51328 + struct path root;
51329 + struct task_struct *reaper = &init_task;
51330 +
51331 + path.dentry = (struct dentry *)dentry;
51332 + path.mnt = (struct vfsmount *)vfsmnt;
51333 +
51334 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
51335 + get_fs_root(reaper->fs, &root);
51336 +
51337 + write_seqlock(&rename_lock);
51338 + br_read_lock(vfsmount_lock);
51339 + res = gen_full_path(&path, &root, buf, buflen);
51340 + br_read_unlock(vfsmount_lock);
51341 + write_sequnlock(&rename_lock);
51342 +
51343 + path_put(&root);
51344 + return res;
51345 +}
51346 +
51347 +static char *
51348 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51349 +{
51350 + char *ret;
51351 + write_seqlock(&rename_lock);
51352 + br_read_lock(vfsmount_lock);
51353 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51354 + PAGE_SIZE);
51355 + br_read_unlock(vfsmount_lock);
51356 + write_sequnlock(&rename_lock);
51357 + return ret;
51358 +}
51359 +
51360 +static char *
51361 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
51362 +{
51363 + char *ret;
51364 + char *buf;
51365 + int buflen;
51366 +
51367 + write_seqlock(&rename_lock);
51368 + br_read_lock(vfsmount_lock);
51369 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51370 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
51371 + buflen = (int)(ret - buf);
51372 + if (buflen >= 5)
51373 + prepend(&ret, &buflen, "/proc", 5);
51374 + else
51375 + ret = strcpy(buf, "<path too long>");
51376 + br_read_unlock(vfsmount_lock);
51377 + write_sequnlock(&rename_lock);
51378 + return ret;
51379 +}
51380 +
51381 +char *
51382 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
51383 +{
51384 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
51385 + PAGE_SIZE);
51386 +}
51387 +
51388 +char *
51389 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51390 +{
51391 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51392 + PAGE_SIZE);
51393 +}
51394 +
51395 +char *
51396 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51397 +{
51398 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51399 + PAGE_SIZE);
51400 +}
51401 +
51402 +char *
51403 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51404 +{
51405 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51406 + PAGE_SIZE);
51407 +}
51408 +
51409 +char *
51410 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51411 +{
51412 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51413 + PAGE_SIZE);
51414 +}
51415 +
51416 +__inline__ __u32
51417 +to_gr_audit(const __u32 reqmode)
51418 +{
51419 + /* masks off auditable permission flags, then shifts them to create
51420 + auditing flags, and adds the special case of append auditing if
51421 + we're requesting write */
51422 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51423 +}
51424 +
51425 +struct acl_subject_label *
51426 +lookup_subject_map(const struct acl_subject_label *userp)
51427 +{
51428 + unsigned int index = shash(userp, subj_map_set.s_size);
51429 + struct subject_map *match;
51430 +
51431 + match = subj_map_set.s_hash[index];
51432 +
51433 + while (match && match->user != userp)
51434 + match = match->next;
51435 +
51436 + if (match != NULL)
51437 + return match->kernel;
51438 + else
51439 + return NULL;
51440 +}
51441 +
51442 +static void
51443 +insert_subj_map_entry(struct subject_map *subjmap)
51444 +{
51445 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51446 + struct subject_map **curr;
51447 +
51448 + subjmap->prev = NULL;
51449 +
51450 + curr = &subj_map_set.s_hash[index];
51451 + if (*curr != NULL)
51452 + (*curr)->prev = subjmap;
51453 +
51454 + subjmap->next = *curr;
51455 + *curr = subjmap;
51456 +
51457 + return;
51458 +}
51459 +
51460 +static struct acl_role_label *
51461 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51462 + const gid_t gid)
51463 +{
51464 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51465 + struct acl_role_label *match;
51466 + struct role_allowed_ip *ipp;
51467 + unsigned int x;
51468 + u32 curr_ip = task->signal->curr_ip;
51469 +
51470 + task->signal->saved_ip = curr_ip;
51471 +
51472 + match = acl_role_set.r_hash[index];
51473 +
51474 + while (match) {
51475 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51476 + for (x = 0; x < match->domain_child_num; x++) {
51477 + if (match->domain_children[x] == uid)
51478 + goto found;
51479 + }
51480 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51481 + break;
51482 + match = match->next;
51483 + }
51484 +found:
51485 + if (match == NULL) {
51486 + try_group:
51487 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51488 + match = acl_role_set.r_hash[index];
51489 +
51490 + while (match) {
51491 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51492 + for (x = 0; x < match->domain_child_num; x++) {
51493 + if (match->domain_children[x] == gid)
51494 + goto found2;
51495 + }
51496 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51497 + break;
51498 + match = match->next;
51499 + }
51500 +found2:
51501 + if (match == NULL)
51502 + match = default_role;
51503 + if (match->allowed_ips == NULL)
51504 + return match;
51505 + else {
51506 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51507 + if (likely
51508 + ((ntohl(curr_ip) & ipp->netmask) ==
51509 + (ntohl(ipp->addr) & ipp->netmask)))
51510 + return match;
51511 + }
51512 + match = default_role;
51513 + }
51514 + } else if (match->allowed_ips == NULL) {
51515 + return match;
51516 + } else {
51517 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51518 + if (likely
51519 + ((ntohl(curr_ip) & ipp->netmask) ==
51520 + (ntohl(ipp->addr) & ipp->netmask)))
51521 + return match;
51522 + }
51523 + goto try_group;
51524 + }
51525 +
51526 + return match;
51527 +}
51528 +
51529 +struct acl_subject_label *
51530 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51531 + const struct acl_role_label *role)
51532 +{
51533 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51534 + struct acl_subject_label *match;
51535 +
51536 + match = role->subj_hash[index];
51537 +
51538 + while (match && (match->inode != ino || match->device != dev ||
51539 + (match->mode & GR_DELETED))) {
51540 + match = match->next;
51541 + }
51542 +
51543 + if (match && !(match->mode & GR_DELETED))
51544 + return match;
51545 + else
51546 + return NULL;
51547 +}
51548 +
51549 +struct acl_subject_label *
51550 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51551 + const struct acl_role_label *role)
51552 +{
51553 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
51554 + struct acl_subject_label *match;
51555 +
51556 + match = role->subj_hash[index];
51557 +
51558 + while (match && (match->inode != ino || match->device != dev ||
51559 + !(match->mode & GR_DELETED))) {
51560 + match = match->next;
51561 + }
51562 +
51563 + if (match && (match->mode & GR_DELETED))
51564 + return match;
51565 + else
51566 + return NULL;
51567 +}
51568 +
51569 +static struct acl_object_label *
51570 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51571 + const struct acl_subject_label *subj)
51572 +{
51573 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51574 + struct acl_object_label *match;
51575 +
51576 + match = subj->obj_hash[index];
51577 +
51578 + while (match && (match->inode != ino || match->device != dev ||
51579 + (match->mode & GR_DELETED))) {
51580 + match = match->next;
51581 + }
51582 +
51583 + if (match && !(match->mode & GR_DELETED))
51584 + return match;
51585 + else
51586 + return NULL;
51587 +}
51588 +
51589 +static struct acl_object_label *
51590 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51591 + const struct acl_subject_label *subj)
51592 +{
51593 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51594 + struct acl_object_label *match;
51595 +
51596 + match = subj->obj_hash[index];
51597 +
51598 + while (match && (match->inode != ino || match->device != dev ||
51599 + !(match->mode & GR_DELETED))) {
51600 + match = match->next;
51601 + }
51602 +
51603 + if (match && (match->mode & GR_DELETED))
51604 + return match;
51605 +
51606 + match = subj->obj_hash[index];
51607 +
51608 + while (match && (match->inode != ino || match->device != dev ||
51609 + (match->mode & GR_DELETED))) {
51610 + match = match->next;
51611 + }
51612 +
51613 + if (match && !(match->mode & GR_DELETED))
51614 + return match;
51615 + else
51616 + return NULL;
51617 +}
51618 +
51619 +static struct name_entry *
51620 +lookup_name_entry(const char *name)
51621 +{
51622 + unsigned int len = strlen(name);
51623 + unsigned int key = full_name_hash(name, len);
51624 + unsigned int index = key % name_set.n_size;
51625 + struct name_entry *match;
51626 +
51627 + match = name_set.n_hash[index];
51628 +
51629 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51630 + match = match->next;
51631 +
51632 + return match;
51633 +}
51634 +
51635 +static struct name_entry *
51636 +lookup_name_entry_create(const char *name)
51637 +{
51638 + unsigned int len = strlen(name);
51639 + unsigned int key = full_name_hash(name, len);
51640 + unsigned int index = key % name_set.n_size;
51641 + struct name_entry *match;
51642 +
51643 + match = name_set.n_hash[index];
51644 +
51645 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51646 + !match->deleted))
51647 + match = match->next;
51648 +
51649 + if (match && match->deleted)
51650 + return match;
51651 +
51652 + match = name_set.n_hash[index];
51653 +
51654 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51655 + match->deleted))
51656 + match = match->next;
51657 +
51658 + if (match && !match->deleted)
51659 + return match;
51660 + else
51661 + return NULL;
51662 +}
51663 +
51664 +static struct inodev_entry *
51665 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51666 +{
51667 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51668 + struct inodev_entry *match;
51669 +
51670 + match = inodev_set.i_hash[index];
51671 +
51672 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51673 + match = match->next;
51674 +
51675 + return match;
51676 +}
51677 +
51678 +static void
51679 +insert_inodev_entry(struct inodev_entry *entry)
51680 +{
51681 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51682 + inodev_set.i_size);
51683 + struct inodev_entry **curr;
51684 +
51685 + entry->prev = NULL;
51686 +
51687 + curr = &inodev_set.i_hash[index];
51688 + if (*curr != NULL)
51689 + (*curr)->prev = entry;
51690 +
51691 + entry->next = *curr;
51692 + *curr = entry;
51693 +
51694 + return;
51695 +}
51696 +
51697 +static void
51698 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51699 +{
51700 + unsigned int index =
51701 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51702 + struct acl_role_label **curr;
51703 + struct acl_role_label *tmp, *tmp2;
51704 +
51705 + curr = &acl_role_set.r_hash[index];
51706 +
51707 + /* simple case, slot is empty, just set it to our role */
51708 + if (*curr == NULL) {
51709 + *curr = role;
51710 + } else {
51711 + /* example:
51712 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
51713 + 2 -> 3
51714 + */
51715 + /* first check to see if we can already be reached via this slot */
51716 + tmp = *curr;
51717 + while (tmp && tmp != role)
51718 + tmp = tmp->next;
51719 + if (tmp == role) {
51720 + /* we don't need to add ourselves to this slot's chain */
51721 + return;
51722 + }
51723 + /* we need to add ourselves to this chain, two cases */
51724 + if (role->next == NULL) {
51725 + /* simple case, append the current chain to our role */
51726 + role->next = *curr;
51727 + *curr = role;
51728 + } else {
51729 + /* 1 -> 2 -> 3 -> 4
51730 + 2 -> 3 -> 4
51731 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51732 + */
51733 + /* trickier case: walk our role's chain until we find
51734 + the role for the start of the current slot's chain */
51735 + tmp = role;
51736 + tmp2 = *curr;
51737 + while (tmp->next && tmp->next != tmp2)
51738 + tmp = tmp->next;
51739 + if (tmp->next == tmp2) {
51740 + /* from example above, we found 3, so just
51741 + replace this slot's chain with ours */
51742 + *curr = role;
51743 + } else {
51744 + /* we didn't find a subset of our role's chain
51745 + in the current slot's chain, so append their
51746 + chain to ours, and set us as the first role in
51747 + the slot's chain
51748 +
51749 + we could fold this case with the case above,
51750 + but making it explicit for clarity
51751 + */
51752 + tmp->next = tmp2;
51753 + *curr = role;
51754 + }
51755 + }
51756 + }
51757 +
51758 + return;
51759 +}
51760 +
51761 +static void
51762 +insert_acl_role_label(struct acl_role_label *role)
51763 +{
51764 + int i;
51765 +
51766 + if (role_list == NULL) {
51767 + role_list = role;
51768 + role->prev = NULL;
51769 + } else {
51770 + role->prev = role_list;
51771 + role_list = role;
51772 + }
51773 +
51774 + /* used for hash chains */
51775 + role->next = NULL;
51776 +
51777 + if (role->roletype & GR_ROLE_DOMAIN) {
51778 + for (i = 0; i < role->domain_child_num; i++)
51779 + __insert_acl_role_label(role, role->domain_children[i]);
51780 + } else
51781 + __insert_acl_role_label(role, role->uidgid);
51782 +}
51783 +
51784 +static int
51785 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51786 +{
51787 + struct name_entry **curr, *nentry;
51788 + struct inodev_entry *ientry;
51789 + unsigned int len = strlen(name);
51790 + unsigned int key = full_name_hash(name, len);
51791 + unsigned int index = key % name_set.n_size;
51792 +
51793 + curr = &name_set.n_hash[index];
51794 +
51795 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51796 + curr = &((*curr)->next);
51797 +
51798 + if (*curr != NULL)
51799 + return 1;
51800 +
51801 + nentry = acl_alloc(sizeof (struct name_entry));
51802 + if (nentry == NULL)
51803 + return 0;
51804 + ientry = acl_alloc(sizeof (struct inodev_entry));
51805 + if (ientry == NULL)
51806 + return 0;
51807 + ientry->nentry = nentry;
51808 +
51809 + nentry->key = key;
51810 + nentry->name = name;
51811 + nentry->inode = inode;
51812 + nentry->device = device;
51813 + nentry->len = len;
51814 + nentry->deleted = deleted;
51815 +
51816 + nentry->prev = NULL;
51817 + curr = &name_set.n_hash[index];
51818 + if (*curr != NULL)
51819 + (*curr)->prev = nentry;
51820 + nentry->next = *curr;
51821 + *curr = nentry;
51822 +
51823 + /* insert us into the table searchable by inode/dev */
51824 + insert_inodev_entry(ientry);
51825 +
51826 + return 1;
51827 +}
51828 +
51829 +static void
51830 +insert_acl_obj_label(struct acl_object_label *obj,
51831 + struct acl_subject_label *subj)
51832 +{
51833 + unsigned int index =
51834 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51835 + struct acl_object_label **curr;
51836 +
51837 +
51838 + obj->prev = NULL;
51839 +
51840 + curr = &subj->obj_hash[index];
51841 + if (*curr != NULL)
51842 + (*curr)->prev = obj;
51843 +
51844 + obj->next = *curr;
51845 + *curr = obj;
51846 +
51847 + return;
51848 +}
51849 +
51850 +static void
51851 +insert_acl_subj_label(struct acl_subject_label *obj,
51852 + struct acl_role_label *role)
51853 +{
51854 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51855 + struct acl_subject_label **curr;
51856 +
51857 + obj->prev = NULL;
51858 +
51859 + curr = &role->subj_hash[index];
51860 + if (*curr != NULL)
51861 + (*curr)->prev = obj;
51862 +
51863 + obj->next = *curr;
51864 + *curr = obj;
51865 +
51866 + return;
51867 +}
51868 +
51869 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51870 +
51871 +static void *
51872 +create_table(__u32 * len, int elementsize)
51873 +{
51874 + unsigned int table_sizes[] = {
51875 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51876 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51877 + 4194301, 8388593, 16777213, 33554393, 67108859
51878 + };
51879 + void *newtable = NULL;
51880 + unsigned int pwr = 0;
51881 +
51882 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51883 + table_sizes[pwr] <= *len)
51884 + pwr++;
51885 +
51886 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51887 + return newtable;
51888 +
51889 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51890 + newtable =
51891 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51892 + else
51893 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51894 +
51895 + *len = table_sizes[pwr];
51896 +
51897 + return newtable;
51898 +}
51899 +
51900 +static int
51901 +init_variables(const struct gr_arg *arg)
51902 +{
51903 + struct task_struct *reaper = &init_task;
51904 + unsigned int stacksize;
51905 +
51906 + subj_map_set.s_size = arg->role_db.num_subjects;
51907 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51908 + name_set.n_size = arg->role_db.num_objects;
51909 + inodev_set.i_size = arg->role_db.num_objects;
51910 +
51911 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51912 + !name_set.n_size || !inodev_set.i_size)
51913 + return 1;
51914 +
51915 + if (!gr_init_uidset())
51916 + return 1;
51917 +
51918 + /* set up the stack that holds allocation info */
51919 +
51920 + stacksize = arg->role_db.num_pointers + 5;
51921 +
51922 + if (!acl_alloc_stack_init(stacksize))
51923 + return 1;
51924 +
51925 + /* grab reference for the real root dentry and vfsmount */
51926 + get_fs_root(reaper->fs, &real_root);
51927 +
51928 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51929 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51930 +#endif
51931 +
51932 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51933 + if (fakefs_obj_rw == NULL)
51934 + return 1;
51935 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51936 +
51937 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51938 + if (fakefs_obj_rwx == NULL)
51939 + return 1;
51940 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51941 +
51942 + subj_map_set.s_hash =
51943 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51944 + acl_role_set.r_hash =
51945 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51946 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51947 + inodev_set.i_hash =
51948 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51949 +
51950 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51951 + !name_set.n_hash || !inodev_set.i_hash)
51952 + return 1;
51953 +
51954 + memset(subj_map_set.s_hash, 0,
51955 + sizeof(struct subject_map *) * subj_map_set.s_size);
51956 + memset(acl_role_set.r_hash, 0,
51957 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51958 + memset(name_set.n_hash, 0,
51959 + sizeof (struct name_entry *) * name_set.n_size);
51960 + memset(inodev_set.i_hash, 0,
51961 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51962 +
51963 + return 0;
51964 +}
51965 +
51966 +/* free information not needed after startup
51967 + currently contains user->kernel pointer mappings for subjects
51968 +*/
51969 +
51970 +static void
51971 +free_init_variables(void)
51972 +{
51973 + __u32 i;
51974 +
51975 + if (subj_map_set.s_hash) {
51976 + for (i = 0; i < subj_map_set.s_size; i++) {
51977 + if (subj_map_set.s_hash[i]) {
51978 + kfree(subj_map_set.s_hash[i]);
51979 + subj_map_set.s_hash[i] = NULL;
51980 + }
51981 + }
51982 +
51983 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51984 + PAGE_SIZE)
51985 + kfree(subj_map_set.s_hash);
51986 + else
51987 + vfree(subj_map_set.s_hash);
51988 + }
51989 +
51990 + return;
51991 +}
51992 +
51993 +static void
51994 +free_variables(void)
51995 +{
51996 + struct acl_subject_label *s;
51997 + struct acl_role_label *r;
51998 + struct task_struct *task, *task2;
51999 + unsigned int x;
52000 +
52001 + gr_clear_learn_entries();
52002 +
52003 + read_lock(&tasklist_lock);
52004 + do_each_thread(task2, task) {
52005 + task->acl_sp_role = 0;
52006 + task->acl_role_id = 0;
52007 + task->acl = NULL;
52008 + task->role = NULL;
52009 + } while_each_thread(task2, task);
52010 + read_unlock(&tasklist_lock);
52011 +
52012 + /* release the reference to the real root dentry and vfsmount */
52013 + path_put(&real_root);
52014 + memset(&real_root, 0, sizeof(real_root));
52015 +
52016 + /* free all object hash tables */
52017 +
52018 + FOR_EACH_ROLE_START(r)
52019 + if (r->subj_hash == NULL)
52020 + goto next_role;
52021 + FOR_EACH_SUBJECT_START(r, s, x)
52022 + if (s->obj_hash == NULL)
52023 + break;
52024 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52025 + kfree(s->obj_hash);
52026 + else
52027 + vfree(s->obj_hash);
52028 + FOR_EACH_SUBJECT_END(s, x)
52029 + FOR_EACH_NESTED_SUBJECT_START(r, s)
52030 + if (s->obj_hash == NULL)
52031 + break;
52032 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
52033 + kfree(s->obj_hash);
52034 + else
52035 + vfree(s->obj_hash);
52036 + FOR_EACH_NESTED_SUBJECT_END(s)
52037 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
52038 + kfree(r->subj_hash);
52039 + else
52040 + vfree(r->subj_hash);
52041 + r->subj_hash = NULL;
52042 +next_role:
52043 + FOR_EACH_ROLE_END(r)
52044 +
52045 + acl_free_all();
52046 +
52047 + if (acl_role_set.r_hash) {
52048 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
52049 + PAGE_SIZE)
52050 + kfree(acl_role_set.r_hash);
52051 + else
52052 + vfree(acl_role_set.r_hash);
52053 + }
52054 + if (name_set.n_hash) {
52055 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
52056 + PAGE_SIZE)
52057 + kfree(name_set.n_hash);
52058 + else
52059 + vfree(name_set.n_hash);
52060 + }
52061 +
52062 + if (inodev_set.i_hash) {
52063 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
52064 + PAGE_SIZE)
52065 + kfree(inodev_set.i_hash);
52066 + else
52067 + vfree(inodev_set.i_hash);
52068 + }
52069 +
52070 + gr_free_uidset();
52071 +
52072 + memset(&name_set, 0, sizeof (struct name_db));
52073 + memset(&inodev_set, 0, sizeof (struct inodev_db));
52074 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
52075 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
52076 +
52077 + default_role = NULL;
52078 + kernel_role = NULL;
52079 + role_list = NULL;
52080 +
52081 + return;
52082 +}
52083 +
52084 +static __u32
52085 +count_user_objs(struct acl_object_label *userp)
52086 +{
52087 + struct acl_object_label o_tmp;
52088 + __u32 num = 0;
52089 +
52090 + while (userp) {
52091 + if (copy_from_user(&o_tmp, userp,
52092 + sizeof (struct acl_object_label)))
52093 + break;
52094 +
52095 + userp = o_tmp.prev;
52096 + num++;
52097 + }
52098 +
52099 + return num;
52100 +}
52101 +
52102 +static struct acl_subject_label *
52103 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
52104 +
52105 +static int
52106 +copy_user_glob(struct acl_object_label *obj)
52107 +{
52108 + struct acl_object_label *g_tmp, **guser;
52109 + unsigned int len;
52110 + char *tmp;
52111 +
52112 + if (obj->globbed == NULL)
52113 + return 0;
52114 +
52115 + guser = &obj->globbed;
52116 + while (*guser) {
52117 + g_tmp = (struct acl_object_label *)
52118 + acl_alloc(sizeof (struct acl_object_label));
52119 + if (g_tmp == NULL)
52120 + return -ENOMEM;
52121 +
52122 + if (copy_from_user(g_tmp, *guser,
52123 + sizeof (struct acl_object_label)))
52124 + return -EFAULT;
52125 +
52126 + len = strnlen_user(g_tmp->filename, PATH_MAX);
52127 +
52128 + if (!len || len >= PATH_MAX)
52129 + return -EINVAL;
52130 +
52131 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52132 + return -ENOMEM;
52133 +
52134 + if (copy_from_user(tmp, g_tmp->filename, len))
52135 + return -EFAULT;
52136 + tmp[len-1] = '\0';
52137 + g_tmp->filename = tmp;
52138 +
52139 + *guser = g_tmp;
52140 + guser = &(g_tmp->next);
52141 + }
52142 +
52143 + return 0;
52144 +}
52145 +
52146 +static int
52147 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
52148 + struct acl_role_label *role)
52149 +{
52150 + struct acl_object_label *o_tmp;
52151 + unsigned int len;
52152 + int ret;
52153 + char *tmp;
52154 +
52155 + while (userp) {
52156 + if ((o_tmp = (struct acl_object_label *)
52157 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
52158 + return -ENOMEM;
52159 +
52160 + if (copy_from_user(o_tmp, userp,
52161 + sizeof (struct acl_object_label)))
52162 + return -EFAULT;
52163 +
52164 + userp = o_tmp->prev;
52165 +
52166 + len = strnlen_user(o_tmp->filename, PATH_MAX);
52167 +
52168 + if (!len || len >= PATH_MAX)
52169 + return -EINVAL;
52170 +
52171 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52172 + return -ENOMEM;
52173 +
52174 + if (copy_from_user(tmp, o_tmp->filename, len))
52175 + return -EFAULT;
52176 + tmp[len-1] = '\0';
52177 + o_tmp->filename = tmp;
52178 +
52179 + insert_acl_obj_label(o_tmp, subj);
52180 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
52181 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
52182 + return -ENOMEM;
52183 +
52184 + ret = copy_user_glob(o_tmp);
52185 + if (ret)
52186 + return ret;
52187 +
52188 + if (o_tmp->nested) {
52189 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
52190 + if (IS_ERR(o_tmp->nested))
52191 + return PTR_ERR(o_tmp->nested);
52192 +
52193 + /* insert into nested subject list */
52194 + o_tmp->nested->next = role->hash->first;
52195 + role->hash->first = o_tmp->nested;
52196 + }
52197 + }
52198 +
52199 + return 0;
52200 +}
52201 +
52202 +static __u32
52203 +count_user_subjs(struct acl_subject_label *userp)
52204 +{
52205 + struct acl_subject_label s_tmp;
52206 + __u32 num = 0;
52207 +
52208 + while (userp) {
52209 + if (copy_from_user(&s_tmp, userp,
52210 + sizeof (struct acl_subject_label)))
52211 + break;
52212 +
52213 + userp = s_tmp.prev;
52214 + /* do not count nested subjects against this count, since
52215 + they are not included in the hash table, but are
52216 + attached to objects. We have already counted
52217 + the subjects in userspace for the allocation
52218 + stack
52219 + */
52220 + if (!(s_tmp.mode & GR_NESTED))
52221 + num++;
52222 + }
52223 +
52224 + return num;
52225 +}
52226 +
52227 +static int
52228 +copy_user_allowedips(struct acl_role_label *rolep)
52229 +{
52230 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
52231 +
52232 + ruserip = rolep->allowed_ips;
52233 +
52234 + while (ruserip) {
52235 + rlast = rtmp;
52236 +
52237 + if ((rtmp = (struct role_allowed_ip *)
52238 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
52239 + return -ENOMEM;
52240 +
52241 + if (copy_from_user(rtmp, ruserip,
52242 + sizeof (struct role_allowed_ip)))
52243 + return -EFAULT;
52244 +
52245 + ruserip = rtmp->prev;
52246 +
52247 + if (!rlast) {
52248 + rtmp->prev = NULL;
52249 + rolep->allowed_ips = rtmp;
52250 + } else {
52251 + rlast->next = rtmp;
52252 + rtmp->prev = rlast;
52253 + }
52254 +
52255 + if (!ruserip)
52256 + rtmp->next = NULL;
52257 + }
52258 +
52259 + return 0;
52260 +}
52261 +
52262 +static int
52263 +copy_user_transitions(struct acl_role_label *rolep)
52264 +{
52265 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
52266 +
52267 + unsigned int len;
52268 + char *tmp;
52269 +
52270 + rusertp = rolep->transitions;
52271 +
52272 + while (rusertp) {
52273 + rlast = rtmp;
52274 +
52275 + if ((rtmp = (struct role_transition *)
52276 + acl_alloc(sizeof (struct role_transition))) == NULL)
52277 + return -ENOMEM;
52278 +
52279 + if (copy_from_user(rtmp, rusertp,
52280 + sizeof (struct role_transition)))
52281 + return -EFAULT;
52282 +
52283 + rusertp = rtmp->prev;
52284 +
52285 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
52286 +
52287 + if (!len || len >= GR_SPROLE_LEN)
52288 + return -EINVAL;
52289 +
52290 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52291 + return -ENOMEM;
52292 +
52293 + if (copy_from_user(tmp, rtmp->rolename, len))
52294 + return -EFAULT;
52295 + tmp[len-1] = '\0';
52296 + rtmp->rolename = tmp;
52297 +
52298 + if (!rlast) {
52299 + rtmp->prev = NULL;
52300 + rolep->transitions = rtmp;
52301 + } else {
52302 + rlast->next = rtmp;
52303 + rtmp->prev = rlast;
52304 + }
52305 +
52306 + if (!rusertp)
52307 + rtmp->next = NULL;
52308 + }
52309 +
52310 + return 0;
52311 +}
52312 +
52313 +static struct acl_subject_label *
52314 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
52315 +{
52316 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
52317 + unsigned int len;
52318 + char *tmp;
52319 + __u32 num_objs;
52320 + struct acl_ip_label **i_tmp, *i_utmp2;
52321 + struct gr_hash_struct ghash;
52322 + struct subject_map *subjmap;
52323 + unsigned int i_num;
52324 + int err;
52325 +
52326 + s_tmp = lookup_subject_map(userp);
52327 +
52328 + /* we've already copied this subject into the kernel, just return
52329 + the reference to it, and don't copy it over again
52330 + */
52331 + if (s_tmp)
52332 + return(s_tmp);
52333 +
52334 + if ((s_tmp = (struct acl_subject_label *)
52335 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
52336 + return ERR_PTR(-ENOMEM);
52337 +
52338 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
52339 + if (subjmap == NULL)
52340 + return ERR_PTR(-ENOMEM);
52341 +
52342 + subjmap->user = userp;
52343 + subjmap->kernel = s_tmp;
52344 + insert_subj_map_entry(subjmap);
52345 +
52346 + if (copy_from_user(s_tmp, userp,
52347 + sizeof (struct acl_subject_label)))
52348 + return ERR_PTR(-EFAULT);
52349 +
52350 + len = strnlen_user(s_tmp->filename, PATH_MAX);
52351 +
52352 + if (!len || len >= PATH_MAX)
52353 + return ERR_PTR(-EINVAL);
52354 +
52355 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52356 + return ERR_PTR(-ENOMEM);
52357 +
52358 + if (copy_from_user(tmp, s_tmp->filename, len))
52359 + return ERR_PTR(-EFAULT);
52360 + tmp[len-1] = '\0';
52361 + s_tmp->filename = tmp;
52362 +
52363 + if (!strcmp(s_tmp->filename, "/"))
52364 + role->root_label = s_tmp;
52365 +
52366 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
52367 + return ERR_PTR(-EFAULT);
52368 +
52369 + /* copy user and group transition tables */
52370 +
52371 + if (s_tmp->user_trans_num) {
52372 + uid_t *uidlist;
52373 +
52374 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
52375 + if (uidlist == NULL)
52376 + return ERR_PTR(-ENOMEM);
52377 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
52378 + return ERR_PTR(-EFAULT);
52379 +
52380 + s_tmp->user_transitions = uidlist;
52381 + }
52382 +
52383 + if (s_tmp->group_trans_num) {
52384 + gid_t *gidlist;
52385 +
52386 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52387 + if (gidlist == NULL)
52388 + return ERR_PTR(-ENOMEM);
52389 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52390 + return ERR_PTR(-EFAULT);
52391 +
52392 + s_tmp->group_transitions = gidlist;
52393 + }
52394 +
52395 + /* set up object hash table */
52396 + num_objs = count_user_objs(ghash.first);
52397 +
52398 + s_tmp->obj_hash_size = num_objs;
52399 + s_tmp->obj_hash =
52400 + (struct acl_object_label **)
52401 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52402 +
52403 + if (!s_tmp->obj_hash)
52404 + return ERR_PTR(-ENOMEM);
52405 +
52406 + memset(s_tmp->obj_hash, 0,
52407 + s_tmp->obj_hash_size *
52408 + sizeof (struct acl_object_label *));
52409 +
52410 + /* add in objects */
52411 + err = copy_user_objs(ghash.first, s_tmp, role);
52412 +
52413 + if (err)
52414 + return ERR_PTR(err);
52415 +
52416 + /* set pointer for parent subject */
52417 + if (s_tmp->parent_subject) {
52418 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52419 +
52420 + if (IS_ERR(s_tmp2))
52421 + return s_tmp2;
52422 +
52423 + s_tmp->parent_subject = s_tmp2;
52424 + }
52425 +
52426 + /* add in ip acls */
52427 +
52428 + if (!s_tmp->ip_num) {
52429 + s_tmp->ips = NULL;
52430 + goto insert;
52431 + }
52432 +
52433 + i_tmp =
52434 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52435 + sizeof (struct acl_ip_label *));
52436 +
52437 + if (!i_tmp)
52438 + return ERR_PTR(-ENOMEM);
52439 +
52440 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52441 + *(i_tmp + i_num) =
52442 + (struct acl_ip_label *)
52443 + acl_alloc(sizeof (struct acl_ip_label));
52444 + if (!*(i_tmp + i_num))
52445 + return ERR_PTR(-ENOMEM);
52446 +
52447 + if (copy_from_user
52448 + (&i_utmp2, s_tmp->ips + i_num,
52449 + sizeof (struct acl_ip_label *)))
52450 + return ERR_PTR(-EFAULT);
52451 +
52452 + if (copy_from_user
52453 + (*(i_tmp + i_num), i_utmp2,
52454 + sizeof (struct acl_ip_label)))
52455 + return ERR_PTR(-EFAULT);
52456 +
52457 + if ((*(i_tmp + i_num))->iface == NULL)
52458 + continue;
52459 +
52460 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52461 + if (!len || len >= IFNAMSIZ)
52462 + return ERR_PTR(-EINVAL);
52463 + tmp = acl_alloc(len);
52464 + if (tmp == NULL)
52465 + return ERR_PTR(-ENOMEM);
52466 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52467 + return ERR_PTR(-EFAULT);
52468 + (*(i_tmp + i_num))->iface = tmp;
52469 + }
52470 +
52471 + s_tmp->ips = i_tmp;
52472 +
52473 +insert:
52474 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52475 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52476 + return ERR_PTR(-ENOMEM);
52477 +
52478 + return s_tmp;
52479 +}
52480 +
52481 +static int
52482 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52483 +{
52484 + struct acl_subject_label s_pre;
52485 + struct acl_subject_label * ret;
52486 + int err;
52487 +
52488 + while (userp) {
52489 + if (copy_from_user(&s_pre, userp,
52490 + sizeof (struct acl_subject_label)))
52491 + return -EFAULT;
52492 +
52493 + /* do not add nested subjects here, add
52494 + while parsing objects
52495 + */
52496 +
52497 + if (s_pre.mode & GR_NESTED) {
52498 + userp = s_pre.prev;
52499 + continue;
52500 + }
52501 +
52502 + ret = do_copy_user_subj(userp, role);
52503 +
52504 + err = PTR_ERR(ret);
52505 + if (IS_ERR(ret))
52506 + return err;
52507 +
52508 + insert_acl_subj_label(ret, role);
52509 +
52510 + userp = s_pre.prev;
52511 + }
52512 +
52513 + return 0;
52514 +}
52515 +
52516 +static int
52517 +copy_user_acl(struct gr_arg *arg)
52518 +{
52519 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52520 + struct sprole_pw *sptmp;
52521 + struct gr_hash_struct *ghash;
52522 + uid_t *domainlist;
52523 + unsigned int r_num;
52524 + unsigned int len;
52525 + char *tmp;
52526 + int err = 0;
52527 + __u16 i;
52528 + __u32 num_subjs;
52529 +
52530 + /* we need a default and kernel role */
52531 + if (arg->role_db.num_roles < 2)
52532 + return -EINVAL;
52533 +
52534 + /* copy special role authentication info from userspace */
52535 +
52536 + num_sprole_pws = arg->num_sprole_pws;
52537 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52538 +
52539 + if (!acl_special_roles && num_sprole_pws)
52540 + return -ENOMEM;
52541 +
52542 + for (i = 0; i < num_sprole_pws; i++) {
52543 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52544 + if (!sptmp)
52545 + return -ENOMEM;
52546 + if (copy_from_user(sptmp, arg->sprole_pws + i,
52547 + sizeof (struct sprole_pw)))
52548 + return -EFAULT;
52549 +
52550 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
52551 +
52552 + if (!len || len >= GR_SPROLE_LEN)
52553 + return -EINVAL;
52554 +
52555 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52556 + return -ENOMEM;
52557 +
52558 + if (copy_from_user(tmp, sptmp->rolename, len))
52559 + return -EFAULT;
52560 +
52561 + tmp[len-1] = '\0';
52562 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52563 + printk(KERN_ALERT "Copying special role %s\n", tmp);
52564 +#endif
52565 + sptmp->rolename = tmp;
52566 + acl_special_roles[i] = sptmp;
52567 + }
52568 +
52569 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52570 +
52571 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52572 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
52573 +
52574 + if (!r_tmp)
52575 + return -ENOMEM;
52576 +
52577 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
52578 + sizeof (struct acl_role_label *)))
52579 + return -EFAULT;
52580 +
52581 + if (copy_from_user(r_tmp, r_utmp2,
52582 + sizeof (struct acl_role_label)))
52583 + return -EFAULT;
52584 +
52585 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52586 +
52587 + if (!len || len >= PATH_MAX)
52588 + return -EINVAL;
52589 +
52590 + if ((tmp = (char *) acl_alloc(len)) == NULL)
52591 + return -ENOMEM;
52592 +
52593 + if (copy_from_user(tmp, r_tmp->rolename, len))
52594 + return -EFAULT;
52595 +
52596 + tmp[len-1] = '\0';
52597 + r_tmp->rolename = tmp;
52598 +
52599 + if (!strcmp(r_tmp->rolename, "default")
52600 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52601 + default_role = r_tmp;
52602 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52603 + kernel_role = r_tmp;
52604 + }
52605 +
52606 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52607 + return -ENOMEM;
52608 +
52609 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52610 + return -EFAULT;
52611 +
52612 + r_tmp->hash = ghash;
52613 +
52614 + num_subjs = count_user_subjs(r_tmp->hash->first);
52615 +
52616 + r_tmp->subj_hash_size = num_subjs;
52617 + r_tmp->subj_hash =
52618 + (struct acl_subject_label **)
52619 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52620 +
52621 + if (!r_tmp->subj_hash)
52622 + return -ENOMEM;
52623 +
52624 + err = copy_user_allowedips(r_tmp);
52625 + if (err)
52626 + return err;
52627 +
52628 + /* copy domain info */
52629 + if (r_tmp->domain_children != NULL) {
52630 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52631 + if (domainlist == NULL)
52632 + return -ENOMEM;
52633 +
52634 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52635 + return -EFAULT;
52636 +
52637 + r_tmp->domain_children = domainlist;
52638 + }
52639 +
52640 + err = copy_user_transitions(r_tmp);
52641 + if (err)
52642 + return err;
52643 +
52644 + memset(r_tmp->subj_hash, 0,
52645 + r_tmp->subj_hash_size *
52646 + sizeof (struct acl_subject_label *));
52647 +
52648 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52649 +
52650 + if (err)
52651 + return err;
52652 +
52653 + /* set nested subject list to null */
52654 + r_tmp->hash->first = NULL;
52655 +
52656 + insert_acl_role_label(r_tmp);
52657 + }
52658 +
52659 + if (default_role == NULL || kernel_role == NULL)
52660 + return -EINVAL;
52661 +
52662 + return err;
52663 +}
52664 +
52665 +static int
52666 +gracl_init(struct gr_arg *args)
52667 +{
52668 + int error = 0;
52669 +
52670 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52671 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52672 +
52673 + if (init_variables(args)) {
52674 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52675 + error = -ENOMEM;
52676 + free_variables();
52677 + goto out;
52678 + }
52679 +
52680 + error = copy_user_acl(args);
52681 + free_init_variables();
52682 + if (error) {
52683 + free_variables();
52684 + goto out;
52685 + }
52686 +
52687 + if ((error = gr_set_acls(0))) {
52688 + free_variables();
52689 + goto out;
52690 + }
52691 +
52692 + pax_open_kernel();
52693 + gr_status |= GR_READY;
52694 + pax_close_kernel();
52695 +
52696 + out:
52697 + return error;
52698 +}
52699 +
52700 +/* derived from glibc fnmatch() 0: match, 1: no match*/
52701 +
52702 +static int
52703 +glob_match(const char *p, const char *n)
52704 +{
52705 + char c;
52706 +
52707 + while ((c = *p++) != '\0') {
52708 + switch (c) {
52709 + case '?':
52710 + if (*n == '\0')
52711 + return 1;
52712 + else if (*n == '/')
52713 + return 1;
52714 + break;
52715 + case '\\':
52716 + if (*n != c)
52717 + return 1;
52718 + break;
52719 + case '*':
52720 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52721 + if (*n == '/')
52722 + return 1;
52723 + else if (c == '?') {
52724 + if (*n == '\0')
52725 + return 1;
52726 + else
52727 + ++n;
52728 + }
52729 + }
52730 + if (c == '\0') {
52731 + return 0;
52732 + } else {
52733 + const char *endp;
52734 +
52735 + if ((endp = strchr(n, '/')) == NULL)
52736 + endp = n + strlen(n);
52737 +
52738 + if (c == '[') {
52739 + for (--p; n < endp; ++n)
52740 + if (!glob_match(p, n))
52741 + return 0;
52742 + } else if (c == '/') {
52743 + while (*n != '\0' && *n != '/')
52744 + ++n;
52745 + if (*n == '/' && !glob_match(p, n + 1))
52746 + return 0;
52747 + } else {
52748 + for (--p; n < endp; ++n)
52749 + if (*n == c && !glob_match(p, n))
52750 + return 0;
52751 + }
52752 +
52753 + return 1;
52754 + }
52755 + case '[':
52756 + {
52757 + int not;
52758 + char cold;
52759 +
52760 + if (*n == '\0' || *n == '/')
52761 + return 1;
52762 +
52763 + not = (*p == '!' || *p == '^');
52764 + if (not)
52765 + ++p;
52766 +
52767 + c = *p++;
52768 + for (;;) {
52769 + unsigned char fn = (unsigned char)*n;
52770 +
52771 + if (c == '\0')
52772 + return 1;
52773 + else {
52774 + if (c == fn)
52775 + goto matched;
52776 + cold = c;
52777 + c = *p++;
52778 +
52779 + if (c == '-' && *p != ']') {
52780 + unsigned char cend = *p++;
52781 +
52782 + if (cend == '\0')
52783 + return 1;
52784 +
52785 + if (cold <= fn && fn <= cend)
52786 + goto matched;
52787 +
52788 + c = *p++;
52789 + }
52790 + }
52791 +
52792 + if (c == ']')
52793 + break;
52794 + }
52795 + if (!not)
52796 + return 1;
52797 + break;
52798 + matched:
52799 + while (c != ']') {
52800 + if (c == '\0')
52801 + return 1;
52802 +
52803 + c = *p++;
52804 + }
52805 + if (not)
52806 + return 1;
52807 + }
52808 + break;
52809 + default:
52810 + if (c != *n)
52811 + return 1;
52812 + }
52813 +
52814 + ++n;
52815 + }
52816 +
52817 + if (*n == '\0')
52818 + return 0;
52819 +
52820 + if (*n == '/')
52821 + return 0;
52822 +
52823 + return 1;
52824 +}
52825 +
52826 +static struct acl_object_label *
52827 +chk_glob_label(struct acl_object_label *globbed,
52828 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52829 +{
52830 + struct acl_object_label *tmp;
52831 +
52832 + if (*path == NULL)
52833 + *path = gr_to_filename_nolock(dentry, mnt);
52834 +
52835 + tmp = globbed;
52836 +
52837 + while (tmp) {
52838 + if (!glob_match(tmp->filename, *path))
52839 + return tmp;
52840 + tmp = tmp->next;
52841 + }
52842 +
52843 + return NULL;
52844 +}
52845 +
52846 +static struct acl_object_label *
52847 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52848 + const ino_t curr_ino, const dev_t curr_dev,
52849 + const struct acl_subject_label *subj, char **path, const int checkglob)
52850 +{
52851 + struct acl_subject_label *tmpsubj;
52852 + struct acl_object_label *retval;
52853 + struct acl_object_label *retval2;
52854 +
52855 + tmpsubj = (struct acl_subject_label *) subj;
52856 + read_lock(&gr_inode_lock);
52857 + do {
52858 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52859 + if (retval) {
52860 + if (checkglob && retval->globbed) {
52861 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52862 + if (retval2)
52863 + retval = retval2;
52864 + }
52865 + break;
52866 + }
52867 + } while ((tmpsubj = tmpsubj->parent_subject));
52868 + read_unlock(&gr_inode_lock);
52869 +
52870 + return retval;
52871 +}
52872 +
52873 +static __inline__ struct acl_object_label *
52874 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52875 + struct dentry *curr_dentry,
52876 + const struct acl_subject_label *subj, char **path, const int checkglob)
52877 +{
52878 + int newglob = checkglob;
52879 + ino_t inode;
52880 + dev_t device;
52881 +
52882 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52883 + as we don't want a / * rule to match instead of the / object
52884 + don't do this for create lookups that call this function though, since they're looking up
52885 + on the parent and thus need globbing checks on all paths
52886 + */
52887 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52888 + newglob = GR_NO_GLOB;
52889 +
52890 + spin_lock(&curr_dentry->d_lock);
52891 + inode = curr_dentry->d_inode->i_ino;
52892 + device = __get_dev(curr_dentry);
52893 + spin_unlock(&curr_dentry->d_lock);
52894 +
52895 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52896 +}
52897 +
52898 +static struct acl_object_label *
52899 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52900 + const struct acl_subject_label *subj, char *path, const int checkglob)
52901 +{
52902 + struct dentry *dentry = (struct dentry *) l_dentry;
52903 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52904 + struct mount *real_mnt = real_mount(mnt);
52905 + struct acl_object_label *retval;
52906 + struct dentry *parent;
52907 +
52908 + write_seqlock(&rename_lock);
52909 + br_read_lock(vfsmount_lock);
52910 +
52911 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52912 +#ifdef CONFIG_NET
52913 + mnt == sock_mnt ||
52914 +#endif
52915 +#ifdef CONFIG_HUGETLBFS
52916 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52917 +#endif
52918 + /* ignore Eric Biederman */
52919 + IS_PRIVATE(l_dentry->d_inode))) {
52920 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52921 + goto out;
52922 + }
52923 +
52924 + for (;;) {
52925 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52926 + break;
52927 +
52928 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52929 + if (!mnt_has_parent(real_mnt))
52930 + break;
52931 +
52932 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52933 + if (retval != NULL)
52934 + goto out;
52935 +
52936 + dentry = real_mnt->mnt_mountpoint;
52937 + real_mnt = real_mnt->mnt_parent;
52938 + mnt = &real_mnt->mnt;
52939 + continue;
52940 + }
52941 +
52942 + parent = dentry->d_parent;
52943 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52944 + if (retval != NULL)
52945 + goto out;
52946 +
52947 + dentry = parent;
52948 + }
52949 +
52950 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52951 +
52952 + /* real_root is pinned so we don't have to hold a reference */
52953 + if (retval == NULL)
52954 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52955 +out:
52956 + br_read_unlock(vfsmount_lock);
52957 + write_sequnlock(&rename_lock);
52958 +
52959 + BUG_ON(retval == NULL);
52960 +
52961 + return retval;
52962 +}
52963 +
52964 +static __inline__ struct acl_object_label *
52965 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52966 + const struct acl_subject_label *subj)
52967 +{
52968 + char *path = NULL;
52969 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52970 +}
52971 +
52972 +static __inline__ struct acl_object_label *
52973 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52974 + const struct acl_subject_label *subj)
52975 +{
52976 + char *path = NULL;
52977 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52978 +}
52979 +
52980 +static __inline__ struct acl_object_label *
52981 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52982 + const struct acl_subject_label *subj, char *path)
52983 +{
52984 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52985 +}
52986 +
52987 +static struct acl_subject_label *
52988 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52989 + const struct acl_role_label *role)
52990 +{
52991 + struct dentry *dentry = (struct dentry *) l_dentry;
52992 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52993 + struct mount *real_mnt = real_mount(mnt);
52994 + struct acl_subject_label *retval;
52995 + struct dentry *parent;
52996 +
52997 + write_seqlock(&rename_lock);
52998 + br_read_lock(vfsmount_lock);
52999 +
53000 + for (;;) {
53001 + if (dentry == real_root.dentry && mnt == real_root.mnt)
53002 + break;
53003 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
53004 + if (!mnt_has_parent(real_mnt))
53005 + break;
53006 +
53007 + spin_lock(&dentry->d_lock);
53008 + read_lock(&gr_inode_lock);
53009 + retval =
53010 + lookup_acl_subj_label(dentry->d_inode->i_ino,
53011 + __get_dev(dentry), role);
53012 + read_unlock(&gr_inode_lock);
53013 + spin_unlock(&dentry->d_lock);
53014 + if (retval != NULL)
53015 + goto out;
53016 +
53017 + dentry = real_mnt->mnt_mountpoint;
53018 + real_mnt = real_mnt->mnt_parent;
53019 + mnt = &real_mnt->mnt;
53020 + continue;
53021 + }
53022 +
53023 + spin_lock(&dentry->d_lock);
53024 + read_lock(&gr_inode_lock);
53025 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53026 + __get_dev(dentry), role);
53027 + read_unlock(&gr_inode_lock);
53028 + parent = dentry->d_parent;
53029 + spin_unlock(&dentry->d_lock);
53030 +
53031 + if (retval != NULL)
53032 + goto out;
53033 +
53034 + dentry = parent;
53035 + }
53036 +
53037 + spin_lock(&dentry->d_lock);
53038 + read_lock(&gr_inode_lock);
53039 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
53040 + __get_dev(dentry), role);
53041 + read_unlock(&gr_inode_lock);
53042 + spin_unlock(&dentry->d_lock);
53043 +
53044 + if (unlikely(retval == NULL)) {
53045 + /* real_root is pinned, we don't need to hold a reference */
53046 + read_lock(&gr_inode_lock);
53047 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
53048 + __get_dev(real_root.dentry), role);
53049 + read_unlock(&gr_inode_lock);
53050 + }
53051 +out:
53052 + br_read_unlock(vfsmount_lock);
53053 + write_sequnlock(&rename_lock);
53054 +
53055 + BUG_ON(retval == NULL);
53056 +
53057 + return retval;
53058 +}
53059 +
53060 +static void
53061 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
53062 +{
53063 + struct task_struct *task = current;
53064 + const struct cred *cred = current_cred();
53065 +
53066 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53067 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53068 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53069 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
53070 +
53071 + return;
53072 +}
53073 +
53074 +static void
53075 +gr_log_learn_sysctl(const char *path, const __u32 mode)
53076 +{
53077 + struct task_struct *task = current;
53078 + const struct cred *cred = current_cred();
53079 +
53080 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
53081 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53082 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53083 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
53084 +
53085 + return;
53086 +}
53087 +
53088 +static void
53089 +gr_log_learn_id_change(const char type, const unsigned int real,
53090 + const unsigned int effective, const unsigned int fs)
53091 +{
53092 + struct task_struct *task = current;
53093 + const struct cred *cred = current_cred();
53094 +
53095 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
53096 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
53097 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
53098 + type, real, effective, fs, &task->signal->saved_ip);
53099 +
53100 + return;
53101 +}
53102 +
53103 +__u32
53104 +gr_search_file(const struct dentry * dentry, const __u32 mode,
53105 + const struct vfsmount * mnt)
53106 +{
53107 + __u32 retval = mode;
53108 + struct acl_subject_label *curracl;
53109 + struct acl_object_label *currobj;
53110 +
53111 + if (unlikely(!(gr_status & GR_READY)))
53112 + return (mode & ~GR_AUDITS);
53113 +
53114 + curracl = current->acl;
53115 +
53116 + currobj = chk_obj_label(dentry, mnt, curracl);
53117 + retval = currobj->mode & mode;
53118 +
53119 + /* if we're opening a specified transfer file for writing
53120 + (e.g. /dev/initctl), then transfer our role to init
53121 + */
53122 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
53123 + current->role->roletype & GR_ROLE_PERSIST)) {
53124 + struct task_struct *task = init_pid_ns.child_reaper;
53125 +
53126 + if (task->role != current->role) {
53127 + task->acl_sp_role = 0;
53128 + task->acl_role_id = current->acl_role_id;
53129 + task->role = current->role;
53130 + rcu_read_lock();
53131 + read_lock(&grsec_exec_file_lock);
53132 + gr_apply_subject_to_task(task);
53133 + read_unlock(&grsec_exec_file_lock);
53134 + rcu_read_unlock();
53135 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
53136 + }
53137 + }
53138 +
53139 + if (unlikely
53140 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
53141 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
53142 + __u32 new_mode = mode;
53143 +
53144 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53145 +
53146 + retval = new_mode;
53147 +
53148 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
53149 + new_mode |= GR_INHERIT;
53150 +
53151 + if (!(mode & GR_NOLEARN))
53152 + gr_log_learn(dentry, mnt, new_mode);
53153 + }
53154 +
53155 + return retval;
53156 +}
53157 +
53158 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
53159 + const struct dentry *parent,
53160 + const struct vfsmount *mnt)
53161 +{
53162 + struct name_entry *match;
53163 + struct acl_object_label *matchpo;
53164 + struct acl_subject_label *curracl;
53165 + char *path;
53166 +
53167 + if (unlikely(!(gr_status & GR_READY)))
53168 + return NULL;
53169 +
53170 + preempt_disable();
53171 + path = gr_to_filename_rbac(new_dentry, mnt);
53172 + match = lookup_name_entry_create(path);
53173 +
53174 + curracl = current->acl;
53175 +
53176 + if (match) {
53177 + read_lock(&gr_inode_lock);
53178 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
53179 + read_unlock(&gr_inode_lock);
53180 +
53181 + if (matchpo) {
53182 + preempt_enable();
53183 + return matchpo;
53184 + }
53185 + }
53186 +
53187 + // lookup parent
53188 +
53189 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
53190 +
53191 + preempt_enable();
53192 + return matchpo;
53193 +}
53194 +
53195 +__u32
53196 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
53197 + const struct vfsmount * mnt, const __u32 mode)
53198 +{
53199 + struct acl_object_label *matchpo;
53200 + __u32 retval;
53201 +
53202 + if (unlikely(!(gr_status & GR_READY)))
53203 + return (mode & ~GR_AUDITS);
53204 +
53205 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
53206 +
53207 + retval = matchpo->mode & mode;
53208 +
53209 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
53210 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53211 + __u32 new_mode = mode;
53212 +
53213 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
53214 +
53215 + gr_log_learn(new_dentry, mnt, new_mode);
53216 + return new_mode;
53217 + }
53218 +
53219 + return retval;
53220 +}
53221 +
53222 +__u32
53223 +gr_check_link(const struct dentry * new_dentry,
53224 + const struct dentry * parent_dentry,
53225 + const struct vfsmount * parent_mnt,
53226 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
53227 +{
53228 + struct acl_object_label *obj;
53229 + __u32 oldmode, newmode;
53230 + __u32 needmode;
53231 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
53232 + GR_DELETE | GR_INHERIT;
53233 +
53234 + if (unlikely(!(gr_status & GR_READY)))
53235 + return (GR_CREATE | GR_LINK);
53236 +
53237 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
53238 + oldmode = obj->mode;
53239 +
53240 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
53241 + newmode = obj->mode;
53242 +
53243 + needmode = newmode & checkmodes;
53244 +
53245 + // old name for hardlink must have at least the permissions of the new name
53246 + if ((oldmode & needmode) != needmode)
53247 + goto bad;
53248 +
53249 + // if old name had restrictions/auditing, make sure the new name does as well
53250 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
53251 +
53252 + // don't allow hardlinking of suid/sgid files without permission
53253 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53254 + needmode |= GR_SETID;
53255 +
53256 + if ((newmode & needmode) != needmode)
53257 + goto bad;
53258 +
53259 + // enforce minimum permissions
53260 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
53261 + return newmode;
53262 +bad:
53263 + needmode = oldmode;
53264 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
53265 + needmode |= GR_SETID;
53266 +
53267 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
53268 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
53269 + return (GR_CREATE | GR_LINK);
53270 + } else if (newmode & GR_SUPPRESS)
53271 + return GR_SUPPRESS;
53272 + else
53273 + return 0;
53274 +}
53275 +
53276 +int
53277 +gr_check_hidden_task(const struct task_struct *task)
53278 +{
53279 + if (unlikely(!(gr_status & GR_READY)))
53280 + return 0;
53281 +
53282 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
53283 + return 1;
53284 +
53285 + return 0;
53286 +}
53287 +
53288 +int
53289 +gr_check_protected_task(const struct task_struct *task)
53290 +{
53291 + if (unlikely(!(gr_status & GR_READY) || !task))
53292 + return 0;
53293 +
53294 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53295 + task->acl != current->acl)
53296 + return 1;
53297 +
53298 + return 0;
53299 +}
53300 +
53301 +int
53302 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53303 +{
53304 + struct task_struct *p;
53305 + int ret = 0;
53306 +
53307 + if (unlikely(!(gr_status & GR_READY) || !pid))
53308 + return ret;
53309 +
53310 + read_lock(&tasklist_lock);
53311 + do_each_pid_task(pid, type, p) {
53312 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
53313 + p->acl != current->acl) {
53314 + ret = 1;
53315 + goto out;
53316 + }
53317 + } while_each_pid_task(pid, type, p);
53318 +out:
53319 + read_unlock(&tasklist_lock);
53320 +
53321 + return ret;
53322 +}
53323 +
53324 +void
53325 +gr_copy_label(struct task_struct *tsk)
53326 +{
53327 + /* plain copying of fields is already done by dup_task_struct */
53328 + tsk->signal->used_accept = 0;
53329 + tsk->acl_sp_role = 0;
53330 + //tsk->acl_role_id = current->acl_role_id;
53331 + //tsk->acl = current->acl;
53332 + //tsk->role = current->role;
53333 + tsk->signal->curr_ip = current->signal->curr_ip;
53334 + tsk->signal->saved_ip = current->signal->saved_ip;
53335 + if (current->exec_file)
53336 + get_file(current->exec_file);
53337 + //tsk->exec_file = current->exec_file;
53338 + //tsk->is_writable = current->is_writable;
53339 + if (unlikely(current->signal->used_accept)) {
53340 + current->signal->curr_ip = 0;
53341 + current->signal->saved_ip = 0;
53342 + }
53343 +
53344 + return;
53345 +}
53346 +
53347 +static void
53348 +gr_set_proc_res(struct task_struct *task)
53349 +{
53350 + struct acl_subject_label *proc;
53351 + unsigned short i;
53352 +
53353 + proc = task->acl;
53354 +
53355 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
53356 + return;
53357 +
53358 + for (i = 0; i < RLIM_NLIMITS; i++) {
53359 + if (!(proc->resmask & (1 << i)))
53360 + continue;
53361 +
53362 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
53363 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
53364 + }
53365 +
53366 + return;
53367 +}
53368 +
53369 +extern int __gr_process_user_ban(struct user_struct *user);
53370 +
53371 +int
53372 +gr_check_user_change(int real, int effective, int fs)
53373 +{
53374 + unsigned int i;
53375 + __u16 num;
53376 + uid_t *uidlist;
53377 + int curuid;
53378 + int realok = 0;
53379 + int effectiveok = 0;
53380 + int fsok = 0;
53381 +
53382 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
53383 + struct user_struct *user;
53384 +
53385 + if (real == -1)
53386 + goto skipit;
53387 +
53388 + user = find_user(real);
53389 + if (user == NULL)
53390 + goto skipit;
53391 +
53392 + if (__gr_process_user_ban(user)) {
53393 + /* for find_user */
53394 + free_uid(user);
53395 + return 1;
53396 + }
53397 +
53398 + /* for find_user */
53399 + free_uid(user);
53400 +
53401 +skipit:
53402 +#endif
53403 +
53404 + if (unlikely(!(gr_status & GR_READY)))
53405 + return 0;
53406 +
53407 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53408 + gr_log_learn_id_change('u', real, effective, fs);
53409 +
53410 + num = current->acl->user_trans_num;
53411 + uidlist = current->acl->user_transitions;
53412 +
53413 + if (uidlist == NULL)
53414 + return 0;
53415 +
53416 + if (real == -1)
53417 + realok = 1;
53418 + if (effective == -1)
53419 + effectiveok = 1;
53420 + if (fs == -1)
53421 + fsok = 1;
53422 +
53423 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
53424 + for (i = 0; i < num; i++) {
53425 + curuid = (int)uidlist[i];
53426 + if (real == curuid)
53427 + realok = 1;
53428 + if (effective == curuid)
53429 + effectiveok = 1;
53430 + if (fs == curuid)
53431 + fsok = 1;
53432 + }
53433 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
53434 + for (i = 0; i < num; i++) {
53435 + curuid = (int)uidlist[i];
53436 + if (real == curuid)
53437 + break;
53438 + if (effective == curuid)
53439 + break;
53440 + if (fs == curuid)
53441 + break;
53442 + }
53443 + /* not in deny list */
53444 + if (i == num) {
53445 + realok = 1;
53446 + effectiveok = 1;
53447 + fsok = 1;
53448 + }
53449 + }
53450 +
53451 + if (realok && effectiveok && fsok)
53452 + return 0;
53453 + else {
53454 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53455 + return 1;
53456 + }
53457 +}
53458 +
53459 +int
53460 +gr_check_group_change(int real, int effective, int fs)
53461 +{
53462 + unsigned int i;
53463 + __u16 num;
53464 + gid_t *gidlist;
53465 + int curgid;
53466 + int realok = 0;
53467 + int effectiveok = 0;
53468 + int fsok = 0;
53469 +
53470 + if (unlikely(!(gr_status & GR_READY)))
53471 + return 0;
53472 +
53473 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53474 + gr_log_learn_id_change('g', real, effective, fs);
53475 +
53476 + num = current->acl->group_trans_num;
53477 + gidlist = current->acl->group_transitions;
53478 +
53479 + if (gidlist == NULL)
53480 + return 0;
53481 +
53482 + if (real == -1)
53483 + realok = 1;
53484 + if (effective == -1)
53485 + effectiveok = 1;
53486 + if (fs == -1)
53487 + fsok = 1;
53488 +
53489 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
53490 + for (i = 0; i < num; i++) {
53491 + curgid = (int)gidlist[i];
53492 + if (real == curgid)
53493 + realok = 1;
53494 + if (effective == curgid)
53495 + effectiveok = 1;
53496 + if (fs == curgid)
53497 + fsok = 1;
53498 + }
53499 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
53500 + for (i = 0; i < num; i++) {
53501 + curgid = (int)gidlist[i];
53502 + if (real == curgid)
53503 + break;
53504 + if (effective == curgid)
53505 + break;
53506 + if (fs == curgid)
53507 + break;
53508 + }
53509 + /* not in deny list */
53510 + if (i == num) {
53511 + realok = 1;
53512 + effectiveok = 1;
53513 + fsok = 1;
53514 + }
53515 + }
53516 +
53517 + if (realok && effectiveok && fsok)
53518 + return 0;
53519 + else {
53520 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53521 + return 1;
53522 + }
53523 +}
53524 +
53525 +extern int gr_acl_is_capable(const int cap);
53526 +
53527 +void
53528 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53529 +{
53530 + struct acl_role_label *role = task->role;
53531 + struct acl_subject_label *subj = NULL;
53532 + struct acl_object_label *obj;
53533 + struct file *filp;
53534 +
53535 + if (unlikely(!(gr_status & GR_READY)))
53536 + return;
53537 +
53538 + filp = task->exec_file;
53539 +
53540 + /* kernel process, we'll give them the kernel role */
53541 + if (unlikely(!filp)) {
53542 + task->role = kernel_role;
53543 + task->acl = kernel_role->root_label;
53544 + return;
53545 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53546 + role = lookup_acl_role_label(task, uid, gid);
53547 +
53548 + /* don't change the role if we're not a privileged process */
53549 + if (role && task->role != role &&
53550 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53551 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53552 + return;
53553 +
53554 + /* perform subject lookup in possibly new role
53555 + we can use this result below in the case where role == task->role
53556 + */
53557 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53558 +
53559 + /* if we changed uid/gid, but result in the same role
53560 + and are using inheritance, don't lose the inherited subject
53561 + if current subject is other than what normal lookup
53562 + would result in, we arrived via inheritance, don't
53563 + lose subject
53564 + */
53565 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53566 + (subj == task->acl)))
53567 + task->acl = subj;
53568 +
53569 + task->role = role;
53570 +
53571 + task->is_writable = 0;
53572 +
53573 + /* ignore additional mmap checks for processes that are writable
53574 + by the default ACL */
53575 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53576 + if (unlikely(obj->mode & GR_WRITE))
53577 + task->is_writable = 1;
53578 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53579 + if (unlikely(obj->mode & GR_WRITE))
53580 + task->is_writable = 1;
53581 +
53582 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53583 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53584 +#endif
53585 +
53586 + gr_set_proc_res(task);
53587 +
53588 + return;
53589 +}
53590 +
53591 +int
53592 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53593 + const int unsafe_flags)
53594 +{
53595 + struct task_struct *task = current;
53596 + struct acl_subject_label *newacl;
53597 + struct acl_object_label *obj;
53598 + __u32 retmode;
53599 +
53600 + if (unlikely(!(gr_status & GR_READY)))
53601 + return 0;
53602 +
53603 + newacl = chk_subj_label(dentry, mnt, task->role);
53604 +
53605 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53606 + did an exec
53607 + */
53608 + rcu_read_lock();
53609 + read_lock(&tasklist_lock);
53610 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53611 + (task->parent->acl->mode & GR_POVERRIDE))) {
53612 + read_unlock(&tasklist_lock);
53613 + rcu_read_unlock();
53614 + goto skip_check;
53615 + }
53616 + read_unlock(&tasklist_lock);
53617 + rcu_read_unlock();
53618 +
53619 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53620 + !(task->role->roletype & GR_ROLE_GOD) &&
53621 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
53622 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
53623 + if (unsafe_flags & LSM_UNSAFE_SHARE)
53624 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53625 + else
53626 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53627 + return -EACCES;
53628 + }
53629 +
53630 +skip_check:
53631 +
53632 + obj = chk_obj_label(dentry, mnt, task->acl);
53633 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53634 +
53635 + if (!(task->acl->mode & GR_INHERITLEARN) &&
53636 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53637 + if (obj->nested)
53638 + task->acl = obj->nested;
53639 + else
53640 + task->acl = newacl;
53641 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53642 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53643 +
53644 + task->is_writable = 0;
53645 +
53646 + /* ignore additional mmap checks for processes that are writable
53647 + by the default ACL */
53648 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53649 + if (unlikely(obj->mode & GR_WRITE))
53650 + task->is_writable = 1;
53651 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53652 + if (unlikely(obj->mode & GR_WRITE))
53653 + task->is_writable = 1;
53654 +
53655 + gr_set_proc_res(task);
53656 +
53657 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53658 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53659 +#endif
53660 + return 0;
53661 +}
53662 +
53663 +/* always called with valid inodev ptr */
53664 +static void
53665 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53666 +{
53667 + struct acl_object_label *matchpo;
53668 + struct acl_subject_label *matchps;
53669 + struct acl_subject_label *subj;
53670 + struct acl_role_label *role;
53671 + unsigned int x;
53672 +
53673 + FOR_EACH_ROLE_START(role)
53674 + FOR_EACH_SUBJECT_START(role, subj, x)
53675 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53676 + matchpo->mode |= GR_DELETED;
53677 + FOR_EACH_SUBJECT_END(subj,x)
53678 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53679 + if (subj->inode == ino && subj->device == dev)
53680 + subj->mode |= GR_DELETED;
53681 + FOR_EACH_NESTED_SUBJECT_END(subj)
53682 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53683 + matchps->mode |= GR_DELETED;
53684 + FOR_EACH_ROLE_END(role)
53685 +
53686 + inodev->nentry->deleted = 1;
53687 +
53688 + return;
53689 +}
53690 +
53691 +void
53692 +gr_handle_delete(const ino_t ino, const dev_t dev)
53693 +{
53694 + struct inodev_entry *inodev;
53695 +
53696 + if (unlikely(!(gr_status & GR_READY)))
53697 + return;
53698 +
53699 + write_lock(&gr_inode_lock);
53700 + inodev = lookup_inodev_entry(ino, dev);
53701 + if (inodev != NULL)
53702 + do_handle_delete(inodev, ino, dev);
53703 + write_unlock(&gr_inode_lock);
53704 +
53705 + return;
53706 +}
53707 +
53708 +static void
53709 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53710 + const ino_t newinode, const dev_t newdevice,
53711 + struct acl_subject_label *subj)
53712 +{
53713 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53714 + struct acl_object_label *match;
53715 +
53716 + match = subj->obj_hash[index];
53717 +
53718 + while (match && (match->inode != oldinode ||
53719 + match->device != olddevice ||
53720 + !(match->mode & GR_DELETED)))
53721 + match = match->next;
53722 +
53723 + if (match && (match->inode == oldinode)
53724 + && (match->device == olddevice)
53725 + && (match->mode & GR_DELETED)) {
53726 + if (match->prev == NULL) {
53727 + subj->obj_hash[index] = match->next;
53728 + if (match->next != NULL)
53729 + match->next->prev = NULL;
53730 + } else {
53731 + match->prev->next = match->next;
53732 + if (match->next != NULL)
53733 + match->next->prev = match->prev;
53734 + }
53735 + match->prev = NULL;
53736 + match->next = NULL;
53737 + match->inode = newinode;
53738 + match->device = newdevice;
53739 + match->mode &= ~GR_DELETED;
53740 +
53741 + insert_acl_obj_label(match, subj);
53742 + }
53743 +
53744 + return;
53745 +}
53746 +
53747 +static void
53748 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53749 + const ino_t newinode, const dev_t newdevice,
53750 + struct acl_role_label *role)
53751 +{
53752 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53753 + struct acl_subject_label *match;
53754 +
53755 + match = role->subj_hash[index];
53756 +
53757 + while (match && (match->inode != oldinode ||
53758 + match->device != olddevice ||
53759 + !(match->mode & GR_DELETED)))
53760 + match = match->next;
53761 +
53762 + if (match && (match->inode == oldinode)
53763 + && (match->device == olddevice)
53764 + && (match->mode & GR_DELETED)) {
53765 + if (match->prev == NULL) {
53766 + role->subj_hash[index] = match->next;
53767 + if (match->next != NULL)
53768 + match->next->prev = NULL;
53769 + } else {
53770 + match->prev->next = match->next;
53771 + if (match->next != NULL)
53772 + match->next->prev = match->prev;
53773 + }
53774 + match->prev = NULL;
53775 + match->next = NULL;
53776 + match->inode = newinode;
53777 + match->device = newdevice;
53778 + match->mode &= ~GR_DELETED;
53779 +
53780 + insert_acl_subj_label(match, role);
53781 + }
53782 +
53783 + return;
53784 +}
53785 +
53786 +static void
53787 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53788 + const ino_t newinode, const dev_t newdevice)
53789 +{
53790 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53791 + struct inodev_entry *match;
53792 +
53793 + match = inodev_set.i_hash[index];
53794 +
53795 + while (match && (match->nentry->inode != oldinode ||
53796 + match->nentry->device != olddevice || !match->nentry->deleted))
53797 + match = match->next;
53798 +
53799 + if (match && (match->nentry->inode == oldinode)
53800 + && (match->nentry->device == olddevice) &&
53801 + match->nentry->deleted) {
53802 + if (match->prev == NULL) {
53803 + inodev_set.i_hash[index] = match->next;
53804 + if (match->next != NULL)
53805 + match->next->prev = NULL;
53806 + } else {
53807 + match->prev->next = match->next;
53808 + if (match->next != NULL)
53809 + match->next->prev = match->prev;
53810 + }
53811 + match->prev = NULL;
53812 + match->next = NULL;
53813 + match->nentry->inode = newinode;
53814 + match->nentry->device = newdevice;
53815 + match->nentry->deleted = 0;
53816 +
53817 + insert_inodev_entry(match);
53818 + }
53819 +
53820 + return;
53821 +}
53822 +
53823 +static void
53824 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53825 +{
53826 + struct acl_subject_label *subj;
53827 + struct acl_role_label *role;
53828 + unsigned int x;
53829 +
53830 + FOR_EACH_ROLE_START(role)
53831 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53832 +
53833 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53834 + if ((subj->inode == ino) && (subj->device == dev)) {
53835 + subj->inode = ino;
53836 + subj->device = dev;
53837 + }
53838 + FOR_EACH_NESTED_SUBJECT_END(subj)
53839 + FOR_EACH_SUBJECT_START(role, subj, x)
53840 + update_acl_obj_label(matchn->inode, matchn->device,
53841 + ino, dev, subj);
53842 + FOR_EACH_SUBJECT_END(subj,x)
53843 + FOR_EACH_ROLE_END(role)
53844 +
53845 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53846 +
53847 + return;
53848 +}
53849 +
53850 +static void
53851 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53852 + const struct vfsmount *mnt)
53853 +{
53854 + ino_t ino = dentry->d_inode->i_ino;
53855 + dev_t dev = __get_dev(dentry);
53856 +
53857 + __do_handle_create(matchn, ino, dev);
53858 +
53859 + return;
53860 +}
53861 +
53862 +void
53863 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53864 +{
53865 + struct name_entry *matchn;
53866 +
53867 + if (unlikely(!(gr_status & GR_READY)))
53868 + return;
53869 +
53870 + preempt_disable();
53871 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53872 +
53873 + if (unlikely((unsigned long)matchn)) {
53874 + write_lock(&gr_inode_lock);
53875 + do_handle_create(matchn, dentry, mnt);
53876 + write_unlock(&gr_inode_lock);
53877 + }
53878 + preempt_enable();
53879 +
53880 + return;
53881 +}
53882 +
53883 +void
53884 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53885 +{
53886 + struct name_entry *matchn;
53887 +
53888 + if (unlikely(!(gr_status & GR_READY)))
53889 + return;
53890 +
53891 + preempt_disable();
53892 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53893 +
53894 + if (unlikely((unsigned long)matchn)) {
53895 + write_lock(&gr_inode_lock);
53896 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53897 + write_unlock(&gr_inode_lock);
53898 + }
53899 + preempt_enable();
53900 +
53901 + return;
53902 +}
53903 +
53904 +void
53905 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53906 + struct dentry *old_dentry,
53907 + struct dentry *new_dentry,
53908 + struct vfsmount *mnt, const __u8 replace)
53909 +{
53910 + struct name_entry *matchn;
53911 + struct inodev_entry *inodev;
53912 + struct inode *inode = new_dentry->d_inode;
53913 + ino_t old_ino = old_dentry->d_inode->i_ino;
53914 + dev_t old_dev = __get_dev(old_dentry);
53915 +
53916 + /* vfs_rename swaps the name and parent link for old_dentry and
53917 + new_dentry
53918 + at this point, old_dentry has the new name, parent link, and inode
53919 + for the renamed file
53920 + if a file is being replaced by a rename, new_dentry has the inode
53921 + and name for the replaced file
53922 + */
53923 +
53924 + if (unlikely(!(gr_status & GR_READY)))
53925 + return;
53926 +
53927 + preempt_disable();
53928 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53929 +
53930 + /* we wouldn't have to check d_inode if it weren't for
53931 + NFS silly-renaming
53932 + */
53933 +
53934 + write_lock(&gr_inode_lock);
53935 + if (unlikely(replace && inode)) {
53936 + ino_t new_ino = inode->i_ino;
53937 + dev_t new_dev = __get_dev(new_dentry);
53938 +
53939 + inodev = lookup_inodev_entry(new_ino, new_dev);
53940 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53941 + do_handle_delete(inodev, new_ino, new_dev);
53942 + }
53943 +
53944 + inodev = lookup_inodev_entry(old_ino, old_dev);
53945 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53946 + do_handle_delete(inodev, old_ino, old_dev);
53947 +
53948 + if (unlikely((unsigned long)matchn))
53949 + do_handle_create(matchn, old_dentry, mnt);
53950 +
53951 + write_unlock(&gr_inode_lock);
53952 + preempt_enable();
53953 +
53954 + return;
53955 +}
53956 +
53957 +static int
53958 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53959 + unsigned char **sum)
53960 +{
53961 + struct acl_role_label *r;
53962 + struct role_allowed_ip *ipp;
53963 + struct role_transition *trans;
53964 + unsigned int i;
53965 + int found = 0;
53966 + u32 curr_ip = current->signal->curr_ip;
53967 +
53968 + current->signal->saved_ip = curr_ip;
53969 +
53970 + /* check transition table */
53971 +
53972 + for (trans = current->role->transitions; trans; trans = trans->next) {
53973 + if (!strcmp(rolename, trans->rolename)) {
53974 + found = 1;
53975 + break;
53976 + }
53977 + }
53978 +
53979 + if (!found)
53980 + return 0;
53981 +
53982 + /* handle special roles that do not require authentication
53983 + and check ip */
53984 +
53985 + FOR_EACH_ROLE_START(r)
53986 + if (!strcmp(rolename, r->rolename) &&
53987 + (r->roletype & GR_ROLE_SPECIAL)) {
53988 + found = 0;
53989 + if (r->allowed_ips != NULL) {
53990 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53991 + if ((ntohl(curr_ip) & ipp->netmask) ==
53992 + (ntohl(ipp->addr) & ipp->netmask))
53993 + found = 1;
53994 + }
53995 + } else
53996 + found = 2;
53997 + if (!found)
53998 + return 0;
53999 +
54000 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
54001 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
54002 + *salt = NULL;
54003 + *sum = NULL;
54004 + return 1;
54005 + }
54006 + }
54007 + FOR_EACH_ROLE_END(r)
54008 +
54009 + for (i = 0; i < num_sprole_pws; i++) {
54010 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
54011 + *salt = acl_special_roles[i]->salt;
54012 + *sum = acl_special_roles[i]->sum;
54013 + return 1;
54014 + }
54015 + }
54016 +
54017 + return 0;
54018 +}
54019 +
54020 +static void
54021 +assign_special_role(char *rolename)
54022 +{
54023 + struct acl_object_label *obj;
54024 + struct acl_role_label *r;
54025 + struct acl_role_label *assigned = NULL;
54026 + struct task_struct *tsk;
54027 + struct file *filp;
54028 +
54029 + FOR_EACH_ROLE_START(r)
54030 + if (!strcmp(rolename, r->rolename) &&
54031 + (r->roletype & GR_ROLE_SPECIAL)) {
54032 + assigned = r;
54033 + break;
54034 + }
54035 + FOR_EACH_ROLE_END(r)
54036 +
54037 + if (!assigned)
54038 + return;
54039 +
54040 + read_lock(&tasklist_lock);
54041 + read_lock(&grsec_exec_file_lock);
54042 +
54043 + tsk = current->real_parent;
54044 + if (tsk == NULL)
54045 + goto out_unlock;
54046 +
54047 + filp = tsk->exec_file;
54048 + if (filp == NULL)
54049 + goto out_unlock;
54050 +
54051 + tsk->is_writable = 0;
54052 +
54053 + tsk->acl_sp_role = 1;
54054 + tsk->acl_role_id = ++acl_sp_role_value;
54055 + tsk->role = assigned;
54056 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
54057 +
54058 + /* ignore additional mmap checks for processes that are writable
54059 + by the default ACL */
54060 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54061 + if (unlikely(obj->mode & GR_WRITE))
54062 + tsk->is_writable = 1;
54063 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
54064 + if (unlikely(obj->mode & GR_WRITE))
54065 + tsk->is_writable = 1;
54066 +
54067 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54068 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
54069 +#endif
54070 +
54071 +out_unlock:
54072 + read_unlock(&grsec_exec_file_lock);
54073 + read_unlock(&tasklist_lock);
54074 + return;
54075 +}
54076 +
54077 +int gr_check_secure_terminal(struct task_struct *task)
54078 +{
54079 + struct task_struct *p, *p2, *p3;
54080 + struct files_struct *files;
54081 + struct fdtable *fdt;
54082 + struct file *our_file = NULL, *file;
54083 + int i;
54084 +
54085 + if (task->signal->tty == NULL)
54086 + return 1;
54087 +
54088 + files = get_files_struct(task);
54089 + if (files != NULL) {
54090 + rcu_read_lock();
54091 + fdt = files_fdtable(files);
54092 + for (i=0; i < fdt->max_fds; i++) {
54093 + file = fcheck_files(files, i);
54094 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
54095 + get_file(file);
54096 + our_file = file;
54097 + }
54098 + }
54099 + rcu_read_unlock();
54100 + put_files_struct(files);
54101 + }
54102 +
54103 + if (our_file == NULL)
54104 + return 1;
54105 +
54106 + read_lock(&tasklist_lock);
54107 + do_each_thread(p2, p) {
54108 + files = get_files_struct(p);
54109 + if (files == NULL ||
54110 + (p->signal && p->signal->tty == task->signal->tty)) {
54111 + if (files != NULL)
54112 + put_files_struct(files);
54113 + continue;
54114 + }
54115 + rcu_read_lock();
54116 + fdt = files_fdtable(files);
54117 + for (i=0; i < fdt->max_fds; i++) {
54118 + file = fcheck_files(files, i);
54119 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
54120 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
54121 + p3 = task;
54122 + while (p3->pid > 0) {
54123 + if (p3 == p)
54124 + break;
54125 + p3 = p3->real_parent;
54126 + }
54127 + if (p3 == p)
54128 + break;
54129 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
54130 + gr_handle_alertkill(p);
54131 + rcu_read_unlock();
54132 + put_files_struct(files);
54133 + read_unlock(&tasklist_lock);
54134 + fput(our_file);
54135 + return 0;
54136 + }
54137 + }
54138 + rcu_read_unlock();
54139 + put_files_struct(files);
54140 + } while_each_thread(p2, p);
54141 + read_unlock(&tasklist_lock);
54142 +
54143 + fput(our_file);
54144 + return 1;
54145 +}
54146 +
54147 +ssize_t
54148 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
54149 +{
54150 + struct gr_arg_wrapper uwrap;
54151 + unsigned char *sprole_salt = NULL;
54152 + unsigned char *sprole_sum = NULL;
54153 + int error = sizeof (struct gr_arg_wrapper);
54154 + int error2 = 0;
54155 +
54156 + mutex_lock(&gr_dev_mutex);
54157 +
54158 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
54159 + error = -EPERM;
54160 + goto out;
54161 + }
54162 +
54163 + if (count != sizeof (struct gr_arg_wrapper)) {
54164 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
54165 + error = -EINVAL;
54166 + goto out;
54167 + }
54168 +
54169 +
54170 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
54171 + gr_auth_expires = 0;
54172 + gr_auth_attempts = 0;
54173 + }
54174 +
54175 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
54176 + error = -EFAULT;
54177 + goto out;
54178 + }
54179 +
54180 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
54181 + error = -EINVAL;
54182 + goto out;
54183 + }
54184 +
54185 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
54186 + error = -EFAULT;
54187 + goto out;
54188 + }
54189 +
54190 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54191 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54192 + time_after(gr_auth_expires, get_seconds())) {
54193 + error = -EBUSY;
54194 + goto out;
54195 + }
54196 +
54197 + /* if non-root trying to do anything other than use a special role,
54198 + do not attempt authentication, do not count towards authentication
54199 + locking
54200 + */
54201 +
54202 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
54203 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
54204 + current_uid()) {
54205 + error = -EPERM;
54206 + goto out;
54207 + }
54208 +
54209 + /* ensure pw and special role name are null terminated */
54210 +
54211 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
54212 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
54213 +
54214 + /* Okay.
54215 + * We have our enough of the argument structure..(we have yet
54216 + * to copy_from_user the tables themselves) . Copy the tables
54217 + * only if we need them, i.e. for loading operations. */
54218 +
54219 + switch (gr_usermode->mode) {
54220 + case GR_STATUS:
54221 + if (gr_status & GR_READY) {
54222 + error = 1;
54223 + if (!gr_check_secure_terminal(current))
54224 + error = 3;
54225 + } else
54226 + error = 2;
54227 + goto out;
54228 + case GR_SHUTDOWN:
54229 + if ((gr_status & GR_READY)
54230 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54231 + pax_open_kernel();
54232 + gr_status &= ~GR_READY;
54233 + pax_close_kernel();
54234 +
54235 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
54236 + free_variables();
54237 + memset(gr_usermode, 0, sizeof (struct gr_arg));
54238 + memset(gr_system_salt, 0, GR_SALT_LEN);
54239 + memset(gr_system_sum, 0, GR_SHA_LEN);
54240 + } else if (gr_status & GR_READY) {
54241 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
54242 + error = -EPERM;
54243 + } else {
54244 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
54245 + error = -EAGAIN;
54246 + }
54247 + break;
54248 + case GR_ENABLE:
54249 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
54250 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
54251 + else {
54252 + if (gr_status & GR_READY)
54253 + error = -EAGAIN;
54254 + else
54255 + error = error2;
54256 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
54257 + }
54258 + break;
54259 + case GR_RELOAD:
54260 + if (!(gr_status & GR_READY)) {
54261 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
54262 + error = -EAGAIN;
54263 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54264 + preempt_disable();
54265 +
54266 + pax_open_kernel();
54267 + gr_status &= ~GR_READY;
54268 + pax_close_kernel();
54269 +
54270 + free_variables();
54271 + if (!(error2 = gracl_init(gr_usermode))) {
54272 + preempt_enable();
54273 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
54274 + } else {
54275 + preempt_enable();
54276 + error = error2;
54277 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54278 + }
54279 + } else {
54280 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
54281 + error = -EPERM;
54282 + }
54283 + break;
54284 + case GR_SEGVMOD:
54285 + if (unlikely(!(gr_status & GR_READY))) {
54286 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
54287 + error = -EAGAIN;
54288 + break;
54289 + }
54290 +
54291 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
54292 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
54293 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
54294 + struct acl_subject_label *segvacl;
54295 + segvacl =
54296 + lookup_acl_subj_label(gr_usermode->segv_inode,
54297 + gr_usermode->segv_device,
54298 + current->role);
54299 + if (segvacl) {
54300 + segvacl->crashes = 0;
54301 + segvacl->expires = 0;
54302 + }
54303 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
54304 + gr_remove_uid(gr_usermode->segv_uid);
54305 + }
54306 + } else {
54307 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
54308 + error = -EPERM;
54309 + }
54310 + break;
54311 + case GR_SPROLE:
54312 + case GR_SPROLEPAM:
54313 + if (unlikely(!(gr_status & GR_READY))) {
54314 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
54315 + error = -EAGAIN;
54316 + break;
54317 + }
54318 +
54319 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
54320 + current->role->expires = 0;
54321 + current->role->auth_attempts = 0;
54322 + }
54323 +
54324 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
54325 + time_after(current->role->expires, get_seconds())) {
54326 + error = -EBUSY;
54327 + goto out;
54328 + }
54329 +
54330 + if (lookup_special_role_auth
54331 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
54332 + && ((!sprole_salt && !sprole_sum)
54333 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
54334 + char *p = "";
54335 + assign_special_role(gr_usermode->sp_role);
54336 + read_lock(&tasklist_lock);
54337 + if (current->real_parent)
54338 + p = current->real_parent->role->rolename;
54339 + read_unlock(&tasklist_lock);
54340 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
54341 + p, acl_sp_role_value);
54342 + } else {
54343 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
54344 + error = -EPERM;
54345 + if(!(current->role->auth_attempts++))
54346 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54347 +
54348 + goto out;
54349 + }
54350 + break;
54351 + case GR_UNSPROLE:
54352 + if (unlikely(!(gr_status & GR_READY))) {
54353 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
54354 + error = -EAGAIN;
54355 + break;
54356 + }
54357 +
54358 + if (current->role->roletype & GR_ROLE_SPECIAL) {
54359 + char *p = "";
54360 + int i = 0;
54361 +
54362 + read_lock(&tasklist_lock);
54363 + if (current->real_parent) {
54364 + p = current->real_parent->role->rolename;
54365 + i = current->real_parent->acl_role_id;
54366 + }
54367 + read_unlock(&tasklist_lock);
54368 +
54369 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
54370 + gr_set_acls(1);
54371 + } else {
54372 + error = -EPERM;
54373 + goto out;
54374 + }
54375 + break;
54376 + default:
54377 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
54378 + error = -EINVAL;
54379 + break;
54380 + }
54381 +
54382 + if (error != -EPERM)
54383 + goto out;
54384 +
54385 + if(!(gr_auth_attempts++))
54386 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
54387 +
54388 + out:
54389 + mutex_unlock(&gr_dev_mutex);
54390 + return error;
54391 +}
54392 +
54393 +/* must be called with
54394 + rcu_read_lock();
54395 + read_lock(&tasklist_lock);
54396 + read_lock(&grsec_exec_file_lock);
54397 +*/
54398 +int gr_apply_subject_to_task(struct task_struct *task)
54399 +{
54400 + struct acl_object_label *obj;
54401 + char *tmpname;
54402 + struct acl_subject_label *tmpsubj;
54403 + struct file *filp;
54404 + struct name_entry *nmatch;
54405 +
54406 + filp = task->exec_file;
54407 + if (filp == NULL)
54408 + return 0;
54409 +
54410 + /* the following is to apply the correct subject
54411 + on binaries running when the RBAC system
54412 + is enabled, when the binaries have been
54413 + replaced or deleted since their execution
54414 + -----
54415 + when the RBAC system starts, the inode/dev
54416 + from exec_file will be one the RBAC system
54417 + is unaware of. It only knows the inode/dev
54418 + of the present file on disk, or the absence
54419 + of it.
54420 + */
54421 + preempt_disable();
54422 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54423 +
54424 + nmatch = lookup_name_entry(tmpname);
54425 + preempt_enable();
54426 + tmpsubj = NULL;
54427 + if (nmatch) {
54428 + if (nmatch->deleted)
54429 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54430 + else
54431 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54432 + if (tmpsubj != NULL)
54433 + task->acl = tmpsubj;
54434 + }
54435 + if (tmpsubj == NULL)
54436 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54437 + task->role);
54438 + if (task->acl) {
54439 + task->is_writable = 0;
54440 + /* ignore additional mmap checks for processes that are writable
54441 + by the default ACL */
54442 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54443 + if (unlikely(obj->mode & GR_WRITE))
54444 + task->is_writable = 1;
54445 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54446 + if (unlikely(obj->mode & GR_WRITE))
54447 + task->is_writable = 1;
54448 +
54449 + gr_set_proc_res(task);
54450 +
54451 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54452 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54453 +#endif
54454 + } else {
54455 + return 1;
54456 + }
54457 +
54458 + return 0;
54459 +}
54460 +
54461 +int
54462 +gr_set_acls(const int type)
54463 +{
54464 + struct task_struct *task, *task2;
54465 + struct acl_role_label *role = current->role;
54466 + __u16 acl_role_id = current->acl_role_id;
54467 + const struct cred *cred;
54468 + int ret;
54469 +
54470 + rcu_read_lock();
54471 + read_lock(&tasklist_lock);
54472 + read_lock(&grsec_exec_file_lock);
54473 + do_each_thread(task2, task) {
54474 + /* check to see if we're called from the exit handler,
54475 + if so, only replace ACLs that have inherited the admin
54476 + ACL */
54477 +
54478 + if (type && (task->role != role ||
54479 + task->acl_role_id != acl_role_id))
54480 + continue;
54481 +
54482 + task->acl_role_id = 0;
54483 + task->acl_sp_role = 0;
54484 +
54485 + if (task->exec_file) {
54486 + cred = __task_cred(task);
54487 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
54488 + ret = gr_apply_subject_to_task(task);
54489 + if (ret) {
54490 + read_unlock(&grsec_exec_file_lock);
54491 + read_unlock(&tasklist_lock);
54492 + rcu_read_unlock();
54493 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
54494 + return ret;
54495 + }
54496 + } else {
54497 + // it's a kernel process
54498 + task->role = kernel_role;
54499 + task->acl = kernel_role->root_label;
54500 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54501 + task->acl->mode &= ~GR_PROCFIND;
54502 +#endif
54503 + }
54504 + } while_each_thread(task2, task);
54505 + read_unlock(&grsec_exec_file_lock);
54506 + read_unlock(&tasklist_lock);
54507 + rcu_read_unlock();
54508 +
54509 + return 0;
54510 +}
54511 +
54512 +void
54513 +gr_learn_resource(const struct task_struct *task,
54514 + const int res, const unsigned long wanted, const int gt)
54515 +{
54516 + struct acl_subject_label *acl;
54517 + const struct cred *cred;
54518 +
54519 + if (unlikely((gr_status & GR_READY) &&
54520 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54521 + goto skip_reslog;
54522 +
54523 +#ifdef CONFIG_GRKERNSEC_RESLOG
54524 + gr_log_resource(task, res, wanted, gt);
54525 +#endif
54526 + skip_reslog:
54527 +
54528 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54529 + return;
54530 +
54531 + acl = task->acl;
54532 +
54533 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54534 + !(acl->resmask & (1 << (unsigned short) res))))
54535 + return;
54536 +
54537 + if (wanted >= acl->res[res].rlim_cur) {
54538 + unsigned long res_add;
54539 +
54540 + res_add = wanted;
54541 + switch (res) {
54542 + case RLIMIT_CPU:
54543 + res_add += GR_RLIM_CPU_BUMP;
54544 + break;
54545 + case RLIMIT_FSIZE:
54546 + res_add += GR_RLIM_FSIZE_BUMP;
54547 + break;
54548 + case RLIMIT_DATA:
54549 + res_add += GR_RLIM_DATA_BUMP;
54550 + break;
54551 + case RLIMIT_STACK:
54552 + res_add += GR_RLIM_STACK_BUMP;
54553 + break;
54554 + case RLIMIT_CORE:
54555 + res_add += GR_RLIM_CORE_BUMP;
54556 + break;
54557 + case RLIMIT_RSS:
54558 + res_add += GR_RLIM_RSS_BUMP;
54559 + break;
54560 + case RLIMIT_NPROC:
54561 + res_add += GR_RLIM_NPROC_BUMP;
54562 + break;
54563 + case RLIMIT_NOFILE:
54564 + res_add += GR_RLIM_NOFILE_BUMP;
54565 + break;
54566 + case RLIMIT_MEMLOCK:
54567 + res_add += GR_RLIM_MEMLOCK_BUMP;
54568 + break;
54569 + case RLIMIT_AS:
54570 + res_add += GR_RLIM_AS_BUMP;
54571 + break;
54572 + case RLIMIT_LOCKS:
54573 + res_add += GR_RLIM_LOCKS_BUMP;
54574 + break;
54575 + case RLIMIT_SIGPENDING:
54576 + res_add += GR_RLIM_SIGPENDING_BUMP;
54577 + break;
54578 + case RLIMIT_MSGQUEUE:
54579 + res_add += GR_RLIM_MSGQUEUE_BUMP;
54580 + break;
54581 + case RLIMIT_NICE:
54582 + res_add += GR_RLIM_NICE_BUMP;
54583 + break;
54584 + case RLIMIT_RTPRIO:
54585 + res_add += GR_RLIM_RTPRIO_BUMP;
54586 + break;
54587 + case RLIMIT_RTTIME:
54588 + res_add += GR_RLIM_RTTIME_BUMP;
54589 + break;
54590 + }
54591 +
54592 + acl->res[res].rlim_cur = res_add;
54593 +
54594 + if (wanted > acl->res[res].rlim_max)
54595 + acl->res[res].rlim_max = res_add;
54596 +
54597 + /* only log the subject filename, since resource logging is supported for
54598 + single-subject learning only */
54599 + rcu_read_lock();
54600 + cred = __task_cred(task);
54601 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54602 + task->role->roletype, cred->uid, cred->gid, acl->filename,
54603 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
54604 + "", (unsigned long) res, &task->signal->saved_ip);
54605 + rcu_read_unlock();
54606 + }
54607 +
54608 + return;
54609 +}
54610 +
54611 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54612 +void
54613 +pax_set_initial_flags(struct linux_binprm *bprm)
54614 +{
54615 + struct task_struct *task = current;
54616 + struct acl_subject_label *proc;
54617 + unsigned long flags;
54618 +
54619 + if (unlikely(!(gr_status & GR_READY)))
54620 + return;
54621 +
54622 + flags = pax_get_flags(task);
54623 +
54624 + proc = task->acl;
54625 +
54626 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54627 + flags &= ~MF_PAX_PAGEEXEC;
54628 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54629 + flags &= ~MF_PAX_SEGMEXEC;
54630 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54631 + flags &= ~MF_PAX_RANDMMAP;
54632 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54633 + flags &= ~MF_PAX_EMUTRAMP;
54634 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54635 + flags &= ~MF_PAX_MPROTECT;
54636 +
54637 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54638 + flags |= MF_PAX_PAGEEXEC;
54639 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54640 + flags |= MF_PAX_SEGMEXEC;
54641 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54642 + flags |= MF_PAX_RANDMMAP;
54643 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54644 + flags |= MF_PAX_EMUTRAMP;
54645 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54646 + flags |= MF_PAX_MPROTECT;
54647 +
54648 + pax_set_flags(task, flags);
54649 +
54650 + return;
54651 +}
54652 +#endif
54653 +
54654 +#ifdef CONFIG_SYSCTL
54655 +/* Eric Biederman likes breaking userland ABI and every inode-based security
54656 + system to save 35kb of memory */
54657 +
54658 +/* we modify the passed in filename, but adjust it back before returning */
54659 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
54660 +{
54661 + struct name_entry *nmatch;
54662 + char *p, *lastp = NULL;
54663 + struct acl_object_label *obj = NULL, *tmp;
54664 + struct acl_subject_label *tmpsubj;
54665 + char c = '\0';
54666 +
54667 + read_lock(&gr_inode_lock);
54668 +
54669 + p = name + len - 1;
54670 + do {
54671 + nmatch = lookup_name_entry(name);
54672 + if (lastp != NULL)
54673 + *lastp = c;
54674 +
54675 + if (nmatch == NULL)
54676 + goto next_component;
54677 + tmpsubj = current->acl;
54678 + do {
54679 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
54680 + if (obj != NULL) {
54681 + tmp = obj->globbed;
54682 + while (tmp) {
54683 + if (!glob_match(tmp->filename, name)) {
54684 + obj = tmp;
54685 + goto found_obj;
54686 + }
54687 + tmp = tmp->next;
54688 + }
54689 + goto found_obj;
54690 + }
54691 + } while ((tmpsubj = tmpsubj->parent_subject));
54692 +next_component:
54693 + /* end case */
54694 + if (p == name)
54695 + break;
54696 +
54697 + while (*p != '/')
54698 + p--;
54699 + if (p == name)
54700 + lastp = p + 1;
54701 + else {
54702 + lastp = p;
54703 + p--;
54704 + }
54705 + c = *lastp;
54706 + *lastp = '\0';
54707 + } while (1);
54708 +found_obj:
54709 + read_unlock(&gr_inode_lock);
54710 + /* obj returned will always be non-null */
54711 + return obj;
54712 +}
54713 +
54714 +/* returns 0 when allowing, non-zero on error
54715 + op of 0 is used for readdir, so we don't log the names of hidden files
54716 +*/
54717 +__u32
54718 +gr_handle_sysctl(const struct ctl_table *table, const int op)
54719 +{
54720 + struct ctl_table *tmp;
54721 + const char *proc_sys = "/proc/sys";
54722 + char *path;
54723 + struct acl_object_label *obj;
54724 + unsigned short len = 0, pos = 0, depth = 0, i;
54725 + __u32 err = 0;
54726 + __u32 mode = 0;
54727 +
54728 + if (unlikely(!(gr_status & GR_READY)))
54729 + return 0;
54730 +
54731 + /* for now, ignore operations on non-sysctl entries if it's not a
54732 + readdir*/
54733 + if (table->child != NULL && op != 0)
54734 + return 0;
54735 +
54736 + mode |= GR_FIND;
54737 + /* it's only a read if it's an entry, read on dirs is for readdir */
54738 + if (op & MAY_READ)
54739 + mode |= GR_READ;
54740 + if (op & MAY_WRITE)
54741 + mode |= GR_WRITE;
54742 +
54743 + preempt_disable();
54744 +
54745 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
54746 +
54747 + /* it's only a read/write if it's an actual entry, not a dir
54748 + (which are opened for readdir)
54749 + */
54750 +
54751 + /* convert the requested sysctl entry into a pathname */
54752 +
54753 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54754 + len += strlen(tmp->procname);
54755 + len++;
54756 + depth++;
54757 + }
54758 +
54759 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
54760 + /* deny */
54761 + goto out;
54762 + }
54763 +
54764 + memset(path, 0, PAGE_SIZE);
54765 +
54766 + memcpy(path, proc_sys, strlen(proc_sys));
54767 +
54768 + pos += strlen(proc_sys);
54769 +
54770 + for (; depth > 0; depth--) {
54771 + path[pos] = '/';
54772 + pos++;
54773 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
54774 + if (depth == i) {
54775 + memcpy(path + pos, tmp->procname,
54776 + strlen(tmp->procname));
54777 + pos += strlen(tmp->procname);
54778 + }
54779 + i++;
54780 + }
54781 + }
54782 +
54783 + obj = gr_lookup_by_name(path, pos);
54784 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
54785 +
54786 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
54787 + ((err & mode) != mode))) {
54788 + __u32 new_mode = mode;
54789 +
54790 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
54791 +
54792 + err = 0;
54793 + gr_log_learn_sysctl(path, new_mode);
54794 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
54795 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
54796 + err = -ENOENT;
54797 + } else if (!(err & GR_FIND)) {
54798 + err = -ENOENT;
54799 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
54800 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
54801 + path, (mode & GR_READ) ? " reading" : "",
54802 + (mode & GR_WRITE) ? " writing" : "");
54803 + err = -EACCES;
54804 + } else if ((err & mode) != mode) {
54805 + err = -EACCES;
54806 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
54807 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
54808 + path, (mode & GR_READ) ? " reading" : "",
54809 + (mode & GR_WRITE) ? " writing" : "");
54810 + err = 0;
54811 + } else
54812 + err = 0;
54813 +
54814 + out:
54815 + preempt_enable();
54816 +
54817 + return err;
54818 +}
54819 +#endif
54820 +
54821 +int
54822 +gr_handle_proc_ptrace(struct task_struct *task)
54823 +{
54824 + struct file *filp;
54825 + struct task_struct *tmp = task;
54826 + struct task_struct *curtemp = current;
54827 + __u32 retmode;
54828 +
54829 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54830 + if (unlikely(!(gr_status & GR_READY)))
54831 + return 0;
54832 +#endif
54833 +
54834 + read_lock(&tasklist_lock);
54835 + read_lock(&grsec_exec_file_lock);
54836 + filp = task->exec_file;
54837 +
54838 + while (tmp->pid > 0) {
54839 + if (tmp == curtemp)
54840 + break;
54841 + tmp = tmp->real_parent;
54842 + }
54843 +
54844 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54845 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54846 + read_unlock(&grsec_exec_file_lock);
54847 + read_unlock(&tasklist_lock);
54848 + return 1;
54849 + }
54850 +
54851 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54852 + if (!(gr_status & GR_READY)) {
54853 + read_unlock(&grsec_exec_file_lock);
54854 + read_unlock(&tasklist_lock);
54855 + return 0;
54856 + }
54857 +#endif
54858 +
54859 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54860 + read_unlock(&grsec_exec_file_lock);
54861 + read_unlock(&tasklist_lock);
54862 +
54863 + if (retmode & GR_NOPTRACE)
54864 + return 1;
54865 +
54866 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54867 + && (current->acl != task->acl || (current->acl != current->role->root_label
54868 + && current->pid != task->pid)))
54869 + return 1;
54870 +
54871 + return 0;
54872 +}
54873 +
54874 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54875 +{
54876 + if (unlikely(!(gr_status & GR_READY)))
54877 + return;
54878 +
54879 + if (!(current->role->roletype & GR_ROLE_GOD))
54880 + return;
54881 +
54882 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54883 + p->role->rolename, gr_task_roletype_to_char(p),
54884 + p->acl->filename);
54885 +}
54886 +
54887 +int
54888 +gr_handle_ptrace(struct task_struct *task, const long request)
54889 +{
54890 + struct task_struct *tmp = task;
54891 + struct task_struct *curtemp = current;
54892 + __u32 retmode;
54893 +
54894 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54895 + if (unlikely(!(gr_status & GR_READY)))
54896 + return 0;
54897 +#endif
54898 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
54899 + read_lock(&tasklist_lock);
54900 + while (tmp->pid > 0) {
54901 + if (tmp == curtemp)
54902 + break;
54903 + tmp = tmp->real_parent;
54904 + }
54905 +
54906 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54907 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54908 + read_unlock(&tasklist_lock);
54909 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54910 + return 1;
54911 + }
54912 + read_unlock(&tasklist_lock);
54913 + }
54914 +
54915 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54916 + if (!(gr_status & GR_READY))
54917 + return 0;
54918 +#endif
54919 +
54920 + read_lock(&grsec_exec_file_lock);
54921 + if (unlikely(!task->exec_file)) {
54922 + read_unlock(&grsec_exec_file_lock);
54923 + return 0;
54924 + }
54925 +
54926 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54927 + read_unlock(&grsec_exec_file_lock);
54928 +
54929 + if (retmode & GR_NOPTRACE) {
54930 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54931 + return 1;
54932 + }
54933 +
54934 + if (retmode & GR_PTRACERD) {
54935 + switch (request) {
54936 + case PTRACE_SEIZE:
54937 + case PTRACE_POKETEXT:
54938 + case PTRACE_POKEDATA:
54939 + case PTRACE_POKEUSR:
54940 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54941 + case PTRACE_SETREGS:
54942 + case PTRACE_SETFPREGS:
54943 +#endif
54944 +#ifdef CONFIG_X86
54945 + case PTRACE_SETFPXREGS:
54946 +#endif
54947 +#ifdef CONFIG_ALTIVEC
54948 + case PTRACE_SETVRREGS:
54949 +#endif
54950 + return 1;
54951 + default:
54952 + return 0;
54953 + }
54954 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54955 + !(current->role->roletype & GR_ROLE_GOD) &&
54956 + (current->acl != task->acl)) {
54957 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54958 + return 1;
54959 + }
54960 +
54961 + return 0;
54962 +}
54963 +
54964 +static int is_writable_mmap(const struct file *filp)
54965 +{
54966 + struct task_struct *task = current;
54967 + struct acl_object_label *obj, *obj2;
54968 +
54969 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54970 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54971 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54972 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54973 + task->role->root_label);
54974 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54975 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54976 + return 1;
54977 + }
54978 + }
54979 + return 0;
54980 +}
54981 +
54982 +int
54983 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54984 +{
54985 + __u32 mode;
54986 +
54987 + if (unlikely(!file || !(prot & PROT_EXEC)))
54988 + return 1;
54989 +
54990 + if (is_writable_mmap(file))
54991 + return 0;
54992 +
54993 + mode =
54994 + gr_search_file(file->f_path.dentry,
54995 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54996 + file->f_path.mnt);
54997 +
54998 + if (!gr_tpe_allow(file))
54999 + return 0;
55000 +
55001 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55002 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55003 + return 0;
55004 + } else if (unlikely(!(mode & GR_EXEC))) {
55005 + return 0;
55006 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55007 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55008 + return 1;
55009 + }
55010 +
55011 + return 1;
55012 +}
55013 +
55014 +int
55015 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
55016 +{
55017 + __u32 mode;
55018 +
55019 + if (unlikely(!file || !(prot & PROT_EXEC)))
55020 + return 1;
55021 +
55022 + if (is_writable_mmap(file))
55023 + return 0;
55024 +
55025 + mode =
55026 + gr_search_file(file->f_path.dentry,
55027 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
55028 + file->f_path.mnt);
55029 +
55030 + if (!gr_tpe_allow(file))
55031 + return 0;
55032 +
55033 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
55034 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55035 + return 0;
55036 + } else if (unlikely(!(mode & GR_EXEC))) {
55037 + return 0;
55038 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
55039 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
55040 + return 1;
55041 + }
55042 +
55043 + return 1;
55044 +}
55045 +
55046 +void
55047 +gr_acl_handle_psacct(struct task_struct *task, const long code)
55048 +{
55049 + unsigned long runtime;
55050 + unsigned long cputime;
55051 + unsigned int wday, cday;
55052 + __u8 whr, chr;
55053 + __u8 wmin, cmin;
55054 + __u8 wsec, csec;
55055 + struct timespec timeval;
55056 +
55057 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
55058 + !(task->acl->mode & GR_PROCACCT)))
55059 + return;
55060 +
55061 + do_posix_clock_monotonic_gettime(&timeval);
55062 + runtime = timeval.tv_sec - task->start_time.tv_sec;
55063 + wday = runtime / (3600 * 24);
55064 + runtime -= wday * (3600 * 24);
55065 + whr = runtime / 3600;
55066 + runtime -= whr * 3600;
55067 + wmin = runtime / 60;
55068 + runtime -= wmin * 60;
55069 + wsec = runtime;
55070 +
55071 + cputime = (task->utime + task->stime) / HZ;
55072 + cday = cputime / (3600 * 24);
55073 + cputime -= cday * (3600 * 24);
55074 + chr = cputime / 3600;
55075 + cputime -= chr * 3600;
55076 + cmin = cputime / 60;
55077 + cputime -= cmin * 60;
55078 + csec = cputime;
55079 +
55080 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
55081 +
55082 + return;
55083 +}
55084 +
55085 +void gr_set_kernel_label(struct task_struct *task)
55086 +{
55087 + if (gr_status & GR_READY) {
55088 + task->role = kernel_role;
55089 + task->acl = kernel_role->root_label;
55090 + }
55091 + return;
55092 +}
55093 +
55094 +#ifdef CONFIG_TASKSTATS
55095 +int gr_is_taskstats_denied(int pid)
55096 +{
55097 + struct task_struct *task;
55098 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55099 + const struct cred *cred;
55100 +#endif
55101 + int ret = 0;
55102 +
55103 + /* restrict taskstats viewing to un-chrooted root users
55104 + who have the 'view' subject flag if the RBAC system is enabled
55105 + */
55106 +
55107 + rcu_read_lock();
55108 + read_lock(&tasklist_lock);
55109 + task = find_task_by_vpid(pid);
55110 + if (task) {
55111 +#ifdef CONFIG_GRKERNSEC_CHROOT
55112 + if (proc_is_chrooted(task))
55113 + ret = -EACCES;
55114 +#endif
55115 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55116 + cred = __task_cred(task);
55117 +#ifdef CONFIG_GRKERNSEC_PROC_USER
55118 + if (cred->uid != 0)
55119 + ret = -EACCES;
55120 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55121 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
55122 + ret = -EACCES;
55123 +#endif
55124 +#endif
55125 + if (gr_status & GR_READY) {
55126 + if (!(task->acl->mode & GR_VIEW))
55127 + ret = -EACCES;
55128 + }
55129 + } else
55130 + ret = -ENOENT;
55131 +
55132 + read_unlock(&tasklist_lock);
55133 + rcu_read_unlock();
55134 +
55135 + return ret;
55136 +}
55137 +#endif
55138 +
55139 +/* AUXV entries are filled via a descendant of search_binary_handler
55140 + after we've already applied the subject for the target
55141 +*/
55142 +int gr_acl_enable_at_secure(void)
55143 +{
55144 + if (unlikely(!(gr_status & GR_READY)))
55145 + return 0;
55146 +
55147 + if (current->acl->mode & GR_ATSECURE)
55148 + return 1;
55149 +
55150 + return 0;
55151 +}
55152 +
55153 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
55154 +{
55155 + struct task_struct *task = current;
55156 + struct dentry *dentry = file->f_path.dentry;
55157 + struct vfsmount *mnt = file->f_path.mnt;
55158 + struct acl_object_label *obj, *tmp;
55159 + struct acl_subject_label *subj;
55160 + unsigned int bufsize;
55161 + int is_not_root;
55162 + char *path;
55163 + dev_t dev = __get_dev(dentry);
55164 +
55165 + if (unlikely(!(gr_status & GR_READY)))
55166 + return 1;
55167 +
55168 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
55169 + return 1;
55170 +
55171 + /* ignore Eric Biederman */
55172 + if (IS_PRIVATE(dentry->d_inode))
55173 + return 1;
55174 +
55175 + subj = task->acl;
55176 + do {
55177 + obj = lookup_acl_obj_label(ino, dev, subj);
55178 + if (obj != NULL)
55179 + return (obj->mode & GR_FIND) ? 1 : 0;
55180 + } while ((subj = subj->parent_subject));
55181 +
55182 + /* this is purely an optimization since we're looking for an object
55183 + for the directory we're doing a readdir on
55184 + if it's possible for any globbed object to match the entry we're
55185 + filling into the directory, then the object we find here will be
55186 + an anchor point with attached globbed objects
55187 + */
55188 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
55189 + if (obj->globbed == NULL)
55190 + return (obj->mode & GR_FIND) ? 1 : 0;
55191 +
55192 + is_not_root = ((obj->filename[0] == '/') &&
55193 + (obj->filename[1] == '\0')) ? 0 : 1;
55194 + bufsize = PAGE_SIZE - namelen - is_not_root;
55195 +
55196 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
55197 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
55198 + return 1;
55199 +
55200 + preempt_disable();
55201 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
55202 + bufsize);
55203 +
55204 + bufsize = strlen(path);
55205 +
55206 + /* if base is "/", don't append an additional slash */
55207 + if (is_not_root)
55208 + *(path + bufsize) = '/';
55209 + memcpy(path + bufsize + is_not_root, name, namelen);
55210 + *(path + bufsize + namelen + is_not_root) = '\0';
55211 +
55212 + tmp = obj->globbed;
55213 + while (tmp) {
55214 + if (!glob_match(tmp->filename, path)) {
55215 + preempt_enable();
55216 + return (tmp->mode & GR_FIND) ? 1 : 0;
55217 + }
55218 + tmp = tmp->next;
55219 + }
55220 + preempt_enable();
55221 + return (obj->mode & GR_FIND) ? 1 : 0;
55222 +}
55223 +
55224 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
55225 +EXPORT_SYMBOL(gr_acl_is_enabled);
55226 +#endif
55227 +EXPORT_SYMBOL(gr_learn_resource);
55228 +EXPORT_SYMBOL(gr_set_kernel_label);
55229 +#ifdef CONFIG_SECURITY
55230 +EXPORT_SYMBOL(gr_check_user_change);
55231 +EXPORT_SYMBOL(gr_check_group_change);
55232 +#endif
55233 +
55234 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
55235 new file mode 100644
55236 index 0000000..34fefda
55237 --- /dev/null
55238 +++ b/grsecurity/gracl_alloc.c
55239 @@ -0,0 +1,105 @@
55240 +#include <linux/kernel.h>
55241 +#include <linux/mm.h>
55242 +#include <linux/slab.h>
55243 +#include <linux/vmalloc.h>
55244 +#include <linux/gracl.h>
55245 +#include <linux/grsecurity.h>
55246 +
55247 +static unsigned long alloc_stack_next = 1;
55248 +static unsigned long alloc_stack_size = 1;
55249 +static void **alloc_stack;
55250 +
55251 +static __inline__ int
55252 +alloc_pop(void)
55253 +{
55254 + if (alloc_stack_next == 1)
55255 + return 0;
55256 +
55257 + kfree(alloc_stack[alloc_stack_next - 2]);
55258 +
55259 + alloc_stack_next--;
55260 +
55261 + return 1;
55262 +}
55263 +
55264 +static __inline__ int
55265 +alloc_push(void *buf)
55266 +{
55267 + if (alloc_stack_next >= alloc_stack_size)
55268 + return 1;
55269 +
55270 + alloc_stack[alloc_stack_next - 1] = buf;
55271 +
55272 + alloc_stack_next++;
55273 +
55274 + return 0;
55275 +}
55276 +
55277 +void *
55278 +acl_alloc(unsigned long len)
55279 +{
55280 + void *ret = NULL;
55281 +
55282 + if (!len || len > PAGE_SIZE)
55283 + goto out;
55284 +
55285 + ret = kmalloc(len, GFP_KERNEL);
55286 +
55287 + if (ret) {
55288 + if (alloc_push(ret)) {
55289 + kfree(ret);
55290 + ret = NULL;
55291 + }
55292 + }
55293 +
55294 +out:
55295 + return ret;
55296 +}
55297 +
55298 +void *
55299 +acl_alloc_num(unsigned long num, unsigned long len)
55300 +{
55301 + if (!len || (num > (PAGE_SIZE / len)))
55302 + return NULL;
55303 +
55304 + return acl_alloc(num * len);
55305 +}
55306 +
55307 +void
55308 +acl_free_all(void)
55309 +{
55310 + if (gr_acl_is_enabled() || !alloc_stack)
55311 + return;
55312 +
55313 + while (alloc_pop()) ;
55314 +
55315 + if (alloc_stack) {
55316 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
55317 + kfree(alloc_stack);
55318 + else
55319 + vfree(alloc_stack);
55320 + }
55321 +
55322 + alloc_stack = NULL;
55323 + alloc_stack_size = 1;
55324 + alloc_stack_next = 1;
55325 +
55326 + return;
55327 +}
55328 +
55329 +int
55330 +acl_alloc_stack_init(unsigned long size)
55331 +{
55332 + if ((size * sizeof (void *)) <= PAGE_SIZE)
55333 + alloc_stack =
55334 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
55335 + else
55336 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
55337 +
55338 + alloc_stack_size = size;
55339 +
55340 + if (!alloc_stack)
55341 + return 0;
55342 + else
55343 + return 1;
55344 +}
55345 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
55346 new file mode 100644
55347 index 0000000..6d21049
55348 --- /dev/null
55349 +++ b/grsecurity/gracl_cap.c
55350 @@ -0,0 +1,110 @@
55351 +#include <linux/kernel.h>
55352 +#include <linux/module.h>
55353 +#include <linux/sched.h>
55354 +#include <linux/gracl.h>
55355 +#include <linux/grsecurity.h>
55356 +#include <linux/grinternal.h>
55357 +
55358 +extern const char *captab_log[];
55359 +extern int captab_log_entries;
55360 +
55361 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
55362 +{
55363 + struct acl_subject_label *curracl;
55364 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55365 + kernel_cap_t cap_audit = __cap_empty_set;
55366 +
55367 + if (!gr_acl_is_enabled())
55368 + return 1;
55369 +
55370 + curracl = task->acl;
55371 +
55372 + cap_drop = curracl->cap_lower;
55373 + cap_mask = curracl->cap_mask;
55374 + cap_audit = curracl->cap_invert_audit;
55375 +
55376 + while ((curracl = curracl->parent_subject)) {
55377 + /* if the cap isn't specified in the current computed mask but is specified in the
55378 + current level subject, and is lowered in the current level subject, then add
55379 + it to the set of dropped capabilities
55380 + otherwise, add the current level subject's mask to the current computed mask
55381 + */
55382 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55383 + cap_raise(cap_mask, cap);
55384 + if (cap_raised(curracl->cap_lower, cap))
55385 + cap_raise(cap_drop, cap);
55386 + if (cap_raised(curracl->cap_invert_audit, cap))
55387 + cap_raise(cap_audit, cap);
55388 + }
55389 + }
55390 +
55391 + if (!cap_raised(cap_drop, cap)) {
55392 + if (cap_raised(cap_audit, cap))
55393 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
55394 + return 1;
55395 + }
55396 +
55397 + curracl = task->acl;
55398 +
55399 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
55400 + && cap_raised(cred->cap_effective, cap)) {
55401 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
55402 + task->role->roletype, cred->uid,
55403 + cred->gid, task->exec_file ?
55404 + gr_to_filename(task->exec_file->f_path.dentry,
55405 + task->exec_file->f_path.mnt) : curracl->filename,
55406 + curracl->filename, 0UL,
55407 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
55408 + return 1;
55409 + }
55410 +
55411 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
55412 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
55413 +
55414 + return 0;
55415 +}
55416 +
55417 +int
55418 +gr_acl_is_capable(const int cap)
55419 +{
55420 + return gr_task_acl_is_capable(current, current_cred(), cap);
55421 +}
55422 +
55423 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
55424 +{
55425 + struct acl_subject_label *curracl;
55426 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
55427 +
55428 + if (!gr_acl_is_enabled())
55429 + return 1;
55430 +
55431 + curracl = task->acl;
55432 +
55433 + cap_drop = curracl->cap_lower;
55434 + cap_mask = curracl->cap_mask;
55435 +
55436 + while ((curracl = curracl->parent_subject)) {
55437 + /* if the cap isn't specified in the current computed mask but is specified in the
55438 + current level subject, and is lowered in the current level subject, then add
55439 + it to the set of dropped capabilities
55440 + otherwise, add the current level subject's mask to the current computed mask
55441 + */
55442 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
55443 + cap_raise(cap_mask, cap);
55444 + if (cap_raised(curracl->cap_lower, cap))
55445 + cap_raise(cap_drop, cap);
55446 + }
55447 + }
55448 +
55449 + if (!cap_raised(cap_drop, cap))
55450 + return 1;
55451 +
55452 + return 0;
55453 +}
55454 +
55455 +int
55456 +gr_acl_is_capable_nolog(const int cap)
55457 +{
55458 + return gr_task_acl_is_capable_nolog(current, cap);
55459 +}
55460 +
55461 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
55462 new file mode 100644
55463 index 0000000..88d0e87
55464 --- /dev/null
55465 +++ b/grsecurity/gracl_fs.c
55466 @@ -0,0 +1,435 @@
55467 +#include <linux/kernel.h>
55468 +#include <linux/sched.h>
55469 +#include <linux/types.h>
55470 +#include <linux/fs.h>
55471 +#include <linux/file.h>
55472 +#include <linux/stat.h>
55473 +#include <linux/grsecurity.h>
55474 +#include <linux/grinternal.h>
55475 +#include <linux/gracl.h>
55476 +
55477 +umode_t
55478 +gr_acl_umask(void)
55479 +{
55480 + if (unlikely(!gr_acl_is_enabled()))
55481 + return 0;
55482 +
55483 + return current->role->umask;
55484 +}
55485 +
55486 +__u32
55487 +gr_acl_handle_hidden_file(const struct dentry * dentry,
55488 + const struct vfsmount * mnt)
55489 +{
55490 + __u32 mode;
55491 +
55492 + if (unlikely(!dentry->d_inode))
55493 + return GR_FIND;
55494 +
55495 + mode =
55496 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
55497 +
55498 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
55499 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55500 + return mode;
55501 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
55502 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
55503 + return 0;
55504 + } else if (unlikely(!(mode & GR_FIND)))
55505 + return 0;
55506 +
55507 + return GR_FIND;
55508 +}
55509 +
55510 +__u32
55511 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
55512 + int acc_mode)
55513 +{
55514 + __u32 reqmode = GR_FIND;
55515 + __u32 mode;
55516 +
55517 + if (unlikely(!dentry->d_inode))
55518 + return reqmode;
55519 +
55520 + if (acc_mode & MAY_APPEND)
55521 + reqmode |= GR_APPEND;
55522 + else if (acc_mode & MAY_WRITE)
55523 + reqmode |= GR_WRITE;
55524 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
55525 + reqmode |= GR_READ;
55526 +
55527 + mode =
55528 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55529 + mnt);
55530 +
55531 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55532 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55533 + reqmode & GR_READ ? " reading" : "",
55534 + reqmode & GR_WRITE ? " writing" : reqmode &
55535 + GR_APPEND ? " appending" : "");
55536 + return reqmode;
55537 + } else
55538 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55539 + {
55540 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
55541 + reqmode & GR_READ ? " reading" : "",
55542 + reqmode & GR_WRITE ? " writing" : reqmode &
55543 + GR_APPEND ? " appending" : "");
55544 + return 0;
55545 + } else if (unlikely((mode & reqmode) != reqmode))
55546 + return 0;
55547 +
55548 + return reqmode;
55549 +}
55550 +
55551 +__u32
55552 +gr_acl_handle_creat(const struct dentry * dentry,
55553 + const struct dentry * p_dentry,
55554 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
55555 + const int imode)
55556 +{
55557 + __u32 reqmode = GR_WRITE | GR_CREATE;
55558 + __u32 mode;
55559 +
55560 + if (acc_mode & MAY_APPEND)
55561 + reqmode |= GR_APPEND;
55562 + // if a directory was required or the directory already exists, then
55563 + // don't count this open as a read
55564 + if ((acc_mode & MAY_READ) &&
55565 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
55566 + reqmode |= GR_READ;
55567 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
55568 + reqmode |= GR_SETID;
55569 +
55570 + mode =
55571 + gr_check_create(dentry, p_dentry, p_mnt,
55572 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55573 +
55574 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55575 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55576 + reqmode & GR_READ ? " reading" : "",
55577 + reqmode & GR_WRITE ? " writing" : reqmode &
55578 + GR_APPEND ? " appending" : "");
55579 + return reqmode;
55580 + } else
55581 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55582 + {
55583 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55584 + reqmode & GR_READ ? " reading" : "",
55585 + reqmode & GR_WRITE ? " writing" : reqmode &
55586 + GR_APPEND ? " appending" : "");
55587 + return 0;
55588 + } else if (unlikely((mode & reqmode) != reqmode))
55589 + return 0;
55590 +
55591 + return reqmode;
55592 +}
55593 +
55594 +__u32
55595 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55596 + const int fmode)
55597 +{
55598 + __u32 mode, reqmode = GR_FIND;
55599 +
55600 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55601 + reqmode |= GR_EXEC;
55602 + if (fmode & S_IWOTH)
55603 + reqmode |= GR_WRITE;
55604 + if (fmode & S_IROTH)
55605 + reqmode |= GR_READ;
55606 +
55607 + mode =
55608 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55609 + mnt);
55610 +
55611 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55612 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55613 + reqmode & GR_READ ? " reading" : "",
55614 + reqmode & GR_WRITE ? " writing" : "",
55615 + reqmode & GR_EXEC ? " executing" : "");
55616 + return reqmode;
55617 + } else
55618 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55619 + {
55620 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55621 + reqmode & GR_READ ? " reading" : "",
55622 + reqmode & GR_WRITE ? " writing" : "",
55623 + reqmode & GR_EXEC ? " executing" : "");
55624 + return 0;
55625 + } else if (unlikely((mode & reqmode) != reqmode))
55626 + return 0;
55627 +
55628 + return reqmode;
55629 +}
55630 +
55631 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55632 +{
55633 + __u32 mode;
55634 +
55635 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55636 +
55637 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55638 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55639 + return mode;
55640 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55641 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55642 + return 0;
55643 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55644 + return 0;
55645 +
55646 + return (reqmode);
55647 +}
55648 +
55649 +__u32
55650 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55651 +{
55652 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55653 +}
55654 +
55655 +__u32
55656 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55657 +{
55658 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55659 +}
55660 +
55661 +__u32
55662 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55663 +{
55664 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55665 +}
55666 +
55667 +__u32
55668 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55669 +{
55670 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55671 +}
55672 +
55673 +__u32
55674 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55675 + umode_t *modeptr)
55676 +{
55677 + umode_t mode;
55678 +
55679 + *modeptr &= ~gr_acl_umask();
55680 + mode = *modeptr;
55681 +
55682 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55683 + return 1;
55684 +
55685 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
55686 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55687 + GR_CHMOD_ACL_MSG);
55688 + } else {
55689 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55690 + }
55691 +}
55692 +
55693 +__u32
55694 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55695 +{
55696 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55697 +}
55698 +
55699 +__u32
55700 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55701 +{
55702 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55703 +}
55704 +
55705 +__u32
55706 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55707 +{
55708 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55709 +}
55710 +
55711 +__u32
55712 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55713 +{
55714 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55715 + GR_UNIXCONNECT_ACL_MSG);
55716 +}
55717 +
55718 +/* hardlinks require at minimum create and link permission,
55719 + any additional privilege required is based on the
55720 + privilege of the file being linked to
55721 +*/
55722 +__u32
55723 +gr_acl_handle_link(const struct dentry * new_dentry,
55724 + const struct dentry * parent_dentry,
55725 + const struct vfsmount * parent_mnt,
55726 + const struct dentry * old_dentry,
55727 + const struct vfsmount * old_mnt, const char *to)
55728 +{
55729 + __u32 mode;
55730 + __u32 needmode = GR_CREATE | GR_LINK;
55731 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55732 +
55733 + mode =
55734 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55735 + old_mnt);
55736 +
55737 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55738 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55739 + return mode;
55740 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55741 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55742 + return 0;
55743 + } else if (unlikely((mode & needmode) != needmode))
55744 + return 0;
55745 +
55746 + return 1;
55747 +}
55748 +
55749 +__u32
55750 +gr_acl_handle_symlink(const struct dentry * new_dentry,
55751 + const struct dentry * parent_dentry,
55752 + const struct vfsmount * parent_mnt, const char *from)
55753 +{
55754 + __u32 needmode = GR_WRITE | GR_CREATE;
55755 + __u32 mode;
55756 +
55757 + mode =
55758 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
55759 + GR_CREATE | GR_AUDIT_CREATE |
55760 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55761 +
55762 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55763 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55764 + return mode;
55765 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55766 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55767 + return 0;
55768 + } else if (unlikely((mode & needmode) != needmode))
55769 + return 0;
55770 +
55771 + return (GR_WRITE | GR_CREATE);
55772 +}
55773 +
55774 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55775 +{
55776 + __u32 mode;
55777 +
55778 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55779 +
55780 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55781 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55782 + return mode;
55783 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55784 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55785 + return 0;
55786 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
55787 + return 0;
55788 +
55789 + return (reqmode);
55790 +}
55791 +
55792 +__u32
55793 +gr_acl_handle_mknod(const struct dentry * new_dentry,
55794 + const struct dentry * parent_dentry,
55795 + const struct vfsmount * parent_mnt,
55796 + const int mode)
55797 +{
55798 + __u32 reqmode = GR_WRITE | GR_CREATE;
55799 + if (unlikely(mode & (S_ISUID | S_ISGID)))
55800 + reqmode |= GR_SETID;
55801 +
55802 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55803 + reqmode, GR_MKNOD_ACL_MSG);
55804 +}
55805 +
55806 +__u32
55807 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55808 + const struct dentry *parent_dentry,
55809 + const struct vfsmount *parent_mnt)
55810 +{
55811 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55812 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55813 +}
55814 +
55815 +#define RENAME_CHECK_SUCCESS(old, new) \
55816 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55817 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55818 +
55819 +int
55820 +gr_acl_handle_rename(struct dentry *new_dentry,
55821 + struct dentry *parent_dentry,
55822 + const struct vfsmount *parent_mnt,
55823 + struct dentry *old_dentry,
55824 + struct inode *old_parent_inode,
55825 + struct vfsmount *old_mnt, const char *newname)
55826 +{
55827 + __u32 comp1, comp2;
55828 + int error = 0;
55829 +
55830 + if (unlikely(!gr_acl_is_enabled()))
55831 + return 0;
55832 +
55833 + if (!new_dentry->d_inode) {
55834 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55835 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55836 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55837 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55838 + GR_DELETE | GR_AUDIT_DELETE |
55839 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55840 + GR_SUPPRESS, old_mnt);
55841 + } else {
55842 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55843 + GR_CREATE | GR_DELETE |
55844 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55845 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55846 + GR_SUPPRESS, parent_mnt);
55847 + comp2 =
55848 + gr_search_file(old_dentry,
55849 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55850 + GR_DELETE | GR_AUDIT_DELETE |
55851 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55852 + }
55853 +
55854 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55855 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55856 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55857 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55858 + && !(comp2 & GR_SUPPRESS)) {
55859 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55860 + error = -EACCES;
55861 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55862 + error = -EACCES;
55863 +
55864 + return error;
55865 +}
55866 +
55867 +void
55868 +gr_acl_handle_exit(void)
55869 +{
55870 + u16 id;
55871 + char *rolename;
55872 + struct file *exec_file;
55873 +
55874 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55875 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55876 + id = current->acl_role_id;
55877 + rolename = current->role->rolename;
55878 + gr_set_acls(1);
55879 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55880 + }
55881 +
55882 + write_lock(&grsec_exec_file_lock);
55883 + exec_file = current->exec_file;
55884 + current->exec_file = NULL;
55885 + write_unlock(&grsec_exec_file_lock);
55886 +
55887 + if (exec_file)
55888 + fput(exec_file);
55889 +}
55890 +
55891 +int
55892 +gr_acl_handle_procpidmem(const struct task_struct *task)
55893 +{
55894 + if (unlikely(!gr_acl_is_enabled()))
55895 + return 0;
55896 +
55897 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55898 + return -EACCES;
55899 +
55900 + return 0;
55901 +}
55902 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55903 new file mode 100644
55904 index 0000000..58800a7
55905 --- /dev/null
55906 +++ b/grsecurity/gracl_ip.c
55907 @@ -0,0 +1,384 @@
55908 +#include <linux/kernel.h>
55909 +#include <asm/uaccess.h>
55910 +#include <asm/errno.h>
55911 +#include <net/sock.h>
55912 +#include <linux/file.h>
55913 +#include <linux/fs.h>
55914 +#include <linux/net.h>
55915 +#include <linux/in.h>
55916 +#include <linux/skbuff.h>
55917 +#include <linux/ip.h>
55918 +#include <linux/udp.h>
55919 +#include <linux/types.h>
55920 +#include <linux/sched.h>
55921 +#include <linux/netdevice.h>
55922 +#include <linux/inetdevice.h>
55923 +#include <linux/gracl.h>
55924 +#include <linux/grsecurity.h>
55925 +#include <linux/grinternal.h>
55926 +
55927 +#define GR_BIND 0x01
55928 +#define GR_CONNECT 0x02
55929 +#define GR_INVERT 0x04
55930 +#define GR_BINDOVERRIDE 0x08
55931 +#define GR_CONNECTOVERRIDE 0x10
55932 +#define GR_SOCK_FAMILY 0x20
55933 +
55934 +static const char * gr_protocols[IPPROTO_MAX] = {
55935 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55936 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55937 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55938 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55939 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55940 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55941 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55942 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55943 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55944 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55945 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nsfnet-igp", "dgp", "tcf",
55946 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55947 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55948 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55949 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55950 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55951 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
55952 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55953 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55954 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55955 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55956 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55957 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55958 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55959 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55960 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55961 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55962 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55963 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55964 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55965 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55966 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55967 + };
55968 +
55969 +static const char * gr_socktypes[SOCK_MAX] = {
55970 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55971 + "unknown:7", "unknown:8", "unknown:9", "packet"
55972 + };
55973 +
55974 +static const char * gr_sockfamilies[AF_MAX+1] = {
55975 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55976 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55977 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55978 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
55979 + };
55980 +
55981 +const char *
55982 +gr_proto_to_name(unsigned char proto)
55983 +{
55984 + return gr_protocols[proto];
55985 +}
55986 +
55987 +const char *
55988 +gr_socktype_to_name(unsigned char type)
55989 +{
55990 + return gr_socktypes[type];
55991 +}
55992 +
55993 +const char *
55994 +gr_sockfamily_to_name(unsigned char family)
55995 +{
55996 + return gr_sockfamilies[family];
55997 +}
55998 +
55999 +int
56000 +gr_search_socket(const int domain, const int type, const int protocol)
56001 +{
56002 + struct acl_subject_label *curr;
56003 + const struct cred *cred = current_cred();
56004 +
56005 + if (unlikely(!gr_acl_is_enabled()))
56006 + goto exit;
56007 +
56008 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
56009 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
56010 + goto exit; // let the kernel handle it
56011 +
56012 + curr = current->acl;
56013 +
56014 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
56015 + /* the family is allowed, if this is PF_INET allow it only if
56016 + the extra sock type/protocol checks pass */
56017 + if (domain == PF_INET)
56018 + goto inet_check;
56019 + goto exit;
56020 + } else {
56021 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56022 + __u32 fakeip = 0;
56023 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56024 + current->role->roletype, cred->uid,
56025 + cred->gid, current->exec_file ?
56026 + gr_to_filename(current->exec_file->f_path.dentry,
56027 + current->exec_file->f_path.mnt) :
56028 + curr->filename, curr->filename,
56029 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
56030 + &current->signal->saved_ip);
56031 + goto exit;
56032 + }
56033 + goto exit_fail;
56034 + }
56035 +
56036 +inet_check:
56037 + /* the rest of this checking is for IPv4 only */
56038 + if (!curr->ips)
56039 + goto exit;
56040 +
56041 + if ((curr->ip_type & (1 << type)) &&
56042 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
56043 + goto exit;
56044 +
56045 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56046 + /* we don't place acls on raw sockets , and sometimes
56047 + dgram/ip sockets are opened for ioctl and not
56048 + bind/connect, so we'll fake a bind learn log */
56049 + if (type == SOCK_RAW || type == SOCK_PACKET) {
56050 + __u32 fakeip = 0;
56051 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56052 + current->role->roletype, cred->uid,
56053 + cred->gid, current->exec_file ?
56054 + gr_to_filename(current->exec_file->f_path.dentry,
56055 + current->exec_file->f_path.mnt) :
56056 + curr->filename, curr->filename,
56057 + &fakeip, 0, type,
56058 + protocol, GR_CONNECT, &current->signal->saved_ip);
56059 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
56060 + __u32 fakeip = 0;
56061 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56062 + current->role->roletype, cred->uid,
56063 + cred->gid, current->exec_file ?
56064 + gr_to_filename(current->exec_file->f_path.dentry,
56065 + current->exec_file->f_path.mnt) :
56066 + curr->filename, curr->filename,
56067 + &fakeip, 0, type,
56068 + protocol, GR_BIND, &current->signal->saved_ip);
56069 + }
56070 + /* we'll log when they use connect or bind */
56071 + goto exit;
56072 + }
56073 +
56074 +exit_fail:
56075 + if (domain == PF_INET)
56076 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
56077 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
56078 + else
56079 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
56080 + gr_socktype_to_name(type), protocol);
56081 +
56082 + return 0;
56083 +exit:
56084 + return 1;
56085 +}
56086 +
56087 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
56088 +{
56089 + if ((ip->mode & mode) &&
56090 + (ip_port >= ip->low) &&
56091 + (ip_port <= ip->high) &&
56092 + ((ntohl(ip_addr) & our_netmask) ==
56093 + (ntohl(our_addr) & our_netmask))
56094 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
56095 + && (ip->type & (1 << type))) {
56096 + if (ip->mode & GR_INVERT)
56097 + return 2; // specifically denied
56098 + else
56099 + return 1; // allowed
56100 + }
56101 +
56102 + return 0; // not specifically allowed, may continue parsing
56103 +}
56104 +
56105 +static int
56106 +gr_search_connectbind(const int full_mode, struct sock *sk,
56107 + struct sockaddr_in *addr, const int type)
56108 +{
56109 + char iface[IFNAMSIZ] = {0};
56110 + struct acl_subject_label *curr;
56111 + struct acl_ip_label *ip;
56112 + struct inet_sock *isk;
56113 + struct net_device *dev;
56114 + struct in_device *idev;
56115 + unsigned long i;
56116 + int ret;
56117 + int mode = full_mode & (GR_BIND | GR_CONNECT);
56118 + __u32 ip_addr = 0;
56119 + __u32 our_addr;
56120 + __u32 our_netmask;
56121 + char *p;
56122 + __u16 ip_port = 0;
56123 + const struct cred *cred = current_cred();
56124 +
56125 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
56126 + return 0;
56127 +
56128 + curr = current->acl;
56129 + isk = inet_sk(sk);
56130 +
56131 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
56132 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
56133 + addr->sin_addr.s_addr = curr->inaddr_any_override;
56134 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
56135 + struct sockaddr_in saddr;
56136 + int err;
56137 +
56138 + saddr.sin_family = AF_INET;
56139 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
56140 + saddr.sin_port = isk->inet_sport;
56141 +
56142 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56143 + if (err)
56144 + return err;
56145 +
56146 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
56147 + if (err)
56148 + return err;
56149 + }
56150 +
56151 + if (!curr->ips)
56152 + return 0;
56153 +
56154 + ip_addr = addr->sin_addr.s_addr;
56155 + ip_port = ntohs(addr->sin_port);
56156 +
56157 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
56158 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
56159 + current->role->roletype, cred->uid,
56160 + cred->gid, current->exec_file ?
56161 + gr_to_filename(current->exec_file->f_path.dentry,
56162 + current->exec_file->f_path.mnt) :
56163 + curr->filename, curr->filename,
56164 + &ip_addr, ip_port, type,
56165 + sk->sk_protocol, mode, &current->signal->saved_ip);
56166 + return 0;
56167 + }
56168 +
56169 + for (i = 0; i < curr->ip_num; i++) {
56170 + ip = *(curr->ips + i);
56171 + if (ip->iface != NULL) {
56172 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
56173 + p = strchr(iface, ':');
56174 + if (p != NULL)
56175 + *p = '\0';
56176 + dev = dev_get_by_name(sock_net(sk), iface);
56177 + if (dev == NULL)
56178 + continue;
56179 + idev = in_dev_get(dev);
56180 + if (idev == NULL) {
56181 + dev_put(dev);
56182 + continue;
56183 + }
56184 + rcu_read_lock();
56185 + for_ifa(idev) {
56186 + if (!strcmp(ip->iface, ifa->ifa_label)) {
56187 + our_addr = ifa->ifa_address;
56188 + our_netmask = 0xffffffff;
56189 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56190 + if (ret == 1) {
56191 + rcu_read_unlock();
56192 + in_dev_put(idev);
56193 + dev_put(dev);
56194 + return 0;
56195 + } else if (ret == 2) {
56196 + rcu_read_unlock();
56197 + in_dev_put(idev);
56198 + dev_put(dev);
56199 + goto denied;
56200 + }
56201 + }
56202 + } endfor_ifa(idev);
56203 + rcu_read_unlock();
56204 + in_dev_put(idev);
56205 + dev_put(dev);
56206 + } else {
56207 + our_addr = ip->addr;
56208 + our_netmask = ip->netmask;
56209 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
56210 + if (ret == 1)
56211 + return 0;
56212 + else if (ret == 2)
56213 + goto denied;
56214 + }
56215 + }
56216 +
56217 +denied:
56218 + if (mode == GR_BIND)
56219 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56220 + else if (mode == GR_CONNECT)
56221 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
56222 +
56223 + return -EACCES;
56224 +}
56225 +
56226 +int
56227 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
56228 +{
56229 + /* always allow disconnection of dgram sockets with connect */
56230 + if (addr->sin_family == AF_UNSPEC)
56231 + return 0;
56232 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
56233 +}
56234 +
56235 +int
56236 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
56237 +{
56238 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
56239 +}
56240 +
56241 +int gr_search_listen(struct socket *sock)
56242 +{
56243 + struct sock *sk = sock->sk;
56244 + struct sockaddr_in addr;
56245 +
56246 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56247 + addr.sin_port = inet_sk(sk)->inet_sport;
56248 +
56249 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56250 +}
56251 +
56252 +int gr_search_accept(struct socket *sock)
56253 +{
56254 + struct sock *sk = sock->sk;
56255 + struct sockaddr_in addr;
56256 +
56257 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
56258 + addr.sin_port = inet_sk(sk)->inet_sport;
56259 +
56260 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
56261 +}
56262 +
56263 +int
56264 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
56265 +{
56266 + if (addr)
56267 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
56268 + else {
56269 + struct sockaddr_in sin;
56270 + const struct inet_sock *inet = inet_sk(sk);
56271 +
56272 + sin.sin_addr.s_addr = inet->inet_daddr;
56273 + sin.sin_port = inet->inet_dport;
56274 +
56275 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56276 + }
56277 +}
56278 +
56279 +int
56280 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
56281 +{
56282 + struct sockaddr_in sin;
56283 +
56284 + if (unlikely(skb->len < sizeof (struct udphdr)))
56285 + return 0; // skip this packet
56286 +
56287 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
56288 + sin.sin_port = udp_hdr(skb)->source;
56289 +
56290 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
56291 +}
56292 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
56293 new file mode 100644
56294 index 0000000..25f54ef
56295 --- /dev/null
56296 +++ b/grsecurity/gracl_learn.c
56297 @@ -0,0 +1,207 @@
56298 +#include <linux/kernel.h>
56299 +#include <linux/mm.h>
56300 +#include <linux/sched.h>
56301 +#include <linux/poll.h>
56302 +#include <linux/string.h>
56303 +#include <linux/file.h>
56304 +#include <linux/types.h>
56305 +#include <linux/vmalloc.h>
56306 +#include <linux/grinternal.h>
56307 +
56308 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
56309 + size_t count, loff_t *ppos);
56310 +extern int gr_acl_is_enabled(void);
56311 +
56312 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
56313 +static int gr_learn_attached;
56314 +
56315 +/* use a 512k buffer */
56316 +#define LEARN_BUFFER_SIZE (512 * 1024)
56317 +
56318 +static DEFINE_SPINLOCK(gr_learn_lock);
56319 +static DEFINE_MUTEX(gr_learn_user_mutex);
56320 +
56321 +/* we need to maintain two buffers, so that the kernel context of grlearn
56322 + uses a semaphore around the userspace copying, and the other kernel contexts
56323 + use a spinlock when copying into the buffer, since they cannot sleep
56324 +*/
56325 +static char *learn_buffer;
56326 +static char *learn_buffer_user;
56327 +static int learn_buffer_len;
56328 +static int learn_buffer_user_len;
56329 +
56330 +static ssize_t
56331 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
56332 +{
56333 + DECLARE_WAITQUEUE(wait, current);
56334 + ssize_t retval = 0;
56335 +
56336 + add_wait_queue(&learn_wait, &wait);
56337 + set_current_state(TASK_INTERRUPTIBLE);
56338 + do {
56339 + mutex_lock(&gr_learn_user_mutex);
56340 + spin_lock(&gr_learn_lock);
56341 + if (learn_buffer_len)
56342 + break;
56343 + spin_unlock(&gr_learn_lock);
56344 + mutex_unlock(&gr_learn_user_mutex);
56345 + if (file->f_flags & O_NONBLOCK) {
56346 + retval = -EAGAIN;
56347 + goto out;
56348 + }
56349 + if (signal_pending(current)) {
56350 + retval = -ERESTARTSYS;
56351 + goto out;
56352 + }
56353 +
56354 + schedule();
56355 + } while (1);
56356 +
56357 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
56358 + learn_buffer_user_len = learn_buffer_len;
56359 + retval = learn_buffer_len;
56360 + learn_buffer_len = 0;
56361 +
56362 + spin_unlock(&gr_learn_lock);
56363 +
56364 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
56365 + retval = -EFAULT;
56366 +
56367 + mutex_unlock(&gr_learn_user_mutex);
56368 +out:
56369 + set_current_state(TASK_RUNNING);
56370 + remove_wait_queue(&learn_wait, &wait);
56371 + return retval;
56372 +}
56373 +
56374 +static unsigned int
56375 +poll_learn(struct file * file, poll_table * wait)
56376 +{
56377 + poll_wait(file, &learn_wait, wait);
56378 +
56379 + if (learn_buffer_len)
56380 + return (POLLIN | POLLRDNORM);
56381 +
56382 + return 0;
56383 +}
56384 +
56385 +void
56386 +gr_clear_learn_entries(void)
56387 +{
56388 + char *tmp;
56389 +
56390 + mutex_lock(&gr_learn_user_mutex);
56391 + spin_lock(&gr_learn_lock);
56392 + tmp = learn_buffer;
56393 + learn_buffer = NULL;
56394 + spin_unlock(&gr_learn_lock);
56395 + if (tmp)
56396 + vfree(tmp);
56397 + if (learn_buffer_user != NULL) {
56398 + vfree(learn_buffer_user);
56399 + learn_buffer_user = NULL;
56400 + }
56401 + learn_buffer_len = 0;
56402 + mutex_unlock(&gr_learn_user_mutex);
56403 +
56404 + return;
56405 +}
56406 +
56407 +void
56408 +gr_add_learn_entry(const char *fmt, ...)
56409 +{
56410 + va_list args;
56411 + unsigned int len;
56412 +
56413 + if (!gr_learn_attached)
56414 + return;
56415 +
56416 + spin_lock(&gr_learn_lock);
56417 +
56418 + /* leave a gap at the end so we know when it's "full" but don't have to
56419 + compute the exact length of the string we're trying to append
56420 + */
56421 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
56422 + spin_unlock(&gr_learn_lock);
56423 + wake_up_interruptible(&learn_wait);
56424 + return;
56425 + }
56426 + if (learn_buffer == NULL) {
56427 + spin_unlock(&gr_learn_lock);
56428 + return;
56429 + }
56430 +
56431 + va_start(args, fmt);
56432 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
56433 + va_end(args);
56434 +
56435 + learn_buffer_len += len + 1;
56436 +
56437 + spin_unlock(&gr_learn_lock);
56438 + wake_up_interruptible(&learn_wait);
56439 +
56440 + return;
56441 +}
56442 +
56443 +static int
56444 +open_learn(struct inode *inode, struct file *file)
56445 +{
56446 + if (file->f_mode & FMODE_READ && gr_learn_attached)
56447 + return -EBUSY;
56448 + if (file->f_mode & FMODE_READ) {
56449 + int retval = 0;
56450 + mutex_lock(&gr_learn_user_mutex);
56451 + if (learn_buffer == NULL)
56452 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
56453 + if (learn_buffer_user == NULL)
56454 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
56455 + if (learn_buffer == NULL) {
56456 + retval = -ENOMEM;
56457 + goto out_error;
56458 + }
56459 + if (learn_buffer_user == NULL) {
56460 + retval = -ENOMEM;
56461 + goto out_error;
56462 + }
56463 + learn_buffer_len = 0;
56464 + learn_buffer_user_len = 0;
56465 + gr_learn_attached = 1;
56466 +out_error:
56467 + mutex_unlock(&gr_learn_user_mutex);
56468 + return retval;
56469 + }
56470 + return 0;
56471 +}
56472 +
56473 +static int
56474 +close_learn(struct inode *inode, struct file *file)
56475 +{
56476 + if (file->f_mode & FMODE_READ) {
56477 + char *tmp = NULL;
56478 + mutex_lock(&gr_learn_user_mutex);
56479 + spin_lock(&gr_learn_lock);
56480 + tmp = learn_buffer;
56481 + learn_buffer = NULL;
56482 + spin_unlock(&gr_learn_lock);
56483 + if (tmp)
56484 + vfree(tmp);
56485 + if (learn_buffer_user != NULL) {
56486 + vfree(learn_buffer_user);
56487 + learn_buffer_user = NULL;
56488 + }
56489 + learn_buffer_len = 0;
56490 + learn_buffer_user_len = 0;
56491 + gr_learn_attached = 0;
56492 + mutex_unlock(&gr_learn_user_mutex);
56493 + }
56494 +
56495 + return 0;
56496 +}
56497 +
56498 +const struct file_operations grsec_fops = {
56499 + .read = read_learn,
56500 + .write = write_grsec_handler,
56501 + .open = open_learn,
56502 + .release = close_learn,
56503 + .poll = poll_learn,
56504 +};
56505 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
56506 new file mode 100644
56507 index 0000000..39645c9
56508 --- /dev/null
56509 +++ b/grsecurity/gracl_res.c
56510 @@ -0,0 +1,68 @@
56511 +#include <linux/kernel.h>
56512 +#include <linux/sched.h>
56513 +#include <linux/gracl.h>
56514 +#include <linux/grinternal.h>
56515 +
56516 +static const char *restab_log[] = {
56517 + [RLIMIT_CPU] = "RLIMIT_CPU",
56518 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
56519 + [RLIMIT_DATA] = "RLIMIT_DATA",
56520 + [RLIMIT_STACK] = "RLIMIT_STACK",
56521 + [RLIMIT_CORE] = "RLIMIT_CORE",
56522 + [RLIMIT_RSS] = "RLIMIT_RSS",
56523 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
56524 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
56525 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
56526 + [RLIMIT_AS] = "RLIMIT_AS",
56527 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
56528 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
56529 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
56530 + [RLIMIT_NICE] = "RLIMIT_NICE",
56531 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
56532 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
56533 + [GR_CRASH_RES] = "RLIMIT_CRASH"
56534 +};
56535 +
56536 +void
56537 +gr_log_resource(const struct task_struct *task,
56538 + const int res, const unsigned long wanted, const int gt)
56539 +{
56540 + const struct cred *cred;
56541 + unsigned long rlim;
56542 +
56543 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
56544 + return;
56545 +
56546 + // not yet supported resource
56547 + if (unlikely(!restab_log[res]))
56548 + return;
56549 +
56550 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
56551 + rlim = task_rlimit_max(task, res);
56552 + else
56553 + rlim = task_rlimit(task, res);
56554 +
56555 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
56556 + return;
56557 +
56558 + rcu_read_lock();
56559 + cred = __task_cred(task);
56560 +
56561 + if (res == RLIMIT_NPROC &&
56562 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
56563 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
56564 + goto out_rcu_unlock;
56565 + else if (res == RLIMIT_MEMLOCK &&
56566 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56567 + goto out_rcu_unlock;
56568 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56569 + goto out_rcu_unlock;
56570 + rcu_read_unlock();
56571 +
56572 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
56573 +
56574 + return;
56575 +out_rcu_unlock:
56576 + rcu_read_unlock();
56577 + return;
56578 +}
56579 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56580 new file mode 100644
56581 index 0000000..5556be3
56582 --- /dev/null
56583 +++ b/grsecurity/gracl_segv.c
56584 @@ -0,0 +1,299 @@
56585 +#include <linux/kernel.h>
56586 +#include <linux/mm.h>
56587 +#include <asm/uaccess.h>
56588 +#include <asm/errno.h>
56589 +#include <asm/mman.h>
56590 +#include <net/sock.h>
56591 +#include <linux/file.h>
56592 +#include <linux/fs.h>
56593 +#include <linux/net.h>
56594 +#include <linux/in.h>
56595 +#include <linux/slab.h>
56596 +#include <linux/types.h>
56597 +#include <linux/sched.h>
56598 +#include <linux/timer.h>
56599 +#include <linux/gracl.h>
56600 +#include <linux/grsecurity.h>
56601 +#include <linux/grinternal.h>
56602 +
56603 +static struct crash_uid *uid_set;
56604 +static unsigned short uid_used;
56605 +static DEFINE_SPINLOCK(gr_uid_lock);
56606 +extern rwlock_t gr_inode_lock;
56607 +extern struct acl_subject_label *
56608 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56609 + struct acl_role_label *role);
56610 +
56611 +#ifdef CONFIG_BTRFS_FS
56612 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56613 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56614 +#endif
56615 +
56616 +static inline dev_t __get_dev(const struct dentry *dentry)
56617 +{
56618 +#ifdef CONFIG_BTRFS_FS
56619 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56620 + return get_btrfs_dev_from_inode(dentry->d_inode);
56621 + else
56622 +#endif
56623 + return dentry->d_inode->i_sb->s_dev;
56624 +}
56625 +
56626 +int
56627 +gr_init_uidset(void)
56628 +{
56629 + uid_set =
56630 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56631 + uid_used = 0;
56632 +
56633 + return uid_set ? 1 : 0;
56634 +}
56635 +
56636 +void
56637 +gr_free_uidset(void)
56638 +{
56639 + if (uid_set)
56640 + kfree(uid_set);
56641 +
56642 + return;
56643 +}
56644 +
56645 +int
56646 +gr_find_uid(const uid_t uid)
56647 +{
56648 + struct crash_uid *tmp = uid_set;
56649 + uid_t buid;
56650 + int low = 0, high = uid_used - 1, mid;
56651 +
56652 + while (high >= low) {
56653 + mid = (low + high) >> 1;
56654 + buid = tmp[mid].uid;
56655 + if (buid == uid)
56656 + return mid;
56657 + if (buid > uid)
56658 + high = mid - 1;
56659 + if (buid < uid)
56660 + low = mid + 1;
56661 + }
56662 +
56663 + return -1;
56664 +}
56665 +
56666 +static __inline__ void
56667 +gr_insertsort(void)
56668 +{
56669 + unsigned short i, j;
56670 + struct crash_uid index;
56671 +
56672 + for (i = 1; i < uid_used; i++) {
56673 + index = uid_set[i];
56674 + j = i;
56675 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56676 + uid_set[j] = uid_set[j - 1];
56677 + j--;
56678 + }
56679 + uid_set[j] = index;
56680 + }
56681 +
56682 + return;
56683 +}
56684 +
56685 +static __inline__ void
56686 +gr_insert_uid(const uid_t uid, const unsigned long expires)
56687 +{
56688 + int loc;
56689 +
56690 + if (uid_used == GR_UIDTABLE_MAX)
56691 + return;
56692 +
56693 + loc = gr_find_uid(uid);
56694 +
56695 + if (loc >= 0) {
56696 + uid_set[loc].expires = expires;
56697 + return;
56698 + }
56699 +
56700 + uid_set[uid_used].uid = uid;
56701 + uid_set[uid_used].expires = expires;
56702 + uid_used++;
56703 +
56704 + gr_insertsort();
56705 +
56706 + return;
56707 +}
56708 +
56709 +void
56710 +gr_remove_uid(const unsigned short loc)
56711 +{
56712 + unsigned short i;
56713 +
56714 + for (i = loc + 1; i < uid_used; i++)
56715 + uid_set[i - 1] = uid_set[i];
56716 +
56717 + uid_used--;
56718 +
56719 + return;
56720 +}
56721 +
56722 +int
56723 +gr_check_crash_uid(const uid_t uid)
56724 +{
56725 + int loc;
56726 + int ret = 0;
56727 +
56728 + if (unlikely(!gr_acl_is_enabled()))
56729 + return 0;
56730 +
56731 + spin_lock(&gr_uid_lock);
56732 + loc = gr_find_uid(uid);
56733 +
56734 + if (loc < 0)
56735 + goto out_unlock;
56736 +
56737 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
56738 + gr_remove_uid(loc);
56739 + else
56740 + ret = 1;
56741 +
56742 +out_unlock:
56743 + spin_unlock(&gr_uid_lock);
56744 + return ret;
56745 +}
56746 +
56747 +static __inline__ int
56748 +proc_is_setxid(const struct cred *cred)
56749 +{
56750 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
56751 + cred->uid != cred->fsuid)
56752 + return 1;
56753 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56754 + cred->gid != cred->fsgid)
56755 + return 1;
56756 +
56757 + return 0;
56758 +}
56759 +
56760 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
56761 +
56762 +void
56763 +gr_handle_crash(struct task_struct *task, const int sig)
56764 +{
56765 + struct acl_subject_label *curr;
56766 + struct task_struct *tsk, *tsk2;
56767 + const struct cred *cred;
56768 + const struct cred *cred2;
56769 +
56770 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56771 + return;
56772 +
56773 + if (unlikely(!gr_acl_is_enabled()))
56774 + return;
56775 +
56776 + curr = task->acl;
56777 +
56778 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
56779 + return;
56780 +
56781 + if (time_before_eq(curr->expires, get_seconds())) {
56782 + curr->expires = 0;
56783 + curr->crashes = 0;
56784 + }
56785 +
56786 + curr->crashes++;
56787 +
56788 + if (!curr->expires)
56789 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56790 +
56791 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56792 + time_after(curr->expires, get_seconds())) {
56793 + rcu_read_lock();
56794 + cred = __task_cred(task);
56795 + if (cred->uid && proc_is_setxid(cred)) {
56796 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56797 + spin_lock(&gr_uid_lock);
56798 + gr_insert_uid(cred->uid, curr->expires);
56799 + spin_unlock(&gr_uid_lock);
56800 + curr->expires = 0;
56801 + curr->crashes = 0;
56802 + read_lock(&tasklist_lock);
56803 + do_each_thread(tsk2, tsk) {
56804 + cred2 = __task_cred(tsk);
56805 + if (tsk != task && cred2->uid == cred->uid)
56806 + gr_fake_force_sig(SIGKILL, tsk);
56807 + } while_each_thread(tsk2, tsk);
56808 + read_unlock(&tasklist_lock);
56809 + } else {
56810 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56811 + read_lock(&tasklist_lock);
56812 + read_lock(&grsec_exec_file_lock);
56813 + do_each_thread(tsk2, tsk) {
56814 + if (likely(tsk != task)) {
56815 + // if this thread has the same subject as the one that triggered
56816 + // RES_CRASH and it's the same binary, kill it
56817 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56818 + gr_fake_force_sig(SIGKILL, tsk);
56819 + }
56820 + } while_each_thread(tsk2, tsk);
56821 + read_unlock(&grsec_exec_file_lock);
56822 + read_unlock(&tasklist_lock);
56823 + }
56824 + rcu_read_unlock();
56825 + }
56826 +
56827 + return;
56828 +}
56829 +
56830 +int
56831 +gr_check_crash_exec(const struct file *filp)
56832 +{
56833 + struct acl_subject_label *curr;
56834 +
56835 + if (unlikely(!gr_acl_is_enabled()))
56836 + return 0;
56837 +
56838 + read_lock(&gr_inode_lock);
56839 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56840 + __get_dev(filp->f_path.dentry),
56841 + current->role);
56842 + read_unlock(&gr_inode_lock);
56843 +
56844 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56845 + (!curr->crashes && !curr->expires))
56846 + return 0;
56847 +
56848 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56849 + time_after(curr->expires, get_seconds()))
56850 + return 1;
56851 + else if (time_before_eq(curr->expires, get_seconds())) {
56852 + curr->crashes = 0;
56853 + curr->expires = 0;
56854 + }
56855 +
56856 + return 0;
56857 +}
56858 +
56859 +void
56860 +gr_handle_alertkill(struct task_struct *task)
56861 +{
56862 + struct acl_subject_label *curracl;
56863 + __u32 curr_ip;
56864 + struct task_struct *p, *p2;
56865 +
56866 + if (unlikely(!gr_acl_is_enabled()))
56867 + return;
56868 +
56869 + curracl = task->acl;
56870 + curr_ip = task->signal->curr_ip;
56871 +
56872 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56873 + read_lock(&tasklist_lock);
56874 + do_each_thread(p2, p) {
56875 + if (p->signal->curr_ip == curr_ip)
56876 + gr_fake_force_sig(SIGKILL, p);
56877 + } while_each_thread(p2, p);
56878 + read_unlock(&tasklist_lock);
56879 + } else if (curracl->mode & GR_KILLPROC)
56880 + gr_fake_force_sig(SIGKILL, task);
56881 +
56882 + return;
56883 +}
56884 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56885 new file mode 100644
56886 index 0000000..9d83a69
56887 --- /dev/null
56888 +++ b/grsecurity/gracl_shm.c
56889 @@ -0,0 +1,40 @@
56890 +#include <linux/kernel.h>
56891 +#include <linux/mm.h>
56892 +#include <linux/sched.h>
56893 +#include <linux/file.h>
56894 +#include <linux/ipc.h>
56895 +#include <linux/gracl.h>
56896 +#include <linux/grsecurity.h>
56897 +#include <linux/grinternal.h>
56898 +
56899 +int
56900 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56901 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56902 +{
56903 + struct task_struct *task;
56904 +
56905 + if (!gr_acl_is_enabled())
56906 + return 1;
56907 +
56908 + rcu_read_lock();
56909 + read_lock(&tasklist_lock);
56910 +
56911 + task = find_task_by_vpid(shm_cprid);
56912 +
56913 + if (unlikely(!task))
56914 + task = find_task_by_vpid(shm_lapid);
56915 +
56916 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56917 + (task->pid == shm_lapid)) &&
56918 + (task->acl->mode & GR_PROTSHM) &&
56919 + (task->acl != current->acl))) {
56920 + read_unlock(&tasklist_lock);
56921 + rcu_read_unlock();
56922 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56923 + return 0;
56924 + }
56925 + read_unlock(&tasklist_lock);
56926 + rcu_read_unlock();
56927 +
56928 + return 1;
56929 +}
56930 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56931 new file mode 100644
56932 index 0000000..bc0be01
56933 --- /dev/null
56934 +++ b/grsecurity/grsec_chdir.c
56935 @@ -0,0 +1,19 @@
56936 +#include <linux/kernel.h>
56937 +#include <linux/sched.h>
56938 +#include <linux/fs.h>
56939 +#include <linux/file.h>
56940 +#include <linux/grsecurity.h>
56941 +#include <linux/grinternal.h>
56942 +
56943 +void
56944 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56945 +{
56946 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56947 + if ((grsec_enable_chdir && grsec_enable_group &&
56948 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56949 + !grsec_enable_group)) {
56950 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56951 + }
56952 +#endif
56953 + return;
56954 +}
56955 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56956 new file mode 100644
56957 index 0000000..9807ee2
56958 --- /dev/null
56959 +++ b/grsecurity/grsec_chroot.c
56960 @@ -0,0 +1,368 @@
56961 +#include <linux/kernel.h>
56962 +#include <linux/module.h>
56963 +#include <linux/sched.h>
56964 +#include <linux/file.h>
56965 +#include <linux/fs.h>
56966 +#include <linux/mount.h>
56967 +#include <linux/types.h>
56968 +#include "../fs/mount.h"
56969 +#include <linux/grsecurity.h>
56970 +#include <linux/grinternal.h>
56971 +
56972 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56973 +{
56974 +#ifdef CONFIG_GRKERNSEC
56975 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56976 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
56977 + task->gr_is_chrooted = 1;
56978 + else
56979 + task->gr_is_chrooted = 0;
56980 +
56981 + task->gr_chroot_dentry = path->dentry;
56982 +#endif
56983 + return;
56984 +}
56985 +
56986 +void gr_clear_chroot_entries(struct task_struct *task)
56987 +{
56988 +#ifdef CONFIG_GRKERNSEC
56989 + task->gr_is_chrooted = 0;
56990 + task->gr_chroot_dentry = NULL;
56991 +#endif
56992 + return;
56993 +}
56994 +
56995 +int
56996 +gr_handle_chroot_unix(const pid_t pid)
56997 +{
56998 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56999 + struct task_struct *p;
57000 +
57001 + if (unlikely(!grsec_enable_chroot_unix))
57002 + return 1;
57003 +
57004 + if (likely(!proc_is_chrooted(current)))
57005 + return 1;
57006 +
57007 + rcu_read_lock();
57008 + read_lock(&tasklist_lock);
57009 + p = find_task_by_vpid_unrestricted(pid);
57010 + if (unlikely(p && !have_same_root(current, p))) {
57011 + read_unlock(&tasklist_lock);
57012 + rcu_read_unlock();
57013 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
57014 + return 0;
57015 + }
57016 + read_unlock(&tasklist_lock);
57017 + rcu_read_unlock();
57018 +#endif
57019 + return 1;
57020 +}
57021 +
57022 +int
57023 +gr_handle_chroot_nice(void)
57024 +{
57025 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57026 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
57027 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
57028 + return -EPERM;
57029 + }
57030 +#endif
57031 + return 0;
57032 +}
57033 +
57034 +int
57035 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
57036 +{
57037 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57038 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
57039 + && proc_is_chrooted(current)) {
57040 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
57041 + return -EACCES;
57042 + }
57043 +#endif
57044 + return 0;
57045 +}
57046 +
57047 +int
57048 +gr_handle_chroot_rawio(const struct inode *inode)
57049 +{
57050 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57051 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
57052 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
57053 + return 1;
57054 +#endif
57055 + return 0;
57056 +}
57057 +
57058 +int
57059 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
57060 +{
57061 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57062 + struct task_struct *p;
57063 + int ret = 0;
57064 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
57065 + return ret;
57066 +
57067 + read_lock(&tasklist_lock);
57068 + do_each_pid_task(pid, type, p) {
57069 + if (!have_same_root(current, p)) {
57070 + ret = 1;
57071 + goto out;
57072 + }
57073 + } while_each_pid_task(pid, type, p);
57074 +out:
57075 + read_unlock(&tasklist_lock);
57076 + return ret;
57077 +#endif
57078 + return 0;
57079 +}
57080 +
57081 +int
57082 +gr_pid_is_chrooted(struct task_struct *p)
57083 +{
57084 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57085 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
57086 + return 0;
57087 +
57088 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
57089 + !have_same_root(current, p)) {
57090 + return 1;
57091 + }
57092 +#endif
57093 + return 0;
57094 +}
57095 +
57096 +EXPORT_SYMBOL(gr_pid_is_chrooted);
57097 +
57098 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
57099 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
57100 +{
57101 + struct path path, currentroot;
57102 + int ret = 0;
57103 +
57104 + path.dentry = (struct dentry *)u_dentry;
57105 + path.mnt = (struct vfsmount *)u_mnt;
57106 + get_fs_root(current->fs, &currentroot);
57107 + if (path_is_under(&path, &currentroot))
57108 + ret = 1;
57109 + path_put(&currentroot);
57110 +
57111 + return ret;
57112 +}
57113 +#endif
57114 +
57115 +int
57116 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
57117 +{
57118 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57119 + if (!grsec_enable_chroot_fchdir)
57120 + return 1;
57121 +
57122 + if (!proc_is_chrooted(current))
57123 + return 1;
57124 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
57125 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
57126 + return 0;
57127 + }
57128 +#endif
57129 + return 1;
57130 +}
57131 +
57132 +int
57133 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57134 + const time_t shm_createtime)
57135 +{
57136 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57137 + struct task_struct *p;
57138 + time_t starttime;
57139 +
57140 + if (unlikely(!grsec_enable_chroot_shmat))
57141 + return 1;
57142 +
57143 + if (likely(!proc_is_chrooted(current)))
57144 + return 1;
57145 +
57146 + rcu_read_lock();
57147 + read_lock(&tasklist_lock);
57148 +
57149 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
57150 + starttime = p->start_time.tv_sec;
57151 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
57152 + if (have_same_root(current, p)) {
57153 + goto allow;
57154 + } else {
57155 + read_unlock(&tasklist_lock);
57156 + rcu_read_unlock();
57157 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57158 + return 0;
57159 + }
57160 + }
57161 + /* creator exited, pid reuse, fall through to next check */
57162 + }
57163 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
57164 + if (unlikely(!have_same_root(current, p))) {
57165 + read_unlock(&tasklist_lock);
57166 + rcu_read_unlock();
57167 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
57168 + return 0;
57169 + }
57170 + }
57171 +
57172 +allow:
57173 + read_unlock(&tasklist_lock);
57174 + rcu_read_unlock();
57175 +#endif
57176 + return 1;
57177 +}
57178 +
57179 +void
57180 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
57181 +{
57182 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57183 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
57184 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
57185 +#endif
57186 + return;
57187 +}
57188 +
57189 +int
57190 +gr_handle_chroot_mknod(const struct dentry *dentry,
57191 + const struct vfsmount *mnt, const int mode)
57192 +{
57193 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57194 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
57195 + proc_is_chrooted(current)) {
57196 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
57197 + return -EPERM;
57198 + }
57199 +#endif
57200 + return 0;
57201 +}
57202 +
57203 +int
57204 +gr_handle_chroot_mount(const struct dentry *dentry,
57205 + const struct vfsmount *mnt, const char *dev_name)
57206 +{
57207 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57208 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
57209 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
57210 + return -EPERM;
57211 + }
57212 +#endif
57213 + return 0;
57214 +}
57215 +
57216 +int
57217 +gr_handle_chroot_pivot(void)
57218 +{
57219 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57220 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
57221 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
57222 + return -EPERM;
57223 + }
57224 +#endif
57225 + return 0;
57226 +}
57227 +
57228 +int
57229 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
57230 +{
57231 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57232 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
57233 + !gr_is_outside_chroot(dentry, mnt)) {
57234 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
57235 + return -EPERM;
57236 + }
57237 +#endif
57238 + return 0;
57239 +}
57240 +
57241 +extern const char *captab_log[];
57242 +extern int captab_log_entries;
57243 +
57244 +int
57245 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57246 +{
57247 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57248 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57249 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57250 + if (cap_raised(chroot_caps, cap)) {
57251 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
57252 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
57253 + }
57254 + return 0;
57255 + }
57256 + }
57257 +#endif
57258 + return 1;
57259 +}
57260 +
57261 +int
57262 +gr_chroot_is_capable(const int cap)
57263 +{
57264 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57265 + return gr_task_chroot_is_capable(current, current_cred(), cap);
57266 +#endif
57267 + return 1;
57268 +}
57269 +
57270 +int
57271 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
57272 +{
57273 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57274 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
57275 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
57276 + if (cap_raised(chroot_caps, cap)) {
57277 + return 0;
57278 + }
57279 + }
57280 +#endif
57281 + return 1;
57282 +}
57283 +
57284 +int
57285 +gr_chroot_is_capable_nolog(const int cap)
57286 +{
57287 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57288 + return gr_task_chroot_is_capable_nolog(current, cap);
57289 +#endif
57290 + return 1;
57291 +}
57292 +
57293 +int
57294 +gr_handle_chroot_sysctl(const int op)
57295 +{
57296 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57297 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
57298 + proc_is_chrooted(current))
57299 + return -EACCES;
57300 +#endif
57301 + return 0;
57302 +}
57303 +
57304 +void
57305 +gr_handle_chroot_chdir(struct path *path)
57306 +{
57307 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57308 + if (grsec_enable_chroot_chdir)
57309 + set_fs_pwd(current->fs, path);
57310 +#endif
57311 + return;
57312 +}
57313 +
57314 +int
57315 +gr_handle_chroot_chmod(const struct dentry *dentry,
57316 + const struct vfsmount *mnt, const int mode)
57317 +{
57318 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57319 + /* allow chmod +s on directories, but not files */
57320 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
57321 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
57322 + proc_is_chrooted(current)) {
57323 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
57324 + return -EPERM;
57325 + }
57326 +#endif
57327 + return 0;
57328 +}
57329 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
57330 new file mode 100644
57331 index 0000000..213ad8b
57332 --- /dev/null
57333 +++ b/grsecurity/grsec_disabled.c
57334 @@ -0,0 +1,437 @@
57335 +#include <linux/kernel.h>
57336 +#include <linux/module.h>
57337 +#include <linux/sched.h>
57338 +#include <linux/file.h>
57339 +#include <linux/fs.h>
57340 +#include <linux/kdev_t.h>
57341 +#include <linux/net.h>
57342 +#include <linux/in.h>
57343 +#include <linux/ip.h>
57344 +#include <linux/skbuff.h>
57345 +#include <linux/sysctl.h>
57346 +
57347 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57348 +void
57349 +pax_set_initial_flags(struct linux_binprm *bprm)
57350 +{
57351 + return;
57352 +}
57353 +#endif
57354 +
57355 +#ifdef CONFIG_SYSCTL
57356 +__u32
57357 +gr_handle_sysctl(const struct ctl_table * table, const int op)
57358 +{
57359 + return 0;
57360 +}
57361 +#endif
57362 +
57363 +#ifdef CONFIG_TASKSTATS
57364 +int gr_is_taskstats_denied(int pid)
57365 +{
57366 + return 0;
57367 +}
57368 +#endif
57369 +
57370 +int
57371 +gr_acl_is_enabled(void)
57372 +{
57373 + return 0;
57374 +}
57375 +
57376 +void
57377 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
57378 +{
57379 + return;
57380 +}
57381 +
57382 +int
57383 +gr_handle_rawio(const struct inode *inode)
57384 +{
57385 + return 0;
57386 +}
57387 +
57388 +void
57389 +gr_acl_handle_psacct(struct task_struct *task, const long code)
57390 +{
57391 + return;
57392 +}
57393 +
57394 +int
57395 +gr_handle_ptrace(struct task_struct *task, const long request)
57396 +{
57397 + return 0;
57398 +}
57399 +
57400 +int
57401 +gr_handle_proc_ptrace(struct task_struct *task)
57402 +{
57403 + return 0;
57404 +}
57405 +
57406 +void
57407 +gr_learn_resource(const struct task_struct *task,
57408 + const int res, const unsigned long wanted, const int gt)
57409 +{
57410 + return;
57411 +}
57412 +
57413 +int
57414 +gr_set_acls(const int type)
57415 +{
57416 + return 0;
57417 +}
57418 +
57419 +int
57420 +gr_check_hidden_task(const struct task_struct *tsk)
57421 +{
57422 + return 0;
57423 +}
57424 +
57425 +int
57426 +gr_check_protected_task(const struct task_struct *task)
57427 +{
57428 + return 0;
57429 +}
57430 +
57431 +int
57432 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
57433 +{
57434 + return 0;
57435 +}
57436 +
57437 +void
57438 +gr_copy_label(struct task_struct *tsk)
57439 +{
57440 + return;
57441 +}
57442 +
57443 +void
57444 +gr_set_pax_flags(struct task_struct *task)
57445 +{
57446 + return;
57447 +}
57448 +
57449 +int
57450 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
57451 + const int unsafe_share)
57452 +{
57453 + return 0;
57454 +}
57455 +
57456 +void
57457 +gr_handle_delete(const ino_t ino, const dev_t dev)
57458 +{
57459 + return;
57460 +}
57461 +
57462 +void
57463 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
57464 +{
57465 + return;
57466 +}
57467 +
57468 +void
57469 +gr_handle_crash(struct task_struct *task, const int sig)
57470 +{
57471 + return;
57472 +}
57473 +
57474 +int
57475 +gr_check_crash_exec(const struct file *filp)
57476 +{
57477 + return 0;
57478 +}
57479 +
57480 +int
57481 +gr_check_crash_uid(const uid_t uid)
57482 +{
57483 + return 0;
57484 +}
57485 +
57486 +void
57487 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
57488 + struct dentry *old_dentry,
57489 + struct dentry *new_dentry,
57490 + struct vfsmount *mnt, const __u8 replace)
57491 +{
57492 + return;
57493 +}
57494 +
57495 +int
57496 +gr_search_socket(const int family, const int type, const int protocol)
57497 +{
57498 + return 1;
57499 +}
57500 +
57501 +int
57502 +gr_search_connectbind(const int mode, const struct socket *sock,
57503 + const struct sockaddr_in *addr)
57504 +{
57505 + return 0;
57506 +}
57507 +
57508 +void
57509 +gr_handle_alertkill(struct task_struct *task)
57510 +{
57511 + return;
57512 +}
57513 +
57514 +__u32
57515 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
57516 +{
57517 + return 1;
57518 +}
57519 +
57520 +__u32
57521 +gr_acl_handle_hidden_file(const struct dentry * dentry,
57522 + const struct vfsmount * mnt)
57523 +{
57524 + return 1;
57525 +}
57526 +
57527 +__u32
57528 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
57529 + int acc_mode)
57530 +{
57531 + return 1;
57532 +}
57533 +
57534 +__u32
57535 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
57536 +{
57537 + return 1;
57538 +}
57539 +
57540 +__u32
57541 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
57542 +{
57543 + return 1;
57544 +}
57545 +
57546 +int
57547 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
57548 + unsigned int *vm_flags)
57549 +{
57550 + return 1;
57551 +}
57552 +
57553 +__u32
57554 +gr_acl_handle_truncate(const struct dentry * dentry,
57555 + const struct vfsmount * mnt)
57556 +{
57557 + return 1;
57558 +}
57559 +
57560 +__u32
57561 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
57562 +{
57563 + return 1;
57564 +}
57565 +
57566 +__u32
57567 +gr_acl_handle_access(const struct dentry * dentry,
57568 + const struct vfsmount * mnt, const int fmode)
57569 +{
57570 + return 1;
57571 +}
57572 +
57573 +__u32
57574 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
57575 + umode_t *mode)
57576 +{
57577 + return 1;
57578 +}
57579 +
57580 +__u32
57581 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57582 +{
57583 + return 1;
57584 +}
57585 +
57586 +__u32
57587 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57588 +{
57589 + return 1;
57590 +}
57591 +
57592 +void
57593 +grsecurity_init(void)
57594 +{
57595 + return;
57596 +}
57597 +
57598 +umode_t gr_acl_umask(void)
57599 +{
57600 + return 0;
57601 +}
57602 +
57603 +__u32
57604 +gr_acl_handle_mknod(const struct dentry * new_dentry,
57605 + const struct dentry * parent_dentry,
57606 + const struct vfsmount * parent_mnt,
57607 + const int mode)
57608 +{
57609 + return 1;
57610 +}
57611 +
57612 +__u32
57613 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
57614 + const struct dentry * parent_dentry,
57615 + const struct vfsmount * parent_mnt)
57616 +{
57617 + return 1;
57618 +}
57619 +
57620 +__u32
57621 +gr_acl_handle_symlink(const struct dentry * new_dentry,
57622 + const struct dentry * parent_dentry,
57623 + const struct vfsmount * parent_mnt, const char *from)
57624 +{
57625 + return 1;
57626 +}
57627 +
57628 +__u32
57629 +gr_acl_handle_link(const struct dentry * new_dentry,
57630 + const struct dentry * parent_dentry,
57631 + const struct vfsmount * parent_mnt,
57632 + const struct dentry * old_dentry,
57633 + const struct vfsmount * old_mnt, const char *to)
57634 +{
57635 + return 1;
57636 +}
57637 +
57638 +int
57639 +gr_acl_handle_rename(const struct dentry *new_dentry,
57640 + const struct dentry *parent_dentry,
57641 + const struct vfsmount *parent_mnt,
57642 + const struct dentry *old_dentry,
57643 + const struct inode *old_parent_inode,
57644 + const struct vfsmount *old_mnt, const char *newname)
57645 +{
57646 + return 0;
57647 +}
57648 +
57649 +int
57650 +gr_acl_handle_filldir(const struct file *file, const char *name,
57651 + const int namelen, const ino_t ino)
57652 +{
57653 + return 1;
57654 +}
57655 +
57656 +int
57657 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57658 + const time_t shm_createtime, const uid_t cuid, const int shmid)
57659 +{
57660 + return 1;
57661 +}
57662 +
57663 +int
57664 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57665 +{
57666 + return 0;
57667 +}
57668 +
57669 +int
57670 +gr_search_accept(const struct socket *sock)
57671 +{
57672 + return 0;
57673 +}
57674 +
57675 +int
57676 +gr_search_listen(const struct socket *sock)
57677 +{
57678 + return 0;
57679 +}
57680 +
57681 +int
57682 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57683 +{
57684 + return 0;
57685 +}
57686 +
57687 +__u32
57688 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57689 +{
57690 + return 1;
57691 +}
57692 +
57693 +__u32
57694 +gr_acl_handle_creat(const struct dentry * dentry,
57695 + const struct dentry * p_dentry,
57696 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
57697 + const int imode)
57698 +{
57699 + return 1;
57700 +}
57701 +
57702 +void
57703 +gr_acl_handle_exit(void)
57704 +{
57705 + return;
57706 +}
57707 +
57708 +int
57709 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57710 +{
57711 + return 1;
57712 +}
57713 +
57714 +void
57715 +gr_set_role_label(const uid_t uid, const gid_t gid)
57716 +{
57717 + return;
57718 +}
57719 +
57720 +int
57721 +gr_acl_handle_procpidmem(const struct task_struct *task)
57722 +{
57723 + return 0;
57724 +}
57725 +
57726 +int
57727 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57728 +{
57729 + return 0;
57730 +}
57731 +
57732 +int
57733 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57734 +{
57735 + return 0;
57736 +}
57737 +
57738 +void
57739 +gr_set_kernel_label(struct task_struct *task)
57740 +{
57741 + return;
57742 +}
57743 +
57744 +int
57745 +gr_check_user_change(int real, int effective, int fs)
57746 +{
57747 + return 0;
57748 +}
57749 +
57750 +int
57751 +gr_check_group_change(int real, int effective, int fs)
57752 +{
57753 + return 0;
57754 +}
57755 +
57756 +int gr_acl_enable_at_secure(void)
57757 +{
57758 + return 0;
57759 +}
57760 +
57761 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57762 +{
57763 + return dentry->d_inode->i_sb->s_dev;
57764 +}
57765 +
57766 +EXPORT_SYMBOL(gr_learn_resource);
57767 +EXPORT_SYMBOL(gr_set_kernel_label);
57768 +#ifdef CONFIG_SECURITY
57769 +EXPORT_SYMBOL(gr_check_user_change);
57770 +EXPORT_SYMBOL(gr_check_group_change);
57771 +#endif
57772 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57773 new file mode 100644
57774 index 0000000..abfa971
57775 --- /dev/null
57776 +++ b/grsecurity/grsec_exec.c
57777 @@ -0,0 +1,174 @@
57778 +#include <linux/kernel.h>
57779 +#include <linux/sched.h>
57780 +#include <linux/file.h>
57781 +#include <linux/binfmts.h>
57782 +#include <linux/fs.h>
57783 +#include <linux/types.h>
57784 +#include <linux/grdefs.h>
57785 +#include <linux/grsecurity.h>
57786 +#include <linux/grinternal.h>
57787 +#include <linux/capability.h>
57788 +#include <linux/module.h>
57789 +
57790 +#include <asm/uaccess.h>
57791 +
57792 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57793 +static char gr_exec_arg_buf[132];
57794 +static DEFINE_MUTEX(gr_exec_arg_mutex);
57795 +#endif
57796 +
57797 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
57798 +
57799 +void
57800 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
57801 +{
57802 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57803 + char *grarg = gr_exec_arg_buf;
57804 + unsigned int i, x, execlen = 0;
57805 + char c;
57806 +
57807 + if (!((grsec_enable_execlog && grsec_enable_group &&
57808 + in_group_p(grsec_audit_gid))
57809 + || (grsec_enable_execlog && !grsec_enable_group)))
57810 + return;
57811 +
57812 + mutex_lock(&gr_exec_arg_mutex);
57813 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57814 +
57815 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57816 + const char __user *p;
57817 + unsigned int len;
57818 +
57819 + p = get_user_arg_ptr(argv, i);
57820 + if (IS_ERR(p))
57821 + goto log;
57822 +
57823 + len = strnlen_user(p, 128 - execlen);
57824 + if (len > 128 - execlen)
57825 + len = 128 - execlen;
57826 + else if (len > 0)
57827 + len--;
57828 + if (copy_from_user(grarg + execlen, p, len))
57829 + goto log;
57830 +
57831 + /* rewrite unprintable characters */
57832 + for (x = 0; x < len; x++) {
57833 + c = *(grarg + execlen + x);
57834 + if (c < 32 || c > 126)
57835 + *(grarg + execlen + x) = ' ';
57836 + }
57837 +
57838 + execlen += len;
57839 + *(grarg + execlen) = ' ';
57840 + *(grarg + execlen + 1) = '\0';
57841 + execlen++;
57842 + }
57843 +
57844 + log:
57845 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57846 + bprm->file->f_path.mnt, grarg);
57847 + mutex_unlock(&gr_exec_arg_mutex);
57848 +#endif
57849 + return;
57850 +}
57851 +
57852 +#ifdef CONFIG_GRKERNSEC
57853 +extern int gr_acl_is_capable(const int cap);
57854 +extern int gr_acl_is_capable_nolog(const int cap);
57855 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57856 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57857 +extern int gr_chroot_is_capable(const int cap);
57858 +extern int gr_chroot_is_capable_nolog(const int cap);
57859 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57860 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57861 +#endif
57862 +
57863 +const char *captab_log[] = {
57864 + "CAP_CHOWN",
57865 + "CAP_DAC_OVERRIDE",
57866 + "CAP_DAC_READ_SEARCH",
57867 + "CAP_FOWNER",
57868 + "CAP_FSETID",
57869 + "CAP_KILL",
57870 + "CAP_SETGID",
57871 + "CAP_SETUID",
57872 + "CAP_SETPCAP",
57873 + "CAP_LINUX_IMMUTABLE",
57874 + "CAP_NET_BIND_SERVICE",
57875 + "CAP_NET_BROADCAST",
57876 + "CAP_NET_ADMIN",
57877 + "CAP_NET_RAW",
57878 + "CAP_IPC_LOCK",
57879 + "CAP_IPC_OWNER",
57880 + "CAP_SYS_MODULE",
57881 + "CAP_SYS_RAWIO",
57882 + "CAP_SYS_CHROOT",
57883 + "CAP_SYS_PTRACE",
57884 + "CAP_SYS_PACCT",
57885 + "CAP_SYS_ADMIN",
57886 + "CAP_SYS_BOOT",
57887 + "CAP_SYS_NICE",
57888 + "CAP_SYS_RESOURCE",
57889 + "CAP_SYS_TIME",
57890 + "CAP_SYS_TTY_CONFIG",
57891 + "CAP_MKNOD",
57892 + "CAP_LEASE",
57893 + "CAP_AUDIT_WRITE",
57894 + "CAP_AUDIT_CONTROL",
57895 + "CAP_SETFCAP",
57896 + "CAP_MAC_OVERRIDE",
57897 + "CAP_MAC_ADMIN",
57898 + "CAP_SYSLOG",
57899 + "CAP_WAKE_ALARM"
57900 +};
57901 +
57902 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57903 +
57904 +int gr_is_capable(const int cap)
57905 +{
57906 +#ifdef CONFIG_GRKERNSEC
57907 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57908 + return 1;
57909 + return 0;
57910 +#else
57911 + return 1;
57912 +#endif
57913 +}
57914 +
57915 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57916 +{
57917 +#ifdef CONFIG_GRKERNSEC
57918 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57919 + return 1;
57920 + return 0;
57921 +#else
57922 + return 1;
57923 +#endif
57924 +}
57925 +
57926 +int gr_is_capable_nolog(const int cap)
57927 +{
57928 +#ifdef CONFIG_GRKERNSEC
57929 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57930 + return 1;
57931 + return 0;
57932 +#else
57933 + return 1;
57934 +#endif
57935 +}
57936 +
57937 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57938 +{
57939 +#ifdef CONFIG_GRKERNSEC
57940 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57941 + return 1;
57942 + return 0;
57943 +#else
57944 + return 1;
57945 +#endif
57946 +}
57947 +
57948 +EXPORT_SYMBOL(gr_is_capable);
57949 +EXPORT_SYMBOL(gr_is_capable_nolog);
57950 +EXPORT_SYMBOL(gr_task_is_capable);
57951 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
57952 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57953 new file mode 100644
57954 index 0000000..d3ee748
57955 --- /dev/null
57956 +++ b/grsecurity/grsec_fifo.c
57957 @@ -0,0 +1,24 @@
57958 +#include <linux/kernel.h>
57959 +#include <linux/sched.h>
57960 +#include <linux/fs.h>
57961 +#include <linux/file.h>
57962 +#include <linux/grinternal.h>
57963 +
57964 +int
57965 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57966 + const struct dentry *dir, const int flag, const int acc_mode)
57967 +{
57968 +#ifdef CONFIG_GRKERNSEC_FIFO
57969 + const struct cred *cred = current_cred();
57970 +
57971 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57972 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57973 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57974 + (cred->fsuid != dentry->d_inode->i_uid)) {
57975 + if (!inode_permission(dentry->d_inode, acc_mode))
57976 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57977 + return -EACCES;
57978 + }
57979 +#endif
57980 + return 0;
57981 +}
57982 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57983 new file mode 100644
57984 index 0000000..8ca18bf
57985 --- /dev/null
57986 +++ b/grsecurity/grsec_fork.c
57987 @@ -0,0 +1,23 @@
57988 +#include <linux/kernel.h>
57989 +#include <linux/sched.h>
57990 +#include <linux/grsecurity.h>
57991 +#include <linux/grinternal.h>
57992 +#include <linux/errno.h>
57993 +
57994 +void
57995 +gr_log_forkfail(const int retval)
57996 +{
57997 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57998 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57999 + switch (retval) {
58000 + case -EAGAIN:
58001 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
58002 + break;
58003 + case -ENOMEM:
58004 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
58005 + break;
58006 + }
58007 + }
58008 +#endif
58009 + return;
58010 +}
58011 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
58012 new file mode 100644
58013 index 0000000..01ddde4
58014 --- /dev/null
58015 +++ b/grsecurity/grsec_init.c
58016 @@ -0,0 +1,277 @@
58017 +#include <linux/kernel.h>
58018 +#include <linux/sched.h>
58019 +#include <linux/mm.h>
58020 +#include <linux/gracl.h>
58021 +#include <linux/slab.h>
58022 +#include <linux/vmalloc.h>
58023 +#include <linux/percpu.h>
58024 +#include <linux/module.h>
58025 +
58026 +int grsec_enable_ptrace_readexec;
58027 +int grsec_enable_setxid;
58028 +int grsec_enable_brute;
58029 +int grsec_enable_link;
58030 +int grsec_enable_dmesg;
58031 +int grsec_enable_harden_ptrace;
58032 +int grsec_enable_fifo;
58033 +int grsec_enable_execlog;
58034 +int grsec_enable_signal;
58035 +int grsec_enable_forkfail;
58036 +int grsec_enable_audit_ptrace;
58037 +int grsec_enable_time;
58038 +int grsec_enable_audit_textrel;
58039 +int grsec_enable_group;
58040 +int grsec_audit_gid;
58041 +int grsec_enable_chdir;
58042 +int grsec_enable_mount;
58043 +int grsec_enable_rofs;
58044 +int grsec_enable_chroot_findtask;
58045 +int grsec_enable_chroot_mount;
58046 +int grsec_enable_chroot_shmat;
58047 +int grsec_enable_chroot_fchdir;
58048 +int grsec_enable_chroot_double;
58049 +int grsec_enable_chroot_pivot;
58050 +int grsec_enable_chroot_chdir;
58051 +int grsec_enable_chroot_chmod;
58052 +int grsec_enable_chroot_mknod;
58053 +int grsec_enable_chroot_nice;
58054 +int grsec_enable_chroot_execlog;
58055 +int grsec_enable_chroot_caps;
58056 +int grsec_enable_chroot_sysctl;
58057 +int grsec_enable_chroot_unix;
58058 +int grsec_enable_tpe;
58059 +int grsec_tpe_gid;
58060 +int grsec_enable_blackhole;
58061 +#ifdef CONFIG_IPV6_MODULE
58062 +EXPORT_SYMBOL(grsec_enable_blackhole);
58063 +#endif
58064 +int grsec_lastack_retries;
58065 +int grsec_enable_tpe_all;
58066 +int grsec_enable_tpe_invert;
58067 +int grsec_enable_socket_all;
58068 +int grsec_socket_all_gid;
58069 +int grsec_enable_socket_client;
58070 +int grsec_socket_client_gid;
58071 +int grsec_enable_socket_server;
58072 +int grsec_socket_server_gid;
58073 +int grsec_resource_logging;
58074 +int grsec_disable_privio;
58075 +int grsec_enable_log_rwxmaps;
58076 +int grsec_lock;
58077 +
58078 +DEFINE_SPINLOCK(grsec_alert_lock);
58079 +unsigned long grsec_alert_wtime = 0;
58080 +unsigned long grsec_alert_fyet = 0;
58081 +
58082 +DEFINE_SPINLOCK(grsec_audit_lock);
58083 +
58084 +DEFINE_RWLOCK(grsec_exec_file_lock);
58085 +
58086 +char *gr_shared_page[4];
58087 +
58088 +char *gr_alert_log_fmt;
58089 +char *gr_audit_log_fmt;
58090 +char *gr_alert_log_buf;
58091 +char *gr_audit_log_buf;
58092 +
58093 +extern struct gr_arg *gr_usermode;
58094 +extern unsigned char *gr_system_salt;
58095 +extern unsigned char *gr_system_sum;
58096 +
58097 +void __init
58098 +grsecurity_init(void)
58099 +{
58100 + int j;
58101 + /* create the per-cpu shared pages */
58102 +
58103 +#ifdef CONFIG_X86
58104 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
58105 +#endif
58106 +
58107 + for (j = 0; j < 4; j++) {
58108 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
58109 + if (gr_shared_page[j] == NULL) {
58110 + panic("Unable to allocate grsecurity shared page");
58111 + return;
58112 + }
58113 + }
58114 +
58115 + /* allocate log buffers */
58116 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
58117 + if (!gr_alert_log_fmt) {
58118 + panic("Unable to allocate grsecurity alert log format buffer");
58119 + return;
58120 + }
58121 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
58122 + if (!gr_audit_log_fmt) {
58123 + panic("Unable to allocate grsecurity audit log format buffer");
58124 + return;
58125 + }
58126 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58127 + if (!gr_alert_log_buf) {
58128 + panic("Unable to allocate grsecurity alert log buffer");
58129 + return;
58130 + }
58131 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
58132 + if (!gr_audit_log_buf) {
58133 + panic("Unable to allocate grsecurity audit log buffer");
58134 + return;
58135 + }
58136 +
58137 + /* allocate memory for authentication structure */
58138 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
58139 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
58140 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
58141 +
58142 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
58143 + panic("Unable to allocate grsecurity authentication structure");
58144 + return;
58145 + }
58146 +
58147 +
58148 +#ifdef CONFIG_GRKERNSEC_IO
58149 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
58150 + grsec_disable_privio = 1;
58151 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58152 + grsec_disable_privio = 1;
58153 +#else
58154 + grsec_disable_privio = 0;
58155 +#endif
58156 +#endif
58157 +
58158 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58159 + /* for backward compatibility, tpe_invert always defaults to on if
58160 + enabled in the kernel
58161 + */
58162 + grsec_enable_tpe_invert = 1;
58163 +#endif
58164 +
58165 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
58166 +#ifndef CONFIG_GRKERNSEC_SYSCTL
58167 + grsec_lock = 1;
58168 +#endif
58169 +
58170 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58171 + grsec_enable_audit_textrel = 1;
58172 +#endif
58173 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58174 + grsec_enable_log_rwxmaps = 1;
58175 +#endif
58176 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58177 + grsec_enable_group = 1;
58178 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
58179 +#endif
58180 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58181 + grsec_enable_ptrace_readexec = 1;
58182 +#endif
58183 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58184 + grsec_enable_chdir = 1;
58185 +#endif
58186 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58187 + grsec_enable_harden_ptrace = 1;
58188 +#endif
58189 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58190 + grsec_enable_mount = 1;
58191 +#endif
58192 +#ifdef CONFIG_GRKERNSEC_LINK
58193 + grsec_enable_link = 1;
58194 +#endif
58195 +#ifdef CONFIG_GRKERNSEC_BRUTE
58196 + grsec_enable_brute = 1;
58197 +#endif
58198 +#ifdef CONFIG_GRKERNSEC_DMESG
58199 + grsec_enable_dmesg = 1;
58200 +#endif
58201 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58202 + grsec_enable_blackhole = 1;
58203 + grsec_lastack_retries = 4;
58204 +#endif
58205 +#ifdef CONFIG_GRKERNSEC_FIFO
58206 + grsec_enable_fifo = 1;
58207 +#endif
58208 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58209 + grsec_enable_execlog = 1;
58210 +#endif
58211 +#ifdef CONFIG_GRKERNSEC_SETXID
58212 + grsec_enable_setxid = 1;
58213 +#endif
58214 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58215 + grsec_enable_signal = 1;
58216 +#endif
58217 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58218 + grsec_enable_forkfail = 1;
58219 +#endif
58220 +#ifdef CONFIG_GRKERNSEC_TIME
58221 + grsec_enable_time = 1;
58222 +#endif
58223 +#ifdef CONFIG_GRKERNSEC_RESLOG
58224 + grsec_resource_logging = 1;
58225 +#endif
58226 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58227 + grsec_enable_chroot_findtask = 1;
58228 +#endif
58229 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58230 + grsec_enable_chroot_unix = 1;
58231 +#endif
58232 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58233 + grsec_enable_chroot_mount = 1;
58234 +#endif
58235 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58236 + grsec_enable_chroot_fchdir = 1;
58237 +#endif
58238 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58239 + grsec_enable_chroot_shmat = 1;
58240 +#endif
58241 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58242 + grsec_enable_audit_ptrace = 1;
58243 +#endif
58244 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58245 + grsec_enable_chroot_double = 1;
58246 +#endif
58247 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58248 + grsec_enable_chroot_pivot = 1;
58249 +#endif
58250 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58251 + grsec_enable_chroot_chdir = 1;
58252 +#endif
58253 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58254 + grsec_enable_chroot_chmod = 1;
58255 +#endif
58256 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58257 + grsec_enable_chroot_mknod = 1;
58258 +#endif
58259 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58260 + grsec_enable_chroot_nice = 1;
58261 +#endif
58262 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58263 + grsec_enable_chroot_execlog = 1;
58264 +#endif
58265 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58266 + grsec_enable_chroot_caps = 1;
58267 +#endif
58268 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58269 + grsec_enable_chroot_sysctl = 1;
58270 +#endif
58271 +#ifdef CONFIG_GRKERNSEC_TPE
58272 + grsec_enable_tpe = 1;
58273 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
58274 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58275 + grsec_enable_tpe_all = 1;
58276 +#endif
58277 +#endif
58278 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58279 + grsec_enable_socket_all = 1;
58280 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
58281 +#endif
58282 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58283 + grsec_enable_socket_client = 1;
58284 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
58285 +#endif
58286 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58287 + grsec_enable_socket_server = 1;
58288 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
58289 +#endif
58290 +#endif
58291 +
58292 + return;
58293 +}
58294 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
58295 new file mode 100644
58296 index 0000000..3efe141
58297 --- /dev/null
58298 +++ b/grsecurity/grsec_link.c
58299 @@ -0,0 +1,43 @@
58300 +#include <linux/kernel.h>
58301 +#include <linux/sched.h>
58302 +#include <linux/fs.h>
58303 +#include <linux/file.h>
58304 +#include <linux/grinternal.h>
58305 +
58306 +int
58307 +gr_handle_follow_link(const struct inode *parent,
58308 + const struct inode *inode,
58309 + const struct dentry *dentry, const struct vfsmount *mnt)
58310 +{
58311 +#ifdef CONFIG_GRKERNSEC_LINK
58312 + const struct cred *cred = current_cred();
58313 +
58314 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
58315 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
58316 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
58317 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
58318 + return -EACCES;
58319 + }
58320 +#endif
58321 + return 0;
58322 +}
58323 +
58324 +int
58325 +gr_handle_hardlink(const struct dentry *dentry,
58326 + const struct vfsmount *mnt,
58327 + struct inode *inode, const int mode, const char *to)
58328 +{
58329 +#ifdef CONFIG_GRKERNSEC_LINK
58330 + const struct cred *cred = current_cred();
58331 +
58332 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
58333 + (!S_ISREG(mode) || (mode & S_ISUID) ||
58334 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
58335 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58336 + !capable(CAP_FOWNER) && cred->uid) {
58337 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
58338 + return -EPERM;
58339 + }
58340 +#endif
58341 + return 0;
58342 +}
58343 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
58344 new file mode 100644
58345 index 0000000..a45d2e9
58346 --- /dev/null
58347 +++ b/grsecurity/grsec_log.c
58348 @@ -0,0 +1,322 @@
58349 +#include <linux/kernel.h>
58350 +#include <linux/sched.h>
58351 +#include <linux/file.h>
58352 +#include <linux/tty.h>
58353 +#include <linux/fs.h>
58354 +#include <linux/grinternal.h>
58355 +
58356 +#ifdef CONFIG_TREE_PREEMPT_RCU
58357 +#define DISABLE_PREEMPT() preempt_disable()
58358 +#define ENABLE_PREEMPT() preempt_enable()
58359 +#else
58360 +#define DISABLE_PREEMPT()
58361 +#define ENABLE_PREEMPT()
58362 +#endif
58363 +
58364 +#define BEGIN_LOCKS(x) \
58365 + DISABLE_PREEMPT(); \
58366 + rcu_read_lock(); \
58367 + read_lock(&tasklist_lock); \
58368 + read_lock(&grsec_exec_file_lock); \
58369 + if (x != GR_DO_AUDIT) \
58370 + spin_lock(&grsec_alert_lock); \
58371 + else \
58372 + spin_lock(&grsec_audit_lock)
58373 +
58374 +#define END_LOCKS(x) \
58375 + if (x != GR_DO_AUDIT) \
58376 + spin_unlock(&grsec_alert_lock); \
58377 + else \
58378 + spin_unlock(&grsec_audit_lock); \
58379 + read_unlock(&grsec_exec_file_lock); \
58380 + read_unlock(&tasklist_lock); \
58381 + rcu_read_unlock(); \
58382 + ENABLE_PREEMPT(); \
58383 + if (x == GR_DONT_AUDIT) \
58384 + gr_handle_alertkill(current)
58385 +
58386 +enum {
58387 + FLOODING,
58388 + NO_FLOODING
58389 +};
58390 +
58391 +extern char *gr_alert_log_fmt;
58392 +extern char *gr_audit_log_fmt;
58393 +extern char *gr_alert_log_buf;
58394 +extern char *gr_audit_log_buf;
58395 +
58396 +static int gr_log_start(int audit)
58397 +{
58398 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
58399 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
58400 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58401 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
58402 + unsigned long curr_secs = get_seconds();
58403 +
58404 + if (audit == GR_DO_AUDIT)
58405 + goto set_fmt;
58406 +
58407 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
58408 + grsec_alert_wtime = curr_secs;
58409 + grsec_alert_fyet = 0;
58410 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
58411 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58412 + grsec_alert_fyet++;
58413 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
58414 + grsec_alert_wtime = curr_secs;
58415 + grsec_alert_fyet++;
58416 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
58417 + return FLOODING;
58418 + }
58419 + else return FLOODING;
58420 +
58421 +set_fmt:
58422 +#endif
58423 + memset(buf, 0, PAGE_SIZE);
58424 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
58425 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
58426 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58427 + } else if (current->signal->curr_ip) {
58428 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
58429 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58430 + } else if (gr_acl_is_enabled()) {
58431 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
58432 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58433 + } else {
58434 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
58435 + strcpy(buf, fmt);
58436 + }
58437 +
58438 + return NO_FLOODING;
58439 +}
58440 +
58441 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58442 + __attribute__ ((format (printf, 2, 0)));
58443 +
58444 +static void gr_log_middle(int audit, const char *msg, va_list ap)
58445 +{
58446 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58447 + unsigned int len = strlen(buf);
58448 +
58449 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58450 +
58451 + return;
58452 +}
58453 +
58454 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58455 + __attribute__ ((format (printf, 2, 3)));
58456 +
58457 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
58458 +{
58459 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58460 + unsigned int len = strlen(buf);
58461 + va_list ap;
58462 +
58463 + va_start(ap, msg);
58464 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
58465 + va_end(ap);
58466 +
58467 + return;
58468 +}
58469 +
58470 +static void gr_log_end(int audit, int append_default)
58471 +{
58472 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58473 +
58474 + if (append_default) {
58475 + unsigned int len = strlen(buf);
58476 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
58477 + }
58478 +
58479 + printk("%s\n", buf);
58480 +
58481 + return;
58482 +}
58483 +
58484 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
58485 +{
58486 + int logtype;
58487 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
58488 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
58489 + void *voidptr = NULL;
58490 + int num1 = 0, num2 = 0;
58491 + unsigned long ulong1 = 0, ulong2 = 0;
58492 + struct dentry *dentry = NULL;
58493 + struct vfsmount *mnt = NULL;
58494 + struct file *file = NULL;
58495 + struct task_struct *task = NULL;
58496 + const struct cred *cred, *pcred;
58497 + va_list ap;
58498 +
58499 + BEGIN_LOCKS(audit);
58500 + logtype = gr_log_start(audit);
58501 + if (logtype == FLOODING) {
58502 + END_LOCKS(audit);
58503 + return;
58504 + }
58505 + va_start(ap, argtypes);
58506 + switch (argtypes) {
58507 + case GR_TTYSNIFF:
58508 + task = va_arg(ap, struct task_struct *);
58509 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58510 + break;
58511 + case GR_SYSCTL_HIDDEN:
58512 + str1 = va_arg(ap, char *);
58513 + gr_log_middle_varargs(audit, msg, result, str1);
58514 + break;
58515 + case GR_RBAC:
58516 + dentry = va_arg(ap, struct dentry *);
58517 + mnt = va_arg(ap, struct vfsmount *);
58518 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
58519 + break;
58520 + case GR_RBAC_STR:
58521 + dentry = va_arg(ap, struct dentry *);
58522 + mnt = va_arg(ap, struct vfsmount *);
58523 + str1 = va_arg(ap, char *);
58524 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
58525 + break;
58526 + case GR_STR_RBAC:
58527 + str1 = va_arg(ap, char *);
58528 + dentry = va_arg(ap, struct dentry *);
58529 + mnt = va_arg(ap, struct vfsmount *);
58530 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
58531 + break;
58532 + case GR_RBAC_MODE2:
58533 + dentry = va_arg(ap, struct dentry *);
58534 + mnt = va_arg(ap, struct vfsmount *);
58535 + str1 = va_arg(ap, char *);
58536 + str2 = va_arg(ap, char *);
58537 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
58538 + break;
58539 + case GR_RBAC_MODE3:
58540 + dentry = va_arg(ap, struct dentry *);
58541 + mnt = va_arg(ap, struct vfsmount *);
58542 + str1 = va_arg(ap, char *);
58543 + str2 = va_arg(ap, char *);
58544 + str3 = va_arg(ap, char *);
58545 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58546 + break;
58547 + case GR_FILENAME:
58548 + dentry = va_arg(ap, struct dentry *);
58549 + mnt = va_arg(ap, struct vfsmount *);
58550 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58551 + break;
58552 + case GR_STR_FILENAME:
58553 + str1 = va_arg(ap, char *);
58554 + dentry = va_arg(ap, struct dentry *);
58555 + mnt = va_arg(ap, struct vfsmount *);
58556 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58557 + break;
58558 + case GR_FILENAME_STR:
58559 + dentry = va_arg(ap, struct dentry *);
58560 + mnt = va_arg(ap, struct vfsmount *);
58561 + str1 = va_arg(ap, char *);
58562 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58563 + break;
58564 + case GR_FILENAME_TWO_INT:
58565 + dentry = va_arg(ap, struct dentry *);
58566 + mnt = va_arg(ap, struct vfsmount *);
58567 + num1 = va_arg(ap, int);
58568 + num2 = va_arg(ap, int);
58569 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58570 + break;
58571 + case GR_FILENAME_TWO_INT_STR:
58572 + dentry = va_arg(ap, struct dentry *);
58573 + mnt = va_arg(ap, struct vfsmount *);
58574 + num1 = va_arg(ap, int);
58575 + num2 = va_arg(ap, int);
58576 + str1 = va_arg(ap, char *);
58577 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58578 + break;
58579 + case GR_TEXTREL:
58580 + file = va_arg(ap, struct file *);
58581 + ulong1 = va_arg(ap, unsigned long);
58582 + ulong2 = va_arg(ap, unsigned long);
58583 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58584 + break;
58585 + case GR_PTRACE:
58586 + task = va_arg(ap, struct task_struct *);
58587 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58588 + break;
58589 + case GR_RESOURCE:
58590 + task = va_arg(ap, struct task_struct *);
58591 + cred = __task_cred(task);
58592 + pcred = __task_cred(task->real_parent);
58593 + ulong1 = va_arg(ap, unsigned long);
58594 + str1 = va_arg(ap, char *);
58595 + ulong2 = va_arg(ap, unsigned long);
58596 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58597 + break;
58598 + case GR_CAP:
58599 + task = va_arg(ap, struct task_struct *);
58600 + cred = __task_cred(task);
58601 + pcred = __task_cred(task->real_parent);
58602 + str1 = va_arg(ap, char *);
58603 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58604 + break;
58605 + case GR_SIG:
58606 + str1 = va_arg(ap, char *);
58607 + voidptr = va_arg(ap, void *);
58608 + gr_log_middle_varargs(audit, msg, str1, voidptr);
58609 + break;
58610 + case GR_SIG2:
58611 + task = va_arg(ap, struct task_struct *);
58612 + cred = __task_cred(task);
58613 + pcred = __task_cred(task->real_parent);
58614 + num1 = va_arg(ap, int);
58615 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58616 + break;
58617 + case GR_CRASH1:
58618 + task = va_arg(ap, struct task_struct *);
58619 + cred = __task_cred(task);
58620 + pcred = __task_cred(task->real_parent);
58621 + ulong1 = va_arg(ap, unsigned long);
58622 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58623 + break;
58624 + case GR_CRASH2:
58625 + task = va_arg(ap, struct task_struct *);
58626 + cred = __task_cred(task);
58627 + pcred = __task_cred(task->real_parent);
58628 + ulong1 = va_arg(ap, unsigned long);
58629 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58630 + break;
58631 + case GR_RWXMAP:
58632 + file = va_arg(ap, struct file *);
58633 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58634 + break;
58635 + case GR_PSACCT:
58636 + {
58637 + unsigned int wday, cday;
58638 + __u8 whr, chr;
58639 + __u8 wmin, cmin;
58640 + __u8 wsec, csec;
58641 + char cur_tty[64] = { 0 };
58642 + char parent_tty[64] = { 0 };
58643 +
58644 + task = va_arg(ap, struct task_struct *);
58645 + wday = va_arg(ap, unsigned int);
58646 + cday = va_arg(ap, unsigned int);
58647 + whr = va_arg(ap, int);
58648 + chr = va_arg(ap, int);
58649 + wmin = va_arg(ap, int);
58650 + cmin = va_arg(ap, int);
58651 + wsec = va_arg(ap, int);
58652 + csec = va_arg(ap, int);
58653 + ulong1 = va_arg(ap, unsigned long);
58654 + cred = __task_cred(task);
58655 + pcred = __task_cred(task->real_parent);
58656 +
58657 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58658 + }
58659 + break;
58660 + default:
58661 + gr_log_middle(audit, msg, ap);
58662 + }
58663 + va_end(ap);
58664 + // these don't need DEFAULTSECARGS printed on the end
58665 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58666 + gr_log_end(audit, 0);
58667 + else
58668 + gr_log_end(audit, 1);
58669 + END_LOCKS(audit);
58670 +}
58671 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58672 new file mode 100644
58673 index 0000000..f536303
58674 --- /dev/null
58675 +++ b/grsecurity/grsec_mem.c
58676 @@ -0,0 +1,40 @@
58677 +#include <linux/kernel.h>
58678 +#include <linux/sched.h>
58679 +#include <linux/mm.h>
58680 +#include <linux/mman.h>
58681 +#include <linux/grinternal.h>
58682 +
58683 +void
58684 +gr_handle_ioperm(void)
58685 +{
58686 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58687 + return;
58688 +}
58689 +
58690 +void
58691 +gr_handle_iopl(void)
58692 +{
58693 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58694 + return;
58695 +}
58696 +
58697 +void
58698 +gr_handle_mem_readwrite(u64 from, u64 to)
58699 +{
58700 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58701 + return;
58702 +}
58703 +
58704 +void
58705 +gr_handle_vm86(void)
58706 +{
58707 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58708 + return;
58709 +}
58710 +
58711 +void
58712 +gr_log_badprocpid(const char *entry)
58713 +{
58714 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58715 + return;
58716 +}
58717 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58718 new file mode 100644
58719 index 0000000..2131422
58720 --- /dev/null
58721 +++ b/grsecurity/grsec_mount.c
58722 @@ -0,0 +1,62 @@
58723 +#include <linux/kernel.h>
58724 +#include <linux/sched.h>
58725 +#include <linux/mount.h>
58726 +#include <linux/grsecurity.h>
58727 +#include <linux/grinternal.h>
58728 +
58729 +void
58730 +gr_log_remount(const char *devname, const int retval)
58731 +{
58732 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58733 + if (grsec_enable_mount && (retval >= 0))
58734 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58735 +#endif
58736 + return;
58737 +}
58738 +
58739 +void
58740 +gr_log_unmount(const char *devname, const int retval)
58741 +{
58742 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58743 + if (grsec_enable_mount && (retval >= 0))
58744 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58745 +#endif
58746 + return;
58747 +}
58748 +
58749 +void
58750 +gr_log_mount(const char *from, const char *to, const int retval)
58751 +{
58752 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58753 + if (grsec_enable_mount && (retval >= 0))
58754 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58755 +#endif
58756 + return;
58757 +}
58758 +
58759 +int
58760 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58761 +{
58762 +#ifdef CONFIG_GRKERNSEC_ROFS
58763 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58764 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58765 + return -EPERM;
58766 + } else
58767 + return 0;
58768 +#endif
58769 + return 0;
58770 +}
58771 +
58772 +int
58773 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58774 +{
58775 +#ifdef CONFIG_GRKERNSEC_ROFS
58776 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58777 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58778 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58779 + return -EPERM;
58780 + } else
58781 + return 0;
58782 +#endif
58783 + return 0;
58784 +}
58785 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58786 new file mode 100644
58787 index 0000000..a3b12a0
58788 --- /dev/null
58789 +++ b/grsecurity/grsec_pax.c
58790 @@ -0,0 +1,36 @@
58791 +#include <linux/kernel.h>
58792 +#include <linux/sched.h>
58793 +#include <linux/mm.h>
58794 +#include <linux/file.h>
58795 +#include <linux/grinternal.h>
58796 +#include <linux/grsecurity.h>
58797 +
58798 +void
58799 +gr_log_textrel(struct vm_area_struct * vma)
58800 +{
58801 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58802 + if (grsec_enable_audit_textrel)
58803 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58804 +#endif
58805 + return;
58806 +}
58807 +
58808 +void
58809 +gr_log_rwxmmap(struct file *file)
58810 +{
58811 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58812 + if (grsec_enable_log_rwxmaps)
58813 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58814 +#endif
58815 + return;
58816 +}
58817 +
58818 +void
58819 +gr_log_rwxmprotect(struct file *file)
58820 +{
58821 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58822 + if (grsec_enable_log_rwxmaps)
58823 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58824 +#endif
58825 + return;
58826 +}
58827 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58828 new file mode 100644
58829 index 0000000..f7f29aa
58830 --- /dev/null
58831 +++ b/grsecurity/grsec_ptrace.c
58832 @@ -0,0 +1,30 @@
58833 +#include <linux/kernel.h>
58834 +#include <linux/sched.h>
58835 +#include <linux/grinternal.h>
58836 +#include <linux/security.h>
58837 +
58838 +void
58839 +gr_audit_ptrace(struct task_struct *task)
58840 +{
58841 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58842 + if (grsec_enable_audit_ptrace)
58843 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58844 +#endif
58845 + return;
58846 +}
58847 +
58848 +int
58849 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
58850 +{
58851 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58852 + const struct dentry *dentry = file->f_path.dentry;
58853 + const struct vfsmount *mnt = file->f_path.mnt;
58854 +
58855 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58856 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58857 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58858 + return -EACCES;
58859 + }
58860 +#endif
58861 + return 0;
58862 +}
58863 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58864 new file mode 100644
58865 index 0000000..7a5b2de
58866 --- /dev/null
58867 +++ b/grsecurity/grsec_sig.c
58868 @@ -0,0 +1,207 @@
58869 +#include <linux/kernel.h>
58870 +#include <linux/sched.h>
58871 +#include <linux/delay.h>
58872 +#include <linux/grsecurity.h>
58873 +#include <linux/grinternal.h>
58874 +#include <linux/hardirq.h>
58875 +
58876 +char *signames[] = {
58877 + [SIGSEGV] = "Segmentation fault",
58878 + [SIGILL] = "Illegal instruction",
58879 + [SIGABRT] = "Abort",
58880 + [SIGBUS] = "Invalid alignment/Bus error"
58881 +};
58882 +
58883 +void
58884 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58885 +{
58886 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58887 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58888 + (sig == SIGABRT) || (sig == SIGBUS))) {
58889 + if (t->pid == current->pid) {
58890 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58891 + } else {
58892 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58893 + }
58894 + }
58895 +#endif
58896 + return;
58897 +}
58898 +
58899 +int
58900 +gr_handle_signal(const struct task_struct *p, const int sig)
58901 +{
58902 +#ifdef CONFIG_GRKERNSEC
58903 + /* ignore the 0 signal for protected task checks */
58904 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58905 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58906 + return -EPERM;
58907 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58908 + return -EPERM;
58909 + }
58910 +#endif
58911 + return 0;
58912 +}
58913 +
58914 +#ifdef CONFIG_GRKERNSEC
58915 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58916 +
58917 +int gr_fake_force_sig(int sig, struct task_struct *t)
58918 +{
58919 + unsigned long int flags;
58920 + int ret, blocked, ignored;
58921 + struct k_sigaction *action;
58922 +
58923 + spin_lock_irqsave(&t->sighand->siglock, flags);
58924 + action = &t->sighand->action[sig-1];
58925 + ignored = action->sa.sa_handler == SIG_IGN;
58926 + blocked = sigismember(&t->blocked, sig);
58927 + if (blocked || ignored) {
58928 + action->sa.sa_handler = SIG_DFL;
58929 + if (blocked) {
58930 + sigdelset(&t->blocked, sig);
58931 + recalc_sigpending_and_wake(t);
58932 + }
58933 + }
58934 + if (action->sa.sa_handler == SIG_DFL)
58935 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58936 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58937 +
58938 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58939 +
58940 + return ret;
58941 +}
58942 +#endif
58943 +
58944 +#ifdef CONFIG_GRKERNSEC_BRUTE
58945 +#define GR_USER_BAN_TIME (15 * 60)
58946 +
58947 +static int __get_dumpable(unsigned long mm_flags)
58948 +{
58949 + int ret;
58950 +
58951 + ret = mm_flags & MMF_DUMPABLE_MASK;
58952 + return (ret >= 2) ? 2 : ret;
58953 +}
58954 +#endif
58955 +
58956 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58957 +{
58958 +#ifdef CONFIG_GRKERNSEC_BRUTE
58959 + uid_t uid = 0;
58960 +
58961 + if (!grsec_enable_brute)
58962 + return;
58963 +
58964 + rcu_read_lock();
58965 + read_lock(&tasklist_lock);
58966 + read_lock(&grsec_exec_file_lock);
58967 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58968 + p->real_parent->brute = 1;
58969 + else {
58970 + const struct cred *cred = __task_cred(p), *cred2;
58971 + struct task_struct *tsk, *tsk2;
58972 +
58973 + if (!__get_dumpable(mm_flags) && cred->uid) {
58974 + struct user_struct *user;
58975 +
58976 + uid = cred->uid;
58977 +
58978 + /* this is put upon execution past expiration */
58979 + user = find_user(uid);
58980 + if (user == NULL)
58981 + goto unlock;
58982 + user->banned = 1;
58983 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58984 + if (user->ban_expires == ~0UL)
58985 + user->ban_expires--;
58986 +
58987 + do_each_thread(tsk2, tsk) {
58988 + cred2 = __task_cred(tsk);
58989 + if (tsk != p && cred2->uid == uid)
58990 + gr_fake_force_sig(SIGKILL, tsk);
58991 + } while_each_thread(tsk2, tsk);
58992 + }
58993 + }
58994 +unlock:
58995 + read_unlock(&grsec_exec_file_lock);
58996 + read_unlock(&tasklist_lock);
58997 + rcu_read_unlock();
58998 +
58999 + if (uid)
59000 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
59001 +
59002 +#endif
59003 + return;
59004 +}
59005 +
59006 +void gr_handle_brute_check(void)
59007 +{
59008 +#ifdef CONFIG_GRKERNSEC_BRUTE
59009 + if (current->brute)
59010 + msleep(30 * 1000);
59011 +#endif
59012 + return;
59013 +}
59014 +
59015 +void gr_handle_kernel_exploit(void)
59016 +{
59017 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
59018 + const struct cred *cred;
59019 + struct task_struct *tsk, *tsk2;
59020 + struct user_struct *user;
59021 + uid_t uid;
59022 +
59023 + if (in_irq() || in_serving_softirq() || in_nmi())
59024 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
59025 +
59026 + uid = current_uid();
59027 +
59028 + if (uid == 0)
59029 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
59030 + else {
59031 + /* kill all the processes of this user, hold a reference
59032 + to their creds struct, and prevent them from creating
59033 + another process until system reset
59034 + */
59035 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
59036 + /* we intentionally leak this ref */
59037 + user = get_uid(current->cred->user);
59038 + if (user) {
59039 + user->banned = 1;
59040 + user->ban_expires = ~0UL;
59041 + }
59042 +
59043 + read_lock(&tasklist_lock);
59044 + do_each_thread(tsk2, tsk) {
59045 + cred = __task_cred(tsk);
59046 + if (cred->uid == uid)
59047 + gr_fake_force_sig(SIGKILL, tsk);
59048 + } while_each_thread(tsk2, tsk);
59049 + read_unlock(&tasklist_lock);
59050 + }
59051 +#endif
59052 +}
59053 +
59054 +int __gr_process_user_ban(struct user_struct *user)
59055 +{
59056 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59057 + if (unlikely(user->banned)) {
59058 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
59059 + user->banned = 0;
59060 + user->ban_expires = 0;
59061 + free_uid(user);
59062 + } else
59063 + return -EPERM;
59064 + }
59065 +#endif
59066 + return 0;
59067 +}
59068 +
59069 +int gr_process_user_ban(void)
59070 +{
59071 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59072 + return __gr_process_user_ban(current->cred->user);
59073 +#endif
59074 + return 0;
59075 +}
59076 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
59077 new file mode 100644
59078 index 0000000..4030d57
59079 --- /dev/null
59080 +++ b/grsecurity/grsec_sock.c
59081 @@ -0,0 +1,244 @@
59082 +#include <linux/kernel.h>
59083 +#include <linux/module.h>
59084 +#include <linux/sched.h>
59085 +#include <linux/file.h>
59086 +#include <linux/net.h>
59087 +#include <linux/in.h>
59088 +#include <linux/ip.h>
59089 +#include <net/sock.h>
59090 +#include <net/inet_sock.h>
59091 +#include <linux/grsecurity.h>
59092 +#include <linux/grinternal.h>
59093 +#include <linux/gracl.h>
59094 +
59095 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
59096 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
59097 +
59098 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
59099 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
59100 +
59101 +#ifdef CONFIG_UNIX_MODULE
59102 +EXPORT_SYMBOL(gr_acl_handle_unix);
59103 +EXPORT_SYMBOL(gr_acl_handle_mknod);
59104 +EXPORT_SYMBOL(gr_handle_chroot_unix);
59105 +EXPORT_SYMBOL(gr_handle_create);
59106 +#endif
59107 +
59108 +#ifdef CONFIG_GRKERNSEC
59109 +#define gr_conn_table_size 32749
59110 +struct conn_table_entry {
59111 + struct conn_table_entry *next;
59112 + struct signal_struct *sig;
59113 +};
59114 +
59115 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
59116 +DEFINE_SPINLOCK(gr_conn_table_lock);
59117 +
59118 +extern const char * gr_socktype_to_name(unsigned char type);
59119 +extern const char * gr_proto_to_name(unsigned char proto);
59120 +extern const char * gr_sockfamily_to_name(unsigned char family);
59121 +
59122 +static __inline__ int
59123 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
59124 +{
59125 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
59126 +}
59127 +
59128 +static __inline__ int
59129 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
59130 + __u16 sport, __u16 dport)
59131 +{
59132 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
59133 + sig->gr_sport == sport && sig->gr_dport == dport))
59134 + return 1;
59135 + else
59136 + return 0;
59137 +}
59138 +
59139 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
59140 +{
59141 + struct conn_table_entry **match;
59142 + unsigned int index;
59143 +
59144 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59145 + sig->gr_sport, sig->gr_dport,
59146 + gr_conn_table_size);
59147 +
59148 + newent->sig = sig;
59149 +
59150 + match = &gr_conn_table[index];
59151 + newent->next = *match;
59152 + *match = newent;
59153 +
59154 + return;
59155 +}
59156 +
59157 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
59158 +{
59159 + struct conn_table_entry *match, *last = NULL;
59160 + unsigned int index;
59161 +
59162 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
59163 + sig->gr_sport, sig->gr_dport,
59164 + gr_conn_table_size);
59165 +
59166 + match = gr_conn_table[index];
59167 + while (match && !conn_match(match->sig,
59168 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
59169 + sig->gr_dport)) {
59170 + last = match;
59171 + match = match->next;
59172 + }
59173 +
59174 + if (match) {
59175 + if (last)
59176 + last->next = match->next;
59177 + else
59178 + gr_conn_table[index] = NULL;
59179 + kfree(match);
59180 + }
59181 +
59182 + return;
59183 +}
59184 +
59185 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
59186 + __u16 sport, __u16 dport)
59187 +{
59188 + struct conn_table_entry *match;
59189 + unsigned int index;
59190 +
59191 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
59192 +
59193 + match = gr_conn_table[index];
59194 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
59195 + match = match->next;
59196 +
59197 + if (match)
59198 + return match->sig;
59199 + else
59200 + return NULL;
59201 +}
59202 +
59203 +#endif
59204 +
59205 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
59206 +{
59207 +#ifdef CONFIG_GRKERNSEC
59208 + struct signal_struct *sig = task->signal;
59209 + struct conn_table_entry *newent;
59210 +
59211 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
59212 + if (newent == NULL)
59213 + return;
59214 + /* no bh lock needed since we are called with bh disabled */
59215 + spin_lock(&gr_conn_table_lock);
59216 + gr_del_task_from_ip_table_nolock(sig);
59217 + sig->gr_saddr = inet->inet_rcv_saddr;
59218 + sig->gr_daddr = inet->inet_daddr;
59219 + sig->gr_sport = inet->inet_sport;
59220 + sig->gr_dport = inet->inet_dport;
59221 + gr_add_to_task_ip_table_nolock(sig, newent);
59222 + spin_unlock(&gr_conn_table_lock);
59223 +#endif
59224 + return;
59225 +}
59226 +
59227 +void gr_del_task_from_ip_table(struct task_struct *task)
59228 +{
59229 +#ifdef CONFIG_GRKERNSEC
59230 + spin_lock_bh(&gr_conn_table_lock);
59231 + gr_del_task_from_ip_table_nolock(task->signal);
59232 + spin_unlock_bh(&gr_conn_table_lock);
59233 +#endif
59234 + return;
59235 +}
59236 +
59237 +void
59238 +gr_attach_curr_ip(const struct sock *sk)
59239 +{
59240 +#ifdef CONFIG_GRKERNSEC
59241 + struct signal_struct *p, *set;
59242 + const struct inet_sock *inet = inet_sk(sk);
59243 +
59244 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
59245 + return;
59246 +
59247 + set = current->signal;
59248 +
59249 + spin_lock_bh(&gr_conn_table_lock);
59250 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
59251 + inet->inet_dport, inet->inet_sport);
59252 + if (unlikely(p != NULL)) {
59253 + set->curr_ip = p->curr_ip;
59254 + set->used_accept = 1;
59255 + gr_del_task_from_ip_table_nolock(p);
59256 + spin_unlock_bh(&gr_conn_table_lock);
59257 + return;
59258 + }
59259 + spin_unlock_bh(&gr_conn_table_lock);
59260 +
59261 + set->curr_ip = inet->inet_daddr;
59262 + set->used_accept = 1;
59263 +#endif
59264 + return;
59265 +}
59266 +
59267 +int
59268 +gr_handle_sock_all(const int family, const int type, const int protocol)
59269 +{
59270 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59271 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
59272 + (family != AF_UNIX)) {
59273 + if (family == AF_INET)
59274 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
59275 + else
59276 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
59277 + return -EACCES;
59278 + }
59279 +#endif
59280 + return 0;
59281 +}
59282 +
59283 +int
59284 +gr_handle_sock_server(const struct sockaddr *sck)
59285 +{
59286 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59287 + if (grsec_enable_socket_server &&
59288 + in_group_p(grsec_socket_server_gid) &&
59289 + sck && (sck->sa_family != AF_UNIX) &&
59290 + (sck->sa_family != AF_LOCAL)) {
59291 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59292 + return -EACCES;
59293 + }
59294 +#endif
59295 + return 0;
59296 +}
59297 +
59298 +int
59299 +gr_handle_sock_server_other(const struct sock *sck)
59300 +{
59301 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59302 + if (grsec_enable_socket_server &&
59303 + in_group_p(grsec_socket_server_gid) &&
59304 + sck && (sck->sk_family != AF_UNIX) &&
59305 + (sck->sk_family != AF_LOCAL)) {
59306 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
59307 + return -EACCES;
59308 + }
59309 +#endif
59310 + return 0;
59311 +}
59312 +
59313 +int
59314 +gr_handle_sock_client(const struct sockaddr *sck)
59315 +{
59316 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59317 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
59318 + sck && (sck->sa_family != AF_UNIX) &&
59319 + (sck->sa_family != AF_LOCAL)) {
59320 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
59321 + return -EACCES;
59322 + }
59323 +#endif
59324 + return 0;
59325 +}
59326 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
59327 new file mode 100644
59328 index 0000000..a1aedd7
59329 --- /dev/null
59330 +++ b/grsecurity/grsec_sysctl.c
59331 @@ -0,0 +1,451 @@
59332 +#include <linux/kernel.h>
59333 +#include <linux/sched.h>
59334 +#include <linux/sysctl.h>
59335 +#include <linux/grsecurity.h>
59336 +#include <linux/grinternal.h>
59337 +
59338 +int
59339 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
59340 +{
59341 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59342 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
59343 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
59344 + return -EACCES;
59345 + }
59346 +#endif
59347 + return 0;
59348 +}
59349 +
59350 +#ifdef CONFIG_GRKERNSEC_ROFS
59351 +static int __maybe_unused one = 1;
59352 +#endif
59353 +
59354 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59355 +struct ctl_table grsecurity_table[] = {
59356 +#ifdef CONFIG_GRKERNSEC_SYSCTL
59357 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
59358 +#ifdef CONFIG_GRKERNSEC_IO
59359 + {
59360 + .procname = "disable_priv_io",
59361 + .data = &grsec_disable_privio,
59362 + .maxlen = sizeof(int),
59363 + .mode = 0600,
59364 + .proc_handler = &proc_dointvec,
59365 + },
59366 +#endif
59367 +#endif
59368 +#ifdef CONFIG_GRKERNSEC_LINK
59369 + {
59370 + .procname = "linking_restrictions",
59371 + .data = &grsec_enable_link,
59372 + .maxlen = sizeof(int),
59373 + .mode = 0600,
59374 + .proc_handler = &proc_dointvec,
59375 + },
59376 +#endif
59377 +#ifdef CONFIG_GRKERNSEC_BRUTE
59378 + {
59379 + .procname = "deter_bruteforce",
59380 + .data = &grsec_enable_brute,
59381 + .maxlen = sizeof(int),
59382 + .mode = 0600,
59383 + .proc_handler = &proc_dointvec,
59384 + },
59385 +#endif
59386 +#ifdef CONFIG_GRKERNSEC_FIFO
59387 + {
59388 + .procname = "fifo_restrictions",
59389 + .data = &grsec_enable_fifo,
59390 + .maxlen = sizeof(int),
59391 + .mode = 0600,
59392 + .proc_handler = &proc_dointvec,
59393 + },
59394 +#endif
59395 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
59396 + {
59397 + .procname = "ptrace_readexec",
59398 + .data = &grsec_enable_ptrace_readexec,
59399 + .maxlen = sizeof(int),
59400 + .mode = 0600,
59401 + .proc_handler = &proc_dointvec,
59402 + },
59403 +#endif
59404 +#ifdef CONFIG_GRKERNSEC_SETXID
59405 + {
59406 + .procname = "consistent_setxid",
59407 + .data = &grsec_enable_setxid,
59408 + .maxlen = sizeof(int),
59409 + .mode = 0600,
59410 + .proc_handler = &proc_dointvec,
59411 + },
59412 +#endif
59413 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
59414 + {
59415 + .procname = "ip_blackhole",
59416 + .data = &grsec_enable_blackhole,
59417 + .maxlen = sizeof(int),
59418 + .mode = 0600,
59419 + .proc_handler = &proc_dointvec,
59420 + },
59421 + {
59422 + .procname = "lastack_retries",
59423 + .data = &grsec_lastack_retries,
59424 + .maxlen = sizeof(int),
59425 + .mode = 0600,
59426 + .proc_handler = &proc_dointvec,
59427 + },
59428 +#endif
59429 +#ifdef CONFIG_GRKERNSEC_EXECLOG
59430 + {
59431 + .procname = "exec_logging",
59432 + .data = &grsec_enable_execlog,
59433 + .maxlen = sizeof(int),
59434 + .mode = 0600,
59435 + .proc_handler = &proc_dointvec,
59436 + },
59437 +#endif
59438 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
59439 + {
59440 + .procname = "rwxmap_logging",
59441 + .data = &grsec_enable_log_rwxmaps,
59442 + .maxlen = sizeof(int),
59443 + .mode = 0600,
59444 + .proc_handler = &proc_dointvec,
59445 + },
59446 +#endif
59447 +#ifdef CONFIG_GRKERNSEC_SIGNAL
59448 + {
59449 + .procname = "signal_logging",
59450 + .data = &grsec_enable_signal,
59451 + .maxlen = sizeof(int),
59452 + .mode = 0600,
59453 + .proc_handler = &proc_dointvec,
59454 + },
59455 +#endif
59456 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
59457 + {
59458 + .procname = "forkfail_logging",
59459 + .data = &grsec_enable_forkfail,
59460 + .maxlen = sizeof(int),
59461 + .mode = 0600,
59462 + .proc_handler = &proc_dointvec,
59463 + },
59464 +#endif
59465 +#ifdef CONFIG_GRKERNSEC_TIME
59466 + {
59467 + .procname = "timechange_logging",
59468 + .data = &grsec_enable_time,
59469 + .maxlen = sizeof(int),
59470 + .mode = 0600,
59471 + .proc_handler = &proc_dointvec,
59472 + },
59473 +#endif
59474 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
59475 + {
59476 + .procname = "chroot_deny_shmat",
59477 + .data = &grsec_enable_chroot_shmat,
59478 + .maxlen = sizeof(int),
59479 + .mode = 0600,
59480 + .proc_handler = &proc_dointvec,
59481 + },
59482 +#endif
59483 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
59484 + {
59485 + .procname = "chroot_deny_unix",
59486 + .data = &grsec_enable_chroot_unix,
59487 + .maxlen = sizeof(int),
59488 + .mode = 0600,
59489 + .proc_handler = &proc_dointvec,
59490 + },
59491 +#endif
59492 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
59493 + {
59494 + .procname = "chroot_deny_mount",
59495 + .data = &grsec_enable_chroot_mount,
59496 + .maxlen = sizeof(int),
59497 + .mode = 0600,
59498 + .proc_handler = &proc_dointvec,
59499 + },
59500 +#endif
59501 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
59502 + {
59503 + .procname = "chroot_deny_fchdir",
59504 + .data = &grsec_enable_chroot_fchdir,
59505 + .maxlen = sizeof(int),
59506 + .mode = 0600,
59507 + .proc_handler = &proc_dointvec,
59508 + },
59509 +#endif
59510 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
59511 + {
59512 + .procname = "chroot_deny_chroot",
59513 + .data = &grsec_enable_chroot_double,
59514 + .maxlen = sizeof(int),
59515 + .mode = 0600,
59516 + .proc_handler = &proc_dointvec,
59517 + },
59518 +#endif
59519 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
59520 + {
59521 + .procname = "chroot_deny_pivot",
59522 + .data = &grsec_enable_chroot_pivot,
59523 + .maxlen = sizeof(int),
59524 + .mode = 0600,
59525 + .proc_handler = &proc_dointvec,
59526 + },
59527 +#endif
59528 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59529 + {
59530 + .procname = "chroot_enforce_chdir",
59531 + .data = &grsec_enable_chroot_chdir,
59532 + .maxlen = sizeof(int),
59533 + .mode = 0600,
59534 + .proc_handler = &proc_dointvec,
59535 + },
59536 +#endif
59537 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59538 + {
59539 + .procname = "chroot_deny_chmod",
59540 + .data = &grsec_enable_chroot_chmod,
59541 + .maxlen = sizeof(int),
59542 + .mode = 0600,
59543 + .proc_handler = &proc_dointvec,
59544 + },
59545 +#endif
59546 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59547 + {
59548 + .procname = "chroot_deny_mknod",
59549 + .data = &grsec_enable_chroot_mknod,
59550 + .maxlen = sizeof(int),
59551 + .mode = 0600,
59552 + .proc_handler = &proc_dointvec,
59553 + },
59554 +#endif
59555 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59556 + {
59557 + .procname = "chroot_restrict_nice",
59558 + .data = &grsec_enable_chroot_nice,
59559 + .maxlen = sizeof(int),
59560 + .mode = 0600,
59561 + .proc_handler = &proc_dointvec,
59562 + },
59563 +#endif
59564 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59565 + {
59566 + .procname = "chroot_execlog",
59567 + .data = &grsec_enable_chroot_execlog,
59568 + .maxlen = sizeof(int),
59569 + .mode = 0600,
59570 + .proc_handler = &proc_dointvec,
59571 + },
59572 +#endif
59573 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
59574 + {
59575 + .procname = "chroot_caps",
59576 + .data = &grsec_enable_chroot_caps,
59577 + .maxlen = sizeof(int),
59578 + .mode = 0600,
59579 + .proc_handler = &proc_dointvec,
59580 + },
59581 +#endif
59582 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59583 + {
59584 + .procname = "chroot_deny_sysctl",
59585 + .data = &grsec_enable_chroot_sysctl,
59586 + .maxlen = sizeof(int),
59587 + .mode = 0600,
59588 + .proc_handler = &proc_dointvec,
59589 + },
59590 +#endif
59591 +#ifdef CONFIG_GRKERNSEC_TPE
59592 + {
59593 + .procname = "tpe",
59594 + .data = &grsec_enable_tpe,
59595 + .maxlen = sizeof(int),
59596 + .mode = 0600,
59597 + .proc_handler = &proc_dointvec,
59598 + },
59599 + {
59600 + .procname = "tpe_gid",
59601 + .data = &grsec_tpe_gid,
59602 + .maxlen = sizeof(int),
59603 + .mode = 0600,
59604 + .proc_handler = &proc_dointvec,
59605 + },
59606 +#endif
59607 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59608 + {
59609 + .procname = "tpe_invert",
59610 + .data = &grsec_enable_tpe_invert,
59611 + .maxlen = sizeof(int),
59612 + .mode = 0600,
59613 + .proc_handler = &proc_dointvec,
59614 + },
59615 +#endif
59616 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59617 + {
59618 + .procname = "tpe_restrict_all",
59619 + .data = &grsec_enable_tpe_all,
59620 + .maxlen = sizeof(int),
59621 + .mode = 0600,
59622 + .proc_handler = &proc_dointvec,
59623 + },
59624 +#endif
59625 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59626 + {
59627 + .procname = "socket_all",
59628 + .data = &grsec_enable_socket_all,
59629 + .maxlen = sizeof(int),
59630 + .mode = 0600,
59631 + .proc_handler = &proc_dointvec,
59632 + },
59633 + {
59634 + .procname = "socket_all_gid",
59635 + .data = &grsec_socket_all_gid,
59636 + .maxlen = sizeof(int),
59637 + .mode = 0600,
59638 + .proc_handler = &proc_dointvec,
59639 + },
59640 +#endif
59641 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59642 + {
59643 + .procname = "socket_client",
59644 + .data = &grsec_enable_socket_client,
59645 + .maxlen = sizeof(int),
59646 + .mode = 0600,
59647 + .proc_handler = &proc_dointvec,
59648 + },
59649 + {
59650 + .procname = "socket_client_gid",
59651 + .data = &grsec_socket_client_gid,
59652 + .maxlen = sizeof(int),
59653 + .mode = 0600,
59654 + .proc_handler = &proc_dointvec,
59655 + },
59656 +#endif
59657 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59658 + {
59659 + .procname = "socket_server",
59660 + .data = &grsec_enable_socket_server,
59661 + .maxlen = sizeof(int),
59662 + .mode = 0600,
59663 + .proc_handler = &proc_dointvec,
59664 + },
59665 + {
59666 + .procname = "socket_server_gid",
59667 + .data = &grsec_socket_server_gid,
59668 + .maxlen = sizeof(int),
59669 + .mode = 0600,
59670 + .proc_handler = &proc_dointvec,
59671 + },
59672 +#endif
59673 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59674 + {
59675 + .procname = "audit_group",
59676 + .data = &grsec_enable_group,
59677 + .maxlen = sizeof(int),
59678 + .mode = 0600,
59679 + .proc_handler = &proc_dointvec,
59680 + },
59681 + {
59682 + .procname = "audit_gid",
59683 + .data = &grsec_audit_gid,
59684 + .maxlen = sizeof(int),
59685 + .mode = 0600,
59686 + .proc_handler = &proc_dointvec,
59687 + },
59688 +#endif
59689 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59690 + {
59691 + .procname = "audit_chdir",
59692 + .data = &grsec_enable_chdir,
59693 + .maxlen = sizeof(int),
59694 + .mode = 0600,
59695 + .proc_handler = &proc_dointvec,
59696 + },
59697 +#endif
59698 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59699 + {
59700 + .procname = "audit_mount",
59701 + .data = &grsec_enable_mount,
59702 + .maxlen = sizeof(int),
59703 + .mode = 0600,
59704 + .proc_handler = &proc_dointvec,
59705 + },
59706 +#endif
59707 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59708 + {
59709 + .procname = "audit_textrel",
59710 + .data = &grsec_enable_audit_textrel,
59711 + .maxlen = sizeof(int),
59712 + .mode = 0600,
59713 + .proc_handler = &proc_dointvec,
59714 + },
59715 +#endif
59716 +#ifdef CONFIG_GRKERNSEC_DMESG
59717 + {
59718 + .procname = "dmesg",
59719 + .data = &grsec_enable_dmesg,
59720 + .maxlen = sizeof(int),
59721 + .mode = 0600,
59722 + .proc_handler = &proc_dointvec,
59723 + },
59724 +#endif
59725 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59726 + {
59727 + .procname = "chroot_findtask",
59728 + .data = &grsec_enable_chroot_findtask,
59729 + .maxlen = sizeof(int),
59730 + .mode = 0600,
59731 + .proc_handler = &proc_dointvec,
59732 + },
59733 +#endif
59734 +#ifdef CONFIG_GRKERNSEC_RESLOG
59735 + {
59736 + .procname = "resource_logging",
59737 + .data = &grsec_resource_logging,
59738 + .maxlen = sizeof(int),
59739 + .mode = 0600,
59740 + .proc_handler = &proc_dointvec,
59741 + },
59742 +#endif
59743 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59744 + {
59745 + .procname = "audit_ptrace",
59746 + .data = &grsec_enable_audit_ptrace,
59747 + .maxlen = sizeof(int),
59748 + .mode = 0600,
59749 + .proc_handler = &proc_dointvec,
59750 + },
59751 +#endif
59752 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59753 + {
59754 + .procname = "harden_ptrace",
59755 + .data = &grsec_enable_harden_ptrace,
59756 + .maxlen = sizeof(int),
59757 + .mode = 0600,
59758 + .proc_handler = &proc_dointvec,
59759 + },
59760 +#endif
59761 + {
59762 + .procname = "grsec_lock",
59763 + .data = &grsec_lock,
59764 + .maxlen = sizeof(int),
59765 + .mode = 0600,
59766 + .proc_handler = &proc_dointvec,
59767 + },
59768 +#endif
59769 +#ifdef CONFIG_GRKERNSEC_ROFS
59770 + {
59771 + .procname = "romount_protect",
59772 + .data = &grsec_enable_rofs,
59773 + .maxlen = sizeof(int),
59774 + .mode = 0600,
59775 + .proc_handler = &proc_dointvec_minmax,
59776 + .extra1 = &one,
59777 + .extra2 = &one,
59778 + },
59779 +#endif
59780 + { }
59781 +};
59782 +#endif
59783 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59784 new file mode 100644
59785 index 0000000..0dc13c3
59786 --- /dev/null
59787 +++ b/grsecurity/grsec_time.c
59788 @@ -0,0 +1,16 @@
59789 +#include <linux/kernel.h>
59790 +#include <linux/sched.h>
59791 +#include <linux/grinternal.h>
59792 +#include <linux/module.h>
59793 +
59794 +void
59795 +gr_log_timechange(void)
59796 +{
59797 +#ifdef CONFIG_GRKERNSEC_TIME
59798 + if (grsec_enable_time)
59799 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59800 +#endif
59801 + return;
59802 +}
59803 +
59804 +EXPORT_SYMBOL(gr_log_timechange);
59805 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59806 new file mode 100644
59807 index 0000000..07e0dc0
59808 --- /dev/null
59809 +++ b/grsecurity/grsec_tpe.c
59810 @@ -0,0 +1,73 @@
59811 +#include <linux/kernel.h>
59812 +#include <linux/sched.h>
59813 +#include <linux/file.h>
59814 +#include <linux/fs.h>
59815 +#include <linux/grinternal.h>
59816 +
59817 +extern int gr_acl_tpe_check(void);
59818 +
59819 +int
59820 +gr_tpe_allow(const struct file *file)
59821 +{
59822 +#ifdef CONFIG_GRKERNSEC
59823 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59824 + const struct cred *cred = current_cred();
59825 + char *msg = NULL;
59826 + char *msg2 = NULL;
59827 +
59828 + // never restrict root
59829 + if (!cred->uid)
59830 + return 1;
59831 +
59832 + if (grsec_enable_tpe) {
59833 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59834 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59835 + msg = "not being in trusted group";
59836 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59837 + msg = "being in untrusted group";
59838 +#else
59839 + if (in_group_p(grsec_tpe_gid))
59840 + msg = "being in untrusted group";
59841 +#endif
59842 + }
59843 + if (!msg && gr_acl_tpe_check())
59844 + msg = "being in untrusted role";
59845 +
59846 + // not in any affected group/role
59847 + if (!msg)
59848 + goto next_check;
59849 +
59850 + if (inode->i_uid)
59851 + msg2 = "file in non-root-owned directory";
59852 + else if (inode->i_mode & S_IWOTH)
59853 + msg2 = "file in world-writable directory";
59854 + else if (inode->i_mode & S_IWGRP)
59855 + msg2 = "file in group-writable directory";
59856 +
59857 + if (msg && msg2) {
59858 + char fullmsg[70] = {0};
59859 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59860 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59861 + return 0;
59862 + }
59863 + msg = NULL;
59864 +next_check:
59865 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59866 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59867 + return 1;
59868 +
59869 + if (inode->i_uid && (inode->i_uid != cred->uid))
59870 + msg = "directory not owned by user";
59871 + else if (inode->i_mode & S_IWOTH)
59872 + msg = "file in world-writable directory";
59873 + else if (inode->i_mode & S_IWGRP)
59874 + msg = "file in group-writable directory";
59875 +
59876 + if (msg) {
59877 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59878 + return 0;
59879 + }
59880 +#endif
59881 +#endif
59882 + return 1;
59883 +}
59884 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59885 new file mode 100644
59886 index 0000000..9f7b1ac
59887 --- /dev/null
59888 +++ b/grsecurity/grsum.c
59889 @@ -0,0 +1,61 @@
59890 +#include <linux/err.h>
59891 +#include <linux/kernel.h>
59892 +#include <linux/sched.h>
59893 +#include <linux/mm.h>
59894 +#include <linux/scatterlist.h>
59895 +#include <linux/crypto.h>
59896 +#include <linux/gracl.h>
59897 +
59898 +
59899 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59900 +#error "crypto and sha256 must be built into the kernel"
59901 +#endif
59902 +
59903 +int
59904 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59905 +{
59906 + char *p;
59907 + struct crypto_hash *tfm;
59908 + struct hash_desc desc;
59909 + struct scatterlist sg;
59910 + unsigned char temp_sum[GR_SHA_LEN];
59911 + volatile int retval = 0;
59912 + volatile int dummy = 0;
59913 + unsigned int i;
59914 +
59915 + sg_init_table(&sg, 1);
59916 +
59917 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59918 + if (IS_ERR(tfm)) {
59919 + /* should never happen, since sha256 should be built in */
59920 + return 1;
59921 + }
59922 +
59923 + desc.tfm = tfm;
59924 + desc.flags = 0;
59925 +
59926 + crypto_hash_init(&desc);
59927 +
59928 + p = salt;
59929 + sg_set_buf(&sg, p, GR_SALT_LEN);
59930 + crypto_hash_update(&desc, &sg, sg.length);
59931 +
59932 + p = entry->pw;
59933 + sg_set_buf(&sg, p, strlen(p));
59934 +
59935 + crypto_hash_update(&desc, &sg, sg.length);
59936 +
59937 + crypto_hash_final(&desc, temp_sum);
59938 +
59939 + memset(entry->pw, 0, GR_PW_LEN);
59940 +
59941 + for (i = 0; i < GR_SHA_LEN; i++)
59942 + if (sum[i] != temp_sum[i])
59943 + retval = 1;
59944 + else
59945 + dummy = 1; // waste a cycle
59946 +
59947 + crypto_free_hash(tfm);
59948 +
59949 + return retval;
59950 +}
59951 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59952 index 6cd5b64..f620d2d 100644
59953 --- a/include/acpi/acpi_bus.h
59954 +++ b/include/acpi/acpi_bus.h
59955 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59956 acpi_op_bind bind;
59957 acpi_op_unbind unbind;
59958 acpi_op_notify notify;
59959 -};
59960 +} __no_const;
59961
59962 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59963
59964 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59965 index b7babf0..71e4e74 100644
59966 --- a/include/asm-generic/atomic-long.h
59967 +++ b/include/asm-generic/atomic-long.h
59968 @@ -22,6 +22,12 @@
59969
59970 typedef atomic64_t atomic_long_t;
59971
59972 +#ifdef CONFIG_PAX_REFCOUNT
59973 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59974 +#else
59975 +typedef atomic64_t atomic_long_unchecked_t;
59976 +#endif
59977 +
59978 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59979
59980 static inline long atomic_long_read(atomic_long_t *l)
59981 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59982 return (long)atomic64_read(v);
59983 }
59984
59985 +#ifdef CONFIG_PAX_REFCOUNT
59986 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59987 +{
59988 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59989 +
59990 + return (long)atomic64_read_unchecked(v);
59991 +}
59992 +#endif
59993 +
59994 static inline void atomic_long_set(atomic_long_t *l, long i)
59995 {
59996 atomic64_t *v = (atomic64_t *)l;
59997 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59998 atomic64_set(v, i);
59999 }
60000
60001 +#ifdef CONFIG_PAX_REFCOUNT
60002 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60003 +{
60004 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60005 +
60006 + atomic64_set_unchecked(v, i);
60007 +}
60008 +#endif
60009 +
60010 static inline void atomic_long_inc(atomic_long_t *l)
60011 {
60012 atomic64_t *v = (atomic64_t *)l;
60013 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60014 atomic64_inc(v);
60015 }
60016
60017 +#ifdef CONFIG_PAX_REFCOUNT
60018 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60019 +{
60020 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60021 +
60022 + atomic64_inc_unchecked(v);
60023 +}
60024 +#endif
60025 +
60026 static inline void atomic_long_dec(atomic_long_t *l)
60027 {
60028 atomic64_t *v = (atomic64_t *)l;
60029 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60030 atomic64_dec(v);
60031 }
60032
60033 +#ifdef CONFIG_PAX_REFCOUNT
60034 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60035 +{
60036 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60037 +
60038 + atomic64_dec_unchecked(v);
60039 +}
60040 +#endif
60041 +
60042 static inline void atomic_long_add(long i, atomic_long_t *l)
60043 {
60044 atomic64_t *v = (atomic64_t *)l;
60045 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60046 atomic64_add(i, v);
60047 }
60048
60049 +#ifdef CONFIG_PAX_REFCOUNT
60050 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60051 +{
60052 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60053 +
60054 + atomic64_add_unchecked(i, v);
60055 +}
60056 +#endif
60057 +
60058 static inline void atomic_long_sub(long i, atomic_long_t *l)
60059 {
60060 atomic64_t *v = (atomic64_t *)l;
60061 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60062 atomic64_sub(i, v);
60063 }
60064
60065 +#ifdef CONFIG_PAX_REFCOUNT
60066 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60067 +{
60068 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60069 +
60070 + atomic64_sub_unchecked(i, v);
60071 +}
60072 +#endif
60073 +
60074 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60075 {
60076 atomic64_t *v = (atomic64_t *)l;
60077 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60078 return (long)atomic64_inc_return(v);
60079 }
60080
60081 +#ifdef CONFIG_PAX_REFCOUNT
60082 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60083 +{
60084 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
60085 +
60086 + return (long)atomic64_inc_return_unchecked(v);
60087 +}
60088 +#endif
60089 +
60090 static inline long atomic_long_dec_return(atomic_long_t *l)
60091 {
60092 atomic64_t *v = (atomic64_t *)l;
60093 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60094
60095 typedef atomic_t atomic_long_t;
60096
60097 +#ifdef CONFIG_PAX_REFCOUNT
60098 +typedef atomic_unchecked_t atomic_long_unchecked_t;
60099 +#else
60100 +typedef atomic_t atomic_long_unchecked_t;
60101 +#endif
60102 +
60103 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
60104 static inline long atomic_long_read(atomic_long_t *l)
60105 {
60106 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
60107 return (long)atomic_read(v);
60108 }
60109
60110 +#ifdef CONFIG_PAX_REFCOUNT
60111 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
60112 +{
60113 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60114 +
60115 + return (long)atomic_read_unchecked(v);
60116 +}
60117 +#endif
60118 +
60119 static inline void atomic_long_set(atomic_long_t *l, long i)
60120 {
60121 atomic_t *v = (atomic_t *)l;
60122 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
60123 atomic_set(v, i);
60124 }
60125
60126 +#ifdef CONFIG_PAX_REFCOUNT
60127 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
60128 +{
60129 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60130 +
60131 + atomic_set_unchecked(v, i);
60132 +}
60133 +#endif
60134 +
60135 static inline void atomic_long_inc(atomic_long_t *l)
60136 {
60137 atomic_t *v = (atomic_t *)l;
60138 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
60139 atomic_inc(v);
60140 }
60141
60142 +#ifdef CONFIG_PAX_REFCOUNT
60143 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
60144 +{
60145 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60146 +
60147 + atomic_inc_unchecked(v);
60148 +}
60149 +#endif
60150 +
60151 static inline void atomic_long_dec(atomic_long_t *l)
60152 {
60153 atomic_t *v = (atomic_t *)l;
60154 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
60155 atomic_dec(v);
60156 }
60157
60158 +#ifdef CONFIG_PAX_REFCOUNT
60159 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
60160 +{
60161 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60162 +
60163 + atomic_dec_unchecked(v);
60164 +}
60165 +#endif
60166 +
60167 static inline void atomic_long_add(long i, atomic_long_t *l)
60168 {
60169 atomic_t *v = (atomic_t *)l;
60170 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
60171 atomic_add(i, v);
60172 }
60173
60174 +#ifdef CONFIG_PAX_REFCOUNT
60175 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
60176 +{
60177 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60178 +
60179 + atomic_add_unchecked(i, v);
60180 +}
60181 +#endif
60182 +
60183 static inline void atomic_long_sub(long i, atomic_long_t *l)
60184 {
60185 atomic_t *v = (atomic_t *)l;
60186 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
60187 atomic_sub(i, v);
60188 }
60189
60190 +#ifdef CONFIG_PAX_REFCOUNT
60191 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
60192 +{
60193 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60194 +
60195 + atomic_sub_unchecked(i, v);
60196 +}
60197 +#endif
60198 +
60199 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
60200 {
60201 atomic_t *v = (atomic_t *)l;
60202 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
60203 return (long)atomic_inc_return(v);
60204 }
60205
60206 +#ifdef CONFIG_PAX_REFCOUNT
60207 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
60208 +{
60209 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
60210 +
60211 + return (long)atomic_inc_return_unchecked(v);
60212 +}
60213 +#endif
60214 +
60215 static inline long atomic_long_dec_return(atomic_long_t *l)
60216 {
60217 atomic_t *v = (atomic_t *)l;
60218 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
60219
60220 #endif /* BITS_PER_LONG == 64 */
60221
60222 +#ifdef CONFIG_PAX_REFCOUNT
60223 +static inline void pax_refcount_needs_these_functions(void)
60224 +{
60225 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
60226 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
60227 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
60228 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
60229 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
60230 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
60231 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
60232 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
60233 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
60234 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
60235 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
60236 +
60237 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
60238 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
60239 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
60240 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
60241 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
60242 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
60243 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
60244 +}
60245 +#else
60246 +#define atomic_read_unchecked(v) atomic_read(v)
60247 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
60248 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
60249 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
60250 +#define atomic_inc_unchecked(v) atomic_inc(v)
60251 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
60252 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
60253 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
60254 +#define atomic_dec_unchecked(v) atomic_dec(v)
60255 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
60256 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
60257 +
60258 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
60259 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
60260 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
60261 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
60262 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
60263 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
60264 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
60265 +#endif
60266 +
60267 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
60268 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
60269 index b18ce4f..2ee2843 100644
60270 --- a/include/asm-generic/atomic64.h
60271 +++ b/include/asm-generic/atomic64.h
60272 @@ -16,6 +16,8 @@ typedef struct {
60273 long long counter;
60274 } atomic64_t;
60275
60276 +typedef atomic64_t atomic64_unchecked_t;
60277 +
60278 #define ATOMIC64_INIT(i) { (i) }
60279
60280 extern long long atomic64_read(const atomic64_t *v);
60281 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
60282 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
60283 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
60284
60285 +#define atomic64_read_unchecked(v) atomic64_read(v)
60286 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
60287 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
60288 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
60289 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
60290 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
60291 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
60292 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
60293 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
60294 +
60295 #endif /* _ASM_GENERIC_ATOMIC64_H */
60296 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
60297 index 1bfcfe5..e04c5c9 100644
60298 --- a/include/asm-generic/cache.h
60299 +++ b/include/asm-generic/cache.h
60300 @@ -6,7 +6,7 @@
60301 * cache lines need to provide their own cache.h.
60302 */
60303
60304 -#define L1_CACHE_SHIFT 5
60305 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
60306 +#define L1_CACHE_SHIFT 5UL
60307 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
60308
60309 #endif /* __ASM_GENERIC_CACHE_H */
60310 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
60311 index 0d68a1e..b74a761 100644
60312 --- a/include/asm-generic/emergency-restart.h
60313 +++ b/include/asm-generic/emergency-restart.h
60314 @@ -1,7 +1,7 @@
60315 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
60316 #define _ASM_GENERIC_EMERGENCY_RESTART_H
60317
60318 -static inline void machine_emergency_restart(void)
60319 +static inline __noreturn void machine_emergency_restart(void)
60320 {
60321 machine_restart(NULL);
60322 }
60323 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
60324 index 0232ccb..13d9165 100644
60325 --- a/include/asm-generic/kmap_types.h
60326 +++ b/include/asm-generic/kmap_types.h
60327 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
60328 KMAP_D(17) KM_NMI,
60329 KMAP_D(18) KM_NMI_PTE,
60330 KMAP_D(19) KM_KDB,
60331 +KMAP_D(20) KM_CLEARPAGE,
60332 /*
60333 * Remember to update debug_kmap_atomic() when adding new kmap types!
60334 */
60335 -KMAP_D(20) KM_TYPE_NR
60336 +KMAP_D(21) KM_TYPE_NR
60337 };
60338
60339 #undef KMAP_D
60340 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
60341 index 9ceb03b..2efbcbd 100644
60342 --- a/include/asm-generic/local.h
60343 +++ b/include/asm-generic/local.h
60344 @@ -39,6 +39,7 @@ typedef struct
60345 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
60346 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
60347 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
60348 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
60349
60350 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
60351 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
60352 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
60353 index 725612b..9cc513a 100644
60354 --- a/include/asm-generic/pgtable-nopmd.h
60355 +++ b/include/asm-generic/pgtable-nopmd.h
60356 @@ -1,14 +1,19 @@
60357 #ifndef _PGTABLE_NOPMD_H
60358 #define _PGTABLE_NOPMD_H
60359
60360 -#ifndef __ASSEMBLY__
60361 -
60362 #include <asm-generic/pgtable-nopud.h>
60363
60364 -struct mm_struct;
60365 -
60366 #define __PAGETABLE_PMD_FOLDED
60367
60368 +#define PMD_SHIFT PUD_SHIFT
60369 +#define PTRS_PER_PMD 1
60370 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
60371 +#define PMD_MASK (~(PMD_SIZE-1))
60372 +
60373 +#ifndef __ASSEMBLY__
60374 +
60375 +struct mm_struct;
60376 +
60377 /*
60378 * Having the pmd type consist of a pud gets the size right, and allows
60379 * us to conceptually access the pud entry that this pmd is folded into
60380 @@ -16,11 +21,6 @@ struct mm_struct;
60381 */
60382 typedef struct { pud_t pud; } pmd_t;
60383
60384 -#define PMD_SHIFT PUD_SHIFT
60385 -#define PTRS_PER_PMD 1
60386 -#define PMD_SIZE (1UL << PMD_SHIFT)
60387 -#define PMD_MASK (~(PMD_SIZE-1))
60388 -
60389 /*
60390 * The "pud_xxx()" functions here are trivial for a folded two-level
60391 * setup: the pmd is never bad, and a pmd always exists (as it's folded
60392 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
60393 index 810431d..ccc3638 100644
60394 --- a/include/asm-generic/pgtable-nopud.h
60395 +++ b/include/asm-generic/pgtable-nopud.h
60396 @@ -1,10 +1,15 @@
60397 #ifndef _PGTABLE_NOPUD_H
60398 #define _PGTABLE_NOPUD_H
60399
60400 -#ifndef __ASSEMBLY__
60401 -
60402 #define __PAGETABLE_PUD_FOLDED
60403
60404 +#define PUD_SHIFT PGDIR_SHIFT
60405 +#define PTRS_PER_PUD 1
60406 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
60407 +#define PUD_MASK (~(PUD_SIZE-1))
60408 +
60409 +#ifndef __ASSEMBLY__
60410 +
60411 /*
60412 * Having the pud type consist of a pgd gets the size right, and allows
60413 * us to conceptually access the pgd entry that this pud is folded into
60414 @@ -12,11 +17,6 @@
60415 */
60416 typedef struct { pgd_t pgd; } pud_t;
60417
60418 -#define PUD_SHIFT PGDIR_SHIFT
60419 -#define PTRS_PER_PUD 1
60420 -#define PUD_SIZE (1UL << PUD_SHIFT)
60421 -#define PUD_MASK (~(PUD_SIZE-1))
60422 -
60423 /*
60424 * The "pgd_xxx()" functions here are trivial for a folded two-level
60425 * setup: the pud is never bad, and a pud always exists (as it's folded
60426 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
60427 index a03c098..7e5b223 100644
60428 --- a/include/asm-generic/pgtable.h
60429 +++ b/include/asm-generic/pgtable.h
60430 @@ -502,6 +502,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
60431 #endif
60432 }
60433
60434 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
60435 +static inline unsigned long pax_open_kernel(void) { return 0; }
60436 +#endif
60437 +
60438 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
60439 +static inline unsigned long pax_close_kernel(void) { return 0; }
60440 +#endif
60441 +
60442 #endif /* CONFIG_MMU */
60443
60444 #endif /* !__ASSEMBLY__ */
60445 diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
60446 index 9788568..510dece 100644
60447 --- a/include/asm-generic/uaccess.h
60448 +++ b/include/asm-generic/uaccess.h
60449 @@ -76,6 +76,8 @@ extern unsigned long search_exception_table(unsigned long);
60450 */
60451 #ifndef __copy_from_user
60452 static inline __must_check long __copy_from_user(void *to,
60453 + const void __user * from, unsigned long n) __size_overflow(3);
60454 +static inline __must_check long __copy_from_user(void *to,
60455 const void __user * from, unsigned long n)
60456 {
60457 if (__builtin_constant_p(n)) {
60458 @@ -106,6 +108,8 @@ static inline __must_check long __copy_from_user(void *to,
60459
60460 #ifndef __copy_to_user
60461 static inline __must_check long __copy_to_user(void __user *to,
60462 + const void *from, unsigned long n) __size_overflow(3);
60463 +static inline __must_check long __copy_to_user(void __user *to,
60464 const void *from, unsigned long n)
60465 {
60466 if (__builtin_constant_p(n)) {
60467 @@ -224,6 +228,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
60468 -EFAULT; \
60469 })
60470
60471 +static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) __size_overflow(1);
60472 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
60473 {
60474 size = __copy_from_user(x, ptr, size);
60475 @@ -240,6 +245,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
60476 #define __copy_to_user_inatomic __copy_to_user
60477 #endif
60478
60479 +static inline long copy_from_user(void *to, const void __user * from, unsigned long n) __size_overflow(3);
60480 static inline long copy_from_user(void *to,
60481 const void __user * from, unsigned long n)
60482 {
60483 @@ -250,6 +256,7 @@ static inline long copy_from_user(void *to,
60484 return n;
60485 }
60486
60487 +static inline long copy_to_user(void __user *to, const void *from, unsigned long n) __size_overflow(3);
60488 static inline long copy_to_user(void __user *to,
60489 const void *from, unsigned long n)
60490 {
60491 @@ -314,6 +321,8 @@ static inline long strlen_user(const char __user *src)
60492 */
60493 #ifndef __clear_user
60494 static inline __must_check unsigned long
60495 +__clear_user(void __user *to, unsigned long n) __size_overflow(2);
60496 +static inline __must_check unsigned long
60497 __clear_user(void __user *to, unsigned long n)
60498 {
60499 memset((void __force *)to, 0, n);
60500 @@ -322,6 +331,8 @@ __clear_user(void __user *to, unsigned long n)
60501 #endif
60502
60503 static inline __must_check unsigned long
60504 +clear_user(void __user *to, unsigned long n) __size_overflow(2);
60505 +static inline __must_check unsigned long
60506 clear_user(void __user *to, unsigned long n)
60507 {
60508 might_sleep();
60509 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
60510 index b5e2e4c..6a5373e 100644
60511 --- a/include/asm-generic/vmlinux.lds.h
60512 +++ b/include/asm-generic/vmlinux.lds.h
60513 @@ -217,6 +217,7 @@
60514 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
60515 VMLINUX_SYMBOL(__start_rodata) = .; \
60516 *(.rodata) *(.rodata.*) \
60517 + *(.data..read_only) \
60518 *(__vermagic) /* Kernel version magic */ \
60519 . = ALIGN(8); \
60520 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
60521 @@ -722,17 +723,18 @@
60522 * section in the linker script will go there too. @phdr should have
60523 * a leading colon.
60524 *
60525 - * Note that this macros defines __per_cpu_load as an absolute symbol.
60526 + * Note that this macros defines per_cpu_load as an absolute symbol.
60527 * If there is no need to put the percpu section at a predetermined
60528 * address, use PERCPU_SECTION.
60529 */
60530 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
60531 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
60532 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
60533 + per_cpu_load = .; \
60534 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
60535 - LOAD_OFFSET) { \
60536 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
60537 PERCPU_INPUT(cacheline) \
60538 } phdr \
60539 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
60540 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
60541
60542 /**
60543 * PERCPU_SECTION - define output section for percpu area, simple version
60544 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
60545 index 92f0981..d44a37c 100644
60546 --- a/include/drm/drmP.h
60547 +++ b/include/drm/drmP.h
60548 @@ -72,6 +72,7 @@
60549 #include <linux/workqueue.h>
60550 #include <linux/poll.h>
60551 #include <asm/pgalloc.h>
60552 +#include <asm/local.h>
60553 #include "drm.h"
60554
60555 #include <linux/idr.h>
60556 @@ -1038,7 +1039,7 @@ struct drm_device {
60557
60558 /** \name Usage Counters */
60559 /*@{ */
60560 - int open_count; /**< Outstanding files open */
60561 + local_t open_count; /**< Outstanding files open */
60562 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60563 atomic_t vma_count; /**< Outstanding vma areas open */
60564 int buf_use; /**< Buffers in use -- cannot alloc */
60565 @@ -1049,7 +1050,7 @@ struct drm_device {
60566 /*@{ */
60567 unsigned long counters;
60568 enum drm_stat_type types[15];
60569 - atomic_t counts[15];
60570 + atomic_unchecked_t counts[15];
60571 /*@} */
60572
60573 struct list_head filelist;
60574 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
60575 index 37515d1..34fa8b0 100644
60576 --- a/include/drm/drm_crtc_helper.h
60577 +++ b/include/drm/drm_crtc_helper.h
60578 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60579
60580 /* disable crtc when not in use - more explicit than dpms off */
60581 void (*disable)(struct drm_crtc *crtc);
60582 -};
60583 +} __no_const;
60584
60585 struct drm_encoder_helper_funcs {
60586 void (*dpms)(struct drm_encoder *encoder, int mode);
60587 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60588 struct drm_connector *connector);
60589 /* disable encoder when not in use - more explicit than dpms off */
60590 void (*disable)(struct drm_encoder *encoder);
60591 -};
60592 +} __no_const;
60593
60594 struct drm_connector_helper_funcs {
60595 int (*get_modes)(struct drm_connector *connector);
60596 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
60597 index 26c1f78..6722682 100644
60598 --- a/include/drm/ttm/ttm_memory.h
60599 +++ b/include/drm/ttm/ttm_memory.h
60600 @@ -47,7 +47,7 @@
60601
60602 struct ttm_mem_shrink {
60603 int (*do_shrink) (struct ttm_mem_shrink *);
60604 -};
60605 +} __no_const;
60606
60607 /**
60608 * struct ttm_mem_global - Global memory accounting structure.
60609 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60610 index e86dfca..40cc55f 100644
60611 --- a/include/linux/a.out.h
60612 +++ b/include/linux/a.out.h
60613 @@ -39,6 +39,14 @@ enum machine_type {
60614 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60615 };
60616
60617 +/* Constants for the N_FLAGS field */
60618 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60619 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60620 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60621 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60622 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60623 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60624 +
60625 #if !defined (N_MAGIC)
60626 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60627 #endif
60628 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
60629 index f4ff882..84b53a6 100644
60630 --- a/include/linux/atmdev.h
60631 +++ b/include/linux/atmdev.h
60632 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60633 #endif
60634
60635 struct k_atm_aal_stats {
60636 -#define __HANDLE_ITEM(i) atomic_t i
60637 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
60638 __AAL_STAT_ITEMS
60639 #undef __HANDLE_ITEM
60640 };
60641 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
60642 index 0092102..8a801b4 100644
60643 --- a/include/linux/binfmts.h
60644 +++ b/include/linux/binfmts.h
60645 @@ -89,6 +89,7 @@ struct linux_binfmt {
60646 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60647 int (*load_shlib)(struct file *);
60648 int (*core_dump)(struct coredump_params *cprm);
60649 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60650 unsigned long min_coredump; /* minimal dump size */
60651 };
60652
60653 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
60654 index 606cf33..b72c577 100644
60655 --- a/include/linux/blkdev.h
60656 +++ b/include/linux/blkdev.h
60657 @@ -1379,7 +1379,7 @@ struct block_device_operations {
60658 /* this callback is with swap_lock and sometimes page table lock held */
60659 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60660 struct module *owner;
60661 -};
60662 +} __do_const;
60663
60664 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
60665 unsigned long);
60666 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
60667 index 4d1a074..88f929a 100644
60668 --- a/include/linux/blktrace_api.h
60669 +++ b/include/linux/blktrace_api.h
60670 @@ -162,7 +162,7 @@ struct blk_trace {
60671 struct dentry *dir;
60672 struct dentry *dropped_file;
60673 struct dentry *msg_file;
60674 - atomic_t dropped;
60675 + atomic_unchecked_t dropped;
60676 };
60677
60678 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60679 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60680 index 83195fb..0b0f77d 100644
60681 --- a/include/linux/byteorder/little_endian.h
60682 +++ b/include/linux/byteorder/little_endian.h
60683 @@ -42,51 +42,51 @@
60684
60685 static inline __le64 __cpu_to_le64p(const __u64 *p)
60686 {
60687 - return (__force __le64)*p;
60688 + return (__force const __le64)*p;
60689 }
60690 static inline __u64 __le64_to_cpup(const __le64 *p)
60691 {
60692 - return (__force __u64)*p;
60693 + return (__force const __u64)*p;
60694 }
60695 static inline __le32 __cpu_to_le32p(const __u32 *p)
60696 {
60697 - return (__force __le32)*p;
60698 + return (__force const __le32)*p;
60699 }
60700 static inline __u32 __le32_to_cpup(const __le32 *p)
60701 {
60702 - return (__force __u32)*p;
60703 + return (__force const __u32)*p;
60704 }
60705 static inline __le16 __cpu_to_le16p(const __u16 *p)
60706 {
60707 - return (__force __le16)*p;
60708 + return (__force const __le16)*p;
60709 }
60710 static inline __u16 __le16_to_cpup(const __le16 *p)
60711 {
60712 - return (__force __u16)*p;
60713 + return (__force const __u16)*p;
60714 }
60715 static inline __be64 __cpu_to_be64p(const __u64 *p)
60716 {
60717 - return (__force __be64)__swab64p(p);
60718 + return (__force const __be64)__swab64p(p);
60719 }
60720 static inline __u64 __be64_to_cpup(const __be64 *p)
60721 {
60722 - return __swab64p((__u64 *)p);
60723 + return __swab64p((const __u64 *)p);
60724 }
60725 static inline __be32 __cpu_to_be32p(const __u32 *p)
60726 {
60727 - return (__force __be32)__swab32p(p);
60728 + return (__force const __be32)__swab32p(p);
60729 }
60730 static inline __u32 __be32_to_cpup(const __be32 *p)
60731 {
60732 - return __swab32p((__u32 *)p);
60733 + return __swab32p((const __u32 *)p);
60734 }
60735 static inline __be16 __cpu_to_be16p(const __u16 *p)
60736 {
60737 - return (__force __be16)__swab16p(p);
60738 + return (__force const __be16)__swab16p(p);
60739 }
60740 static inline __u16 __be16_to_cpup(const __be16 *p)
60741 {
60742 - return __swab16p((__u16 *)p);
60743 + return __swab16p((const __u16 *)p);
60744 }
60745 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60746 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60747 diff --git a/include/linux/cache.h b/include/linux/cache.h
60748 index 4c57065..4307975 100644
60749 --- a/include/linux/cache.h
60750 +++ b/include/linux/cache.h
60751 @@ -16,6 +16,10 @@
60752 #define __read_mostly
60753 #endif
60754
60755 +#ifndef __read_only
60756 +#define __read_only __read_mostly
60757 +#endif
60758 +
60759 #ifndef ____cacheline_aligned
60760 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60761 #endif
60762 diff --git a/include/linux/capability.h b/include/linux/capability.h
60763 index 12d52de..b5f7fa7 100644
60764 --- a/include/linux/capability.h
60765 +++ b/include/linux/capability.h
60766 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
60767 extern bool capable(int cap);
60768 extern bool ns_capable(struct user_namespace *ns, int cap);
60769 extern bool nsown_capable(int cap);
60770 +extern bool capable_nolog(int cap);
60771 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60772
60773 /* audit system wants to get cap info from files as well */
60774 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
60775 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
60776 index 04ffb2e..6799180 100644
60777 --- a/include/linux/cleancache.h
60778 +++ b/include/linux/cleancache.h
60779 @@ -31,7 +31,7 @@ struct cleancache_ops {
60780 void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
60781 void (*flush_inode)(int, struct cleancache_filekey);
60782 void (*flush_fs)(int);
60783 -};
60784 +} __no_const;
60785
60786 extern struct cleancache_ops
60787 cleancache_register_ops(struct cleancache_ops *ops);
60788 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
60789 index 2f40791..567b215 100644
60790 --- a/include/linux/compiler-gcc4.h
60791 +++ b/include/linux/compiler-gcc4.h
60792 @@ -32,6 +32,15 @@
60793 #define __linktime_error(message) __attribute__((__error__(message)))
60794
60795 #if __GNUC_MINOR__ >= 5
60796 +
60797 +#ifdef CONSTIFY_PLUGIN
60798 +#define __no_const __attribute__((no_const))
60799 +#define __do_const __attribute__((do_const))
60800 +#endif
60801 +
60802 +#ifdef SIZE_OVERFLOW_PLUGIN
60803 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60804 +#endif
60805 /*
60806 * Mark a position in code as unreachable. This can be used to
60807 * suppress control flow warnings after asm blocks that transfer
60808 @@ -47,6 +56,11 @@
60809 #define __noclone __attribute__((__noclone__))
60810
60811 #endif
60812 +
60813 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60814 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60815 +#define __bos0(ptr) __bos((ptr), 0)
60816 +#define __bos1(ptr) __bos((ptr), 1)
60817 #endif
60818
60819 #if __GNUC_MINOR__ > 0
60820 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60821 index 4a24354..ecaff7a 100644
60822 --- a/include/linux/compiler.h
60823 +++ b/include/linux/compiler.h
60824 @@ -5,31 +5,62 @@
60825
60826 #ifdef __CHECKER__
60827 # define __user __attribute__((noderef, address_space(1)))
60828 +# define __force_user __force __user
60829 # define __kernel __attribute__((address_space(0)))
60830 +# define __force_kernel __force __kernel
60831 # define __safe __attribute__((safe))
60832 # define __force __attribute__((force))
60833 # define __nocast __attribute__((nocast))
60834 # define __iomem __attribute__((noderef, address_space(2)))
60835 +# define __force_iomem __force __iomem
60836 # define __acquires(x) __attribute__((context(x,0,1)))
60837 # define __releases(x) __attribute__((context(x,1,0)))
60838 # define __acquire(x) __context__(x,1)
60839 # define __release(x) __context__(x,-1)
60840 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60841 # define __percpu __attribute__((noderef, address_space(3)))
60842 +# define __force_percpu __force __percpu
60843 #ifdef CONFIG_SPARSE_RCU_POINTER
60844 # define __rcu __attribute__((noderef, address_space(4)))
60845 +# define __force_rcu __force __rcu
60846 #else
60847 # define __rcu
60848 +# define __force_rcu
60849 #endif
60850 extern void __chk_user_ptr(const volatile void __user *);
60851 extern void __chk_io_ptr(const volatile void __iomem *);
60852 +#elif defined(CHECKER_PLUGIN)
60853 +//# define __user
60854 +//# define __force_user
60855 +//# define __kernel
60856 +//# define __force_kernel
60857 +# define __safe
60858 +# define __force
60859 +# define __nocast
60860 +# define __iomem
60861 +# define __force_iomem
60862 +# define __chk_user_ptr(x) (void)0
60863 +# define __chk_io_ptr(x) (void)0
60864 +# define __builtin_warning(x, y...) (1)
60865 +# define __acquires(x)
60866 +# define __releases(x)
60867 +# define __acquire(x) (void)0
60868 +# define __release(x) (void)0
60869 +# define __cond_lock(x,c) (c)
60870 +# define __percpu
60871 +# define __force_percpu
60872 +# define __rcu
60873 +# define __force_rcu
60874 #else
60875 # define __user
60876 +# define __force_user
60877 # define __kernel
60878 +# define __force_kernel
60879 # define __safe
60880 # define __force
60881 # define __nocast
60882 # define __iomem
60883 +# define __force_iomem
60884 # define __chk_user_ptr(x) (void)0
60885 # define __chk_io_ptr(x) (void)0
60886 # define __builtin_warning(x, y...) (1)
60887 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60888 # define __release(x) (void)0
60889 # define __cond_lock(x,c) (c)
60890 # define __percpu
60891 +# define __force_percpu
60892 # define __rcu
60893 +# define __force_rcu
60894 #endif
60895
60896 #ifdef __KERNEL__
60897 @@ -264,6 +297,17 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60898 # define __attribute_const__ /* unimplemented */
60899 #endif
60900
60901 +#ifndef __no_const
60902 +# define __no_const
60903 +#endif
60904 +
60905 +#ifndef __do_const
60906 +# define __do_const
60907 +#endif
60908 +
60909 +#ifndef __size_overflow
60910 +# define __size_overflow(...)
60911 +#endif
60912 /*
60913 * Tell gcc if a function is cold. The compiler will assume any path
60914 * directly leading to the call is unlikely.
60915 @@ -273,6 +317,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60916 #define __cold
60917 #endif
60918
60919 +#ifndef __alloc_size
60920 +#define __alloc_size(...)
60921 +#endif
60922 +
60923 +#ifndef __bos
60924 +#define __bos(ptr, arg)
60925 +#endif
60926 +
60927 +#ifndef __bos0
60928 +#define __bos0(ptr)
60929 +#endif
60930 +
60931 +#ifndef __bos1
60932 +#define __bos1(ptr)
60933 +#endif
60934 +
60935 /* Simple shorthand for a section definition */
60936 #ifndef __section
60937 # define __section(S) __attribute__ ((__section__(#S)))
60938 @@ -308,6 +368,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60939 * use is to mediate communication between process-level code and irq/NMI
60940 * handlers, all running on the same CPU.
60941 */
60942 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60943 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60944 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60945
60946 #endif /* __LINUX_COMPILER_H */
60947 diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
60948 index e9eaec5..bfeb9bb 100644
60949 --- a/include/linux/cpuset.h
60950 +++ b/include/linux/cpuset.h
60951 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void)
60952 * nodemask.
60953 */
60954 smp_mb();
60955 - --ACCESS_ONCE(current->mems_allowed_change_disable);
60956 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
60957 }
60958
60959 static inline void set_mems_allowed(nodemask_t nodemask)
60960 diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
60961 index b936763..48685ee 100644
60962 --- a/include/linux/crash_dump.h
60963 +++ b/include/linux/crash_dump.h
60964 @@ -14,7 +14,7 @@ extern unsigned long long elfcorehdr_addr;
60965 extern unsigned long long elfcorehdr_size;
60966
60967 extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
60968 - unsigned long, int);
60969 + unsigned long, int) __size_overflow(3);
60970
60971 /* Architecture code defines this if there are other possible ELF
60972 * machine types, e.g. on bi-arch capable hardware. */
60973 diff --git a/include/linux/cred.h b/include/linux/cred.h
60974 index adadf71..6af5560 100644
60975 --- a/include/linux/cred.h
60976 +++ b/include/linux/cred.h
60977 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60978 static inline void validate_process_creds(void)
60979 {
60980 }
60981 +static inline void validate_task_creds(struct task_struct *task)
60982 +{
60983 +}
60984 #endif
60985
60986 /**
60987 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60988 index 8a94217..15d49e3 100644
60989 --- a/include/linux/crypto.h
60990 +++ b/include/linux/crypto.h
60991 @@ -365,7 +365,7 @@ struct cipher_tfm {
60992 const u8 *key, unsigned int keylen);
60993 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60994 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60995 -};
60996 +} __no_const;
60997
60998 struct hash_tfm {
60999 int (*init)(struct hash_desc *desc);
61000 @@ -386,13 +386,13 @@ struct compress_tfm {
61001 int (*cot_decompress)(struct crypto_tfm *tfm,
61002 const u8 *src, unsigned int slen,
61003 u8 *dst, unsigned int *dlen);
61004 -};
61005 +} __no_const;
61006
61007 struct rng_tfm {
61008 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
61009 unsigned int dlen);
61010 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
61011 -};
61012 +} __no_const;
61013
61014 #define crt_ablkcipher crt_u.ablkcipher
61015 #define crt_aead crt_u.aead
61016 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
61017 index 7925bf0..d5143d2 100644
61018 --- a/include/linux/decompress/mm.h
61019 +++ b/include/linux/decompress/mm.h
61020 @@ -77,7 +77,7 @@ static void free(void *where)
61021 * warnings when not needed (indeed large_malloc / large_free are not
61022 * needed by inflate */
61023
61024 -#define malloc(a) kmalloc(a, GFP_KERNEL)
61025 +#define malloc(a) kmalloc((a), GFP_KERNEL)
61026 #define free(a) kfree(a)
61027
61028 #define large_malloc(a) vmalloc(a)
61029 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
61030 index e13117c..e9fc938 100644
61031 --- a/include/linux/dma-mapping.h
61032 +++ b/include/linux/dma-mapping.h
61033 @@ -46,7 +46,7 @@ struct dma_map_ops {
61034 u64 (*get_required_mask)(struct device *dev);
61035 #endif
61036 int is_phys;
61037 -};
61038 +} __do_const;
61039
61040 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
61041
61042 diff --git a/include/linux/efi.h b/include/linux/efi.h
61043 index 7cce0ea..c2085e4 100644
61044 --- a/include/linux/efi.h
61045 +++ b/include/linux/efi.h
61046 @@ -591,7 +591,7 @@ struct efivar_operations {
61047 efi_get_variable_t *get_variable;
61048 efi_get_next_variable_t *get_next_variable;
61049 efi_set_variable_t *set_variable;
61050 -};
61051 +} __no_const;
61052
61053 struct efivars {
61054 /*
61055 diff --git a/include/linux/elf.h b/include/linux/elf.h
61056 index 999b4f5..57753b4 100644
61057 --- a/include/linux/elf.h
61058 +++ b/include/linux/elf.h
61059 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
61060 #define PT_GNU_EH_FRAME 0x6474e550
61061
61062 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
61063 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
61064 +
61065 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
61066 +
61067 +/* Constants for the e_flags field */
61068 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
61069 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
61070 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
61071 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
61072 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
61073 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
61074
61075 /*
61076 * Extended Numbering
61077 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
61078 #define DT_DEBUG 21
61079 #define DT_TEXTREL 22
61080 #define DT_JMPREL 23
61081 +#define DT_FLAGS 30
61082 + #define DF_TEXTREL 0x00000004
61083 #define DT_ENCODING 32
61084 #define OLD_DT_LOOS 0x60000000
61085 #define DT_LOOS 0x6000000d
61086 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
61087 #define PF_W 0x2
61088 #define PF_X 0x1
61089
61090 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
61091 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
61092 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
61093 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
61094 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
61095 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
61096 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
61097 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
61098 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
61099 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
61100 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
61101 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
61102 +
61103 typedef struct elf32_phdr{
61104 Elf32_Word p_type;
61105 Elf32_Off p_offset;
61106 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
61107 #define EI_OSABI 7
61108 #define EI_PAD 8
61109
61110 +#define EI_PAX 14
61111 +
61112 #define ELFMAG0 0x7f /* EI_MAG */
61113 #define ELFMAG1 'E'
61114 #define ELFMAG2 'L'
61115 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
61116 #define elf_note elf32_note
61117 #define elf_addr_t Elf32_Off
61118 #define Elf_Half Elf32_Half
61119 +#define elf_dyn Elf32_Dyn
61120
61121 #else
61122
61123 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
61124 #define elf_note elf64_note
61125 #define elf_addr_t Elf64_Off
61126 #define Elf_Half Elf64_Half
61127 +#define elf_dyn Elf64_Dyn
61128
61129 #endif
61130
61131 diff --git a/include/linux/filter.h b/include/linux/filter.h
61132 index 8eeb205..d59bfa2 100644
61133 --- a/include/linux/filter.h
61134 +++ b/include/linux/filter.h
61135 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
61136
61137 struct sk_buff;
61138 struct sock;
61139 +struct bpf_jit_work;
61140
61141 struct sk_filter
61142 {
61143 @@ -141,6 +142,9 @@ struct sk_filter
61144 unsigned int len; /* Number of filter blocks */
61145 unsigned int (*bpf_func)(const struct sk_buff *skb,
61146 const struct sock_filter *filter);
61147 +#ifdef CONFIG_BPF_JIT
61148 + struct bpf_jit_work *work;
61149 +#endif
61150 struct rcu_head rcu;
61151 struct sock_filter insns[0];
61152 };
61153 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
61154 index 84ccf8e..2e9b14c 100644
61155 --- a/include/linux/firewire.h
61156 +++ b/include/linux/firewire.h
61157 @@ -428,7 +428,7 @@ struct fw_iso_context {
61158 union {
61159 fw_iso_callback_t sc;
61160 fw_iso_mc_callback_t mc;
61161 - } callback;
61162 + } __no_const callback;
61163 void *callback_data;
61164 };
61165
61166 diff --git a/include/linux/fs.h b/include/linux/fs.h
61167 index f4b6e06..d6ba573 100644
61168 --- a/include/linux/fs.h
61169 +++ b/include/linux/fs.h
61170 @@ -1628,7 +1628,8 @@ struct file_operations {
61171 int (*setlease)(struct file *, long, struct file_lock **);
61172 long (*fallocate)(struct file *file, int mode, loff_t offset,
61173 loff_t len);
61174 -};
61175 +} __do_const;
61176 +typedef struct file_operations __no_const file_operations_no_const;
61177
61178 struct inode_operations {
61179 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
61180 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
61181 index 003dc0f..3c4ea97 100644
61182 --- a/include/linux/fs_struct.h
61183 +++ b/include/linux/fs_struct.h
61184 @@ -6,7 +6,7 @@
61185 #include <linux/seqlock.h>
61186
61187 struct fs_struct {
61188 - int users;
61189 + atomic_t users;
61190 spinlock_t lock;
61191 seqcount_t seq;
61192 int umask;
61193 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
61194 index ce31408..b1ad003 100644
61195 --- a/include/linux/fscache-cache.h
61196 +++ b/include/linux/fscache-cache.h
61197 @@ -102,7 +102,7 @@ struct fscache_operation {
61198 fscache_operation_release_t release;
61199 };
61200
61201 -extern atomic_t fscache_op_debug_id;
61202 +extern atomic_unchecked_t fscache_op_debug_id;
61203 extern void fscache_op_work_func(struct work_struct *work);
61204
61205 extern void fscache_enqueue_operation(struct fscache_operation *);
61206 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
61207 {
61208 INIT_WORK(&op->work, fscache_op_work_func);
61209 atomic_set(&op->usage, 1);
61210 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
61211 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
61212 op->processor = processor;
61213 op->release = release;
61214 INIT_LIST_HEAD(&op->pend_link);
61215 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
61216 index 2a53f10..0187fdf 100644
61217 --- a/include/linux/fsnotify.h
61218 +++ b/include/linux/fsnotify.h
61219 @@ -314,7 +314,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
61220 */
61221 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
61222 {
61223 - return kstrdup(name, GFP_KERNEL);
61224 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
61225 }
61226
61227 /*
61228 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
61229 index 91d0e0a3..035666b 100644
61230 --- a/include/linux/fsnotify_backend.h
61231 +++ b/include/linux/fsnotify_backend.h
61232 @@ -105,6 +105,7 @@ struct fsnotify_ops {
61233 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
61234 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
61235 };
61236 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
61237
61238 /*
61239 * A group is a "thing" that wants to receive notification about filesystem
61240 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
61241 index c3da42d..c70e0df 100644
61242 --- a/include/linux/ftrace_event.h
61243 +++ b/include/linux/ftrace_event.h
61244 @@ -97,7 +97,7 @@ struct trace_event_functions {
61245 trace_print_func raw;
61246 trace_print_func hex;
61247 trace_print_func binary;
61248 -};
61249 +} __no_const;
61250
61251 struct trace_event {
61252 struct hlist_node node;
61253 @@ -254,7 +254,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
61254 extern int trace_add_event_call(struct ftrace_event_call *call);
61255 extern void trace_remove_event_call(struct ftrace_event_call *call);
61256
61257 -#define is_signed_type(type) (((type)(-1)) < 0)
61258 +#define is_signed_type(type) (((type)(-1)) < (type)1)
61259
61260 int trace_set_clr_event(const char *system, const char *event, int set);
61261
61262 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
61263 index e61d319..0da8505 100644
61264 --- a/include/linux/genhd.h
61265 +++ b/include/linux/genhd.h
61266 @@ -185,7 +185,7 @@ struct gendisk {
61267 struct kobject *slave_dir;
61268
61269 struct timer_rand_state *random;
61270 - atomic_t sync_io; /* RAID */
61271 + atomic_unchecked_t sync_io; /* RAID */
61272 struct disk_events *ev;
61273 #ifdef CONFIG_BLK_DEV_INTEGRITY
61274 struct blk_integrity *integrity;
61275 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
61276 new file mode 100644
61277 index 0000000..8a130b6
61278 --- /dev/null
61279 +++ b/include/linux/gracl.h
61280 @@ -0,0 +1,319 @@
61281 +#ifndef GR_ACL_H
61282 +#define GR_ACL_H
61283 +
61284 +#include <linux/grdefs.h>
61285 +#include <linux/resource.h>
61286 +#include <linux/capability.h>
61287 +#include <linux/dcache.h>
61288 +#include <asm/resource.h>
61289 +
61290 +/* Major status information */
61291 +
61292 +#define GR_VERSION "grsecurity 2.9"
61293 +#define GRSECURITY_VERSION 0x2900
61294 +
61295 +enum {
61296 + GR_SHUTDOWN = 0,
61297 + GR_ENABLE = 1,
61298 + GR_SPROLE = 2,
61299 + GR_RELOAD = 3,
61300 + GR_SEGVMOD = 4,
61301 + GR_STATUS = 5,
61302 + GR_UNSPROLE = 6,
61303 + GR_PASSSET = 7,
61304 + GR_SPROLEPAM = 8,
61305 +};
61306 +
61307 +/* Password setup definitions
61308 + * kernel/grhash.c */
61309 +enum {
61310 + GR_PW_LEN = 128,
61311 + GR_SALT_LEN = 16,
61312 + GR_SHA_LEN = 32,
61313 +};
61314 +
61315 +enum {
61316 + GR_SPROLE_LEN = 64,
61317 +};
61318 +
61319 +enum {
61320 + GR_NO_GLOB = 0,
61321 + GR_REG_GLOB,
61322 + GR_CREATE_GLOB
61323 +};
61324 +
61325 +#define GR_NLIMITS 32
61326 +
61327 +/* Begin Data Structures */
61328 +
61329 +struct sprole_pw {
61330 + unsigned char *rolename;
61331 + unsigned char salt[GR_SALT_LEN];
61332 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
61333 +};
61334 +
61335 +struct name_entry {
61336 + __u32 key;
61337 + ino_t inode;
61338 + dev_t device;
61339 + char *name;
61340 + __u16 len;
61341 + __u8 deleted;
61342 + struct name_entry *prev;
61343 + struct name_entry *next;
61344 +};
61345 +
61346 +struct inodev_entry {
61347 + struct name_entry *nentry;
61348 + struct inodev_entry *prev;
61349 + struct inodev_entry *next;
61350 +};
61351 +
61352 +struct acl_role_db {
61353 + struct acl_role_label **r_hash;
61354 + __u32 r_size;
61355 +};
61356 +
61357 +struct inodev_db {
61358 + struct inodev_entry **i_hash;
61359 + __u32 i_size;
61360 +};
61361 +
61362 +struct name_db {
61363 + struct name_entry **n_hash;
61364 + __u32 n_size;
61365 +};
61366 +
61367 +struct crash_uid {
61368 + uid_t uid;
61369 + unsigned long expires;
61370 +};
61371 +
61372 +struct gr_hash_struct {
61373 + void **table;
61374 + void **nametable;
61375 + void *first;
61376 + __u32 table_size;
61377 + __u32 used_size;
61378 + int type;
61379 +};
61380 +
61381 +/* Userspace Grsecurity ACL data structures */
61382 +
61383 +struct acl_subject_label {
61384 + char *filename;
61385 + ino_t inode;
61386 + dev_t device;
61387 + __u32 mode;
61388 + kernel_cap_t cap_mask;
61389 + kernel_cap_t cap_lower;
61390 + kernel_cap_t cap_invert_audit;
61391 +
61392 + struct rlimit res[GR_NLIMITS];
61393 + __u32 resmask;
61394 +
61395 + __u8 user_trans_type;
61396 + __u8 group_trans_type;
61397 + uid_t *user_transitions;
61398 + gid_t *group_transitions;
61399 + __u16 user_trans_num;
61400 + __u16 group_trans_num;
61401 +
61402 + __u32 sock_families[2];
61403 + __u32 ip_proto[8];
61404 + __u32 ip_type;
61405 + struct acl_ip_label **ips;
61406 + __u32 ip_num;
61407 + __u32 inaddr_any_override;
61408 +
61409 + __u32 crashes;
61410 + unsigned long expires;
61411 +
61412 + struct acl_subject_label *parent_subject;
61413 + struct gr_hash_struct *hash;
61414 + struct acl_subject_label *prev;
61415 + struct acl_subject_label *next;
61416 +
61417 + struct acl_object_label **obj_hash;
61418 + __u32 obj_hash_size;
61419 + __u16 pax_flags;
61420 +};
61421 +
61422 +struct role_allowed_ip {
61423 + __u32 addr;
61424 + __u32 netmask;
61425 +
61426 + struct role_allowed_ip *prev;
61427 + struct role_allowed_ip *next;
61428 +};
61429 +
61430 +struct role_transition {
61431 + char *rolename;
61432 +
61433 + struct role_transition *prev;
61434 + struct role_transition *next;
61435 +};
61436 +
61437 +struct acl_role_label {
61438 + char *rolename;
61439 + uid_t uidgid;
61440 + __u16 roletype;
61441 +
61442 + __u16 auth_attempts;
61443 + unsigned long expires;
61444 +
61445 + struct acl_subject_label *root_label;
61446 + struct gr_hash_struct *hash;
61447 +
61448 + struct acl_role_label *prev;
61449 + struct acl_role_label *next;
61450 +
61451 + struct role_transition *transitions;
61452 + struct role_allowed_ip *allowed_ips;
61453 + uid_t *domain_children;
61454 + __u16 domain_child_num;
61455 +
61456 + umode_t umask;
61457 +
61458 + struct acl_subject_label **subj_hash;
61459 + __u32 subj_hash_size;
61460 +};
61461 +
61462 +struct user_acl_role_db {
61463 + struct acl_role_label **r_table;
61464 + __u32 num_pointers; /* Number of allocations to track */
61465 + __u32 num_roles; /* Number of roles */
61466 + __u32 num_domain_children; /* Number of domain children */
61467 + __u32 num_subjects; /* Number of subjects */
61468 + __u32 num_objects; /* Number of objects */
61469 +};
61470 +
61471 +struct acl_object_label {
61472 + char *filename;
61473 + ino_t inode;
61474 + dev_t device;
61475 + __u32 mode;
61476 +
61477 + struct acl_subject_label *nested;
61478 + struct acl_object_label *globbed;
61479 +
61480 + /* next two structures not used */
61481 +
61482 + struct acl_object_label *prev;
61483 + struct acl_object_label *next;
61484 +};
61485 +
61486 +struct acl_ip_label {
61487 + char *iface;
61488 + __u32 addr;
61489 + __u32 netmask;
61490 + __u16 low, high;
61491 + __u8 mode;
61492 + __u32 type;
61493 + __u32 proto[8];
61494 +
61495 + /* next two structures not used */
61496 +
61497 + struct acl_ip_label *prev;
61498 + struct acl_ip_label *next;
61499 +};
61500 +
61501 +struct gr_arg {
61502 + struct user_acl_role_db role_db;
61503 + unsigned char pw[GR_PW_LEN];
61504 + unsigned char salt[GR_SALT_LEN];
61505 + unsigned char sum[GR_SHA_LEN];
61506 + unsigned char sp_role[GR_SPROLE_LEN];
61507 + struct sprole_pw *sprole_pws;
61508 + dev_t segv_device;
61509 + ino_t segv_inode;
61510 + uid_t segv_uid;
61511 + __u16 num_sprole_pws;
61512 + __u16 mode;
61513 +};
61514 +
61515 +struct gr_arg_wrapper {
61516 + struct gr_arg *arg;
61517 + __u32 version;
61518 + __u32 size;
61519 +};
61520 +
61521 +struct subject_map {
61522 + struct acl_subject_label *user;
61523 + struct acl_subject_label *kernel;
61524 + struct subject_map *prev;
61525 + struct subject_map *next;
61526 +};
61527 +
61528 +struct acl_subj_map_db {
61529 + struct subject_map **s_hash;
61530 + __u32 s_size;
61531 +};
61532 +
61533 +/* End Data Structures Section */
61534 +
61535 +/* Hash functions generated by empirical testing by Brad Spengler
61536 + Makes good use of the low bits of the inode. Generally 0-1 times
61537 + in loop for successful match. 0-3 for unsuccessful match.
61538 + Shift/add algorithm with modulus of table size and an XOR*/
61539 +
61540 +static __inline__ unsigned int
61541 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61542 +{
61543 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
61544 +}
61545 +
61546 + static __inline__ unsigned int
61547 +shash(const struct acl_subject_label *userp, const unsigned int sz)
61548 +{
61549 + return ((const unsigned long)userp % sz);
61550 +}
61551 +
61552 +static __inline__ unsigned int
61553 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61554 +{
61555 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61556 +}
61557 +
61558 +static __inline__ unsigned int
61559 +nhash(const char *name, const __u16 len, const unsigned int sz)
61560 +{
61561 + return full_name_hash((const unsigned char *)name, len) % sz;
61562 +}
61563 +
61564 +#define FOR_EACH_ROLE_START(role) \
61565 + role = role_list; \
61566 + while (role) {
61567 +
61568 +#define FOR_EACH_ROLE_END(role) \
61569 + role = role->prev; \
61570 + }
61571 +
61572 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61573 + subj = NULL; \
61574 + iter = 0; \
61575 + while (iter < role->subj_hash_size) { \
61576 + if (subj == NULL) \
61577 + subj = role->subj_hash[iter]; \
61578 + if (subj == NULL) { \
61579 + iter++; \
61580 + continue; \
61581 + }
61582 +
61583 +#define FOR_EACH_SUBJECT_END(subj,iter) \
61584 + subj = subj->next; \
61585 + if (subj == NULL) \
61586 + iter++; \
61587 + }
61588 +
61589 +
61590 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61591 + subj = role->hash->first; \
61592 + while (subj != NULL) {
61593 +
61594 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61595 + subj = subj->next; \
61596 + }
61597 +
61598 +#endif
61599 +
61600 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61601 new file mode 100644
61602 index 0000000..323ecf2
61603 --- /dev/null
61604 +++ b/include/linux/gralloc.h
61605 @@ -0,0 +1,9 @@
61606 +#ifndef __GRALLOC_H
61607 +#define __GRALLOC_H
61608 +
61609 +void acl_free_all(void);
61610 +int acl_alloc_stack_init(unsigned long size);
61611 +void *acl_alloc(unsigned long len);
61612 +void *acl_alloc_num(unsigned long num, unsigned long len);
61613 +
61614 +#endif
61615 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61616 new file mode 100644
61617 index 0000000..b30e9bc
61618 --- /dev/null
61619 +++ b/include/linux/grdefs.h
61620 @@ -0,0 +1,140 @@
61621 +#ifndef GRDEFS_H
61622 +#define GRDEFS_H
61623 +
61624 +/* Begin grsecurity status declarations */
61625 +
61626 +enum {
61627 + GR_READY = 0x01,
61628 + GR_STATUS_INIT = 0x00 // disabled state
61629 +};
61630 +
61631 +/* Begin ACL declarations */
61632 +
61633 +/* Role flags */
61634 +
61635 +enum {
61636 + GR_ROLE_USER = 0x0001,
61637 + GR_ROLE_GROUP = 0x0002,
61638 + GR_ROLE_DEFAULT = 0x0004,
61639 + GR_ROLE_SPECIAL = 0x0008,
61640 + GR_ROLE_AUTH = 0x0010,
61641 + GR_ROLE_NOPW = 0x0020,
61642 + GR_ROLE_GOD = 0x0040,
61643 + GR_ROLE_LEARN = 0x0080,
61644 + GR_ROLE_TPE = 0x0100,
61645 + GR_ROLE_DOMAIN = 0x0200,
61646 + GR_ROLE_PAM = 0x0400,
61647 + GR_ROLE_PERSIST = 0x0800
61648 +};
61649 +
61650 +/* ACL Subject and Object mode flags */
61651 +enum {
61652 + GR_DELETED = 0x80000000
61653 +};
61654 +
61655 +/* ACL Object-only mode flags */
61656 +enum {
61657 + GR_READ = 0x00000001,
61658 + GR_APPEND = 0x00000002,
61659 + GR_WRITE = 0x00000004,
61660 + GR_EXEC = 0x00000008,
61661 + GR_FIND = 0x00000010,
61662 + GR_INHERIT = 0x00000020,
61663 + GR_SETID = 0x00000040,
61664 + GR_CREATE = 0x00000080,
61665 + GR_DELETE = 0x00000100,
61666 + GR_LINK = 0x00000200,
61667 + GR_AUDIT_READ = 0x00000400,
61668 + GR_AUDIT_APPEND = 0x00000800,
61669 + GR_AUDIT_WRITE = 0x00001000,
61670 + GR_AUDIT_EXEC = 0x00002000,
61671 + GR_AUDIT_FIND = 0x00004000,
61672 + GR_AUDIT_INHERIT= 0x00008000,
61673 + GR_AUDIT_SETID = 0x00010000,
61674 + GR_AUDIT_CREATE = 0x00020000,
61675 + GR_AUDIT_DELETE = 0x00040000,
61676 + GR_AUDIT_LINK = 0x00080000,
61677 + GR_PTRACERD = 0x00100000,
61678 + GR_NOPTRACE = 0x00200000,
61679 + GR_SUPPRESS = 0x00400000,
61680 + GR_NOLEARN = 0x00800000,
61681 + GR_INIT_TRANSFER= 0x01000000
61682 +};
61683 +
61684 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61685 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61686 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61687 +
61688 +/* ACL subject-only mode flags */
61689 +enum {
61690 + GR_KILL = 0x00000001,
61691 + GR_VIEW = 0x00000002,
61692 + GR_PROTECTED = 0x00000004,
61693 + GR_LEARN = 0x00000008,
61694 + GR_OVERRIDE = 0x00000010,
61695 + /* just a placeholder, this mode is only used in userspace */
61696 + GR_DUMMY = 0x00000020,
61697 + GR_PROTSHM = 0x00000040,
61698 + GR_KILLPROC = 0x00000080,
61699 + GR_KILLIPPROC = 0x00000100,
61700 + /* just a placeholder, this mode is only used in userspace */
61701 + GR_NOTROJAN = 0x00000200,
61702 + GR_PROTPROCFD = 0x00000400,
61703 + GR_PROCACCT = 0x00000800,
61704 + GR_RELAXPTRACE = 0x00001000,
61705 + GR_NESTED = 0x00002000,
61706 + GR_INHERITLEARN = 0x00004000,
61707 + GR_PROCFIND = 0x00008000,
61708 + GR_POVERRIDE = 0x00010000,
61709 + GR_KERNELAUTH = 0x00020000,
61710 + GR_ATSECURE = 0x00040000,
61711 + GR_SHMEXEC = 0x00080000
61712 +};
61713 +
61714 +enum {
61715 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61716 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61717 + GR_PAX_ENABLE_MPROTECT = 0x0004,
61718 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
61719 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61720 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61721 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61722 + GR_PAX_DISABLE_MPROTECT = 0x0400,
61723 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
61724 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61725 +};
61726 +
61727 +enum {
61728 + GR_ID_USER = 0x01,
61729 + GR_ID_GROUP = 0x02,
61730 +};
61731 +
61732 +enum {
61733 + GR_ID_ALLOW = 0x01,
61734 + GR_ID_DENY = 0x02,
61735 +};
61736 +
61737 +#define GR_CRASH_RES 31
61738 +#define GR_UIDTABLE_MAX 500
61739 +
61740 +/* begin resource learning section */
61741 +enum {
61742 + GR_RLIM_CPU_BUMP = 60,
61743 + GR_RLIM_FSIZE_BUMP = 50000,
61744 + GR_RLIM_DATA_BUMP = 10000,
61745 + GR_RLIM_STACK_BUMP = 1000,
61746 + GR_RLIM_CORE_BUMP = 10000,
61747 + GR_RLIM_RSS_BUMP = 500000,
61748 + GR_RLIM_NPROC_BUMP = 1,
61749 + GR_RLIM_NOFILE_BUMP = 5,
61750 + GR_RLIM_MEMLOCK_BUMP = 50000,
61751 + GR_RLIM_AS_BUMP = 500000,
61752 + GR_RLIM_LOCKS_BUMP = 2,
61753 + GR_RLIM_SIGPENDING_BUMP = 5,
61754 + GR_RLIM_MSGQUEUE_BUMP = 10000,
61755 + GR_RLIM_NICE_BUMP = 1,
61756 + GR_RLIM_RTPRIO_BUMP = 1,
61757 + GR_RLIM_RTTIME_BUMP = 1000000
61758 +};
61759 +
61760 +#endif
61761 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61762 new file mode 100644
61763 index 0000000..da390f1
61764 --- /dev/null
61765 +++ b/include/linux/grinternal.h
61766 @@ -0,0 +1,221 @@
61767 +#ifndef __GRINTERNAL_H
61768 +#define __GRINTERNAL_H
61769 +
61770 +#ifdef CONFIG_GRKERNSEC
61771 +
61772 +#include <linux/fs.h>
61773 +#include <linux/mnt_namespace.h>
61774 +#include <linux/nsproxy.h>
61775 +#include <linux/gracl.h>
61776 +#include <linux/grdefs.h>
61777 +#include <linux/grmsg.h>
61778 +
61779 +void gr_add_learn_entry(const char *fmt, ...)
61780 + __attribute__ ((format (printf, 1, 2)));
61781 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61782 + const struct vfsmount *mnt);
61783 +__u32 gr_check_create(const struct dentry *new_dentry,
61784 + const struct dentry *parent,
61785 + const struct vfsmount *mnt, const __u32 mode);
61786 +int gr_check_protected_task(const struct task_struct *task);
61787 +__u32 to_gr_audit(const __u32 reqmode);
61788 +int gr_set_acls(const int type);
61789 +int gr_apply_subject_to_task(struct task_struct *task);
61790 +int gr_acl_is_enabled(void);
61791 +char gr_roletype_to_char(void);
61792 +
61793 +void gr_handle_alertkill(struct task_struct *task);
61794 +char *gr_to_filename(const struct dentry *dentry,
61795 + const struct vfsmount *mnt);
61796 +char *gr_to_filename1(const struct dentry *dentry,
61797 + const struct vfsmount *mnt);
61798 +char *gr_to_filename2(const struct dentry *dentry,
61799 + const struct vfsmount *mnt);
61800 +char *gr_to_filename3(const struct dentry *dentry,
61801 + const struct vfsmount *mnt);
61802 +
61803 +extern int grsec_enable_ptrace_readexec;
61804 +extern int grsec_enable_harden_ptrace;
61805 +extern int grsec_enable_link;
61806 +extern int grsec_enable_fifo;
61807 +extern int grsec_enable_execve;
61808 +extern int grsec_enable_shm;
61809 +extern int grsec_enable_execlog;
61810 +extern int grsec_enable_signal;
61811 +extern int grsec_enable_audit_ptrace;
61812 +extern int grsec_enable_forkfail;
61813 +extern int grsec_enable_time;
61814 +extern int grsec_enable_rofs;
61815 +extern int grsec_enable_chroot_shmat;
61816 +extern int grsec_enable_chroot_mount;
61817 +extern int grsec_enable_chroot_double;
61818 +extern int grsec_enable_chroot_pivot;
61819 +extern int grsec_enable_chroot_chdir;
61820 +extern int grsec_enable_chroot_chmod;
61821 +extern int grsec_enable_chroot_mknod;
61822 +extern int grsec_enable_chroot_fchdir;
61823 +extern int grsec_enable_chroot_nice;
61824 +extern int grsec_enable_chroot_execlog;
61825 +extern int grsec_enable_chroot_caps;
61826 +extern int grsec_enable_chroot_sysctl;
61827 +extern int grsec_enable_chroot_unix;
61828 +extern int grsec_enable_tpe;
61829 +extern int grsec_tpe_gid;
61830 +extern int grsec_enable_tpe_all;
61831 +extern int grsec_enable_tpe_invert;
61832 +extern int grsec_enable_socket_all;
61833 +extern int grsec_socket_all_gid;
61834 +extern int grsec_enable_socket_client;
61835 +extern int grsec_socket_client_gid;
61836 +extern int grsec_enable_socket_server;
61837 +extern int grsec_socket_server_gid;
61838 +extern int grsec_audit_gid;
61839 +extern int grsec_enable_group;
61840 +extern int grsec_enable_audit_textrel;
61841 +extern int grsec_enable_log_rwxmaps;
61842 +extern int grsec_enable_mount;
61843 +extern int grsec_enable_chdir;
61844 +extern int grsec_resource_logging;
61845 +extern int grsec_enable_blackhole;
61846 +extern int grsec_lastack_retries;
61847 +extern int grsec_enable_brute;
61848 +extern int grsec_lock;
61849 +
61850 +extern spinlock_t grsec_alert_lock;
61851 +extern unsigned long grsec_alert_wtime;
61852 +extern unsigned long grsec_alert_fyet;
61853 +
61854 +extern spinlock_t grsec_audit_lock;
61855 +
61856 +extern rwlock_t grsec_exec_file_lock;
61857 +
61858 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61859 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61860 + (tsk)->exec_file->f_vfsmnt) : "/")
61861 +
61862 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61863 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61864 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61865 +
61866 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61867 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61868 + (tsk)->exec_file->f_vfsmnt) : "/")
61869 +
61870 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61871 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61872 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61873 +
61874 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61875 +
61876 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61877 +
61878 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61879 + (task)->pid, (cred)->uid, \
61880 + (cred)->euid, (cred)->gid, (cred)->egid, \
61881 + gr_parent_task_fullpath(task), \
61882 + (task)->real_parent->comm, (task)->real_parent->pid, \
61883 + (pcred)->uid, (pcred)->euid, \
61884 + (pcred)->gid, (pcred)->egid
61885 +
61886 +#define GR_CHROOT_CAPS {{ \
61887 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61888 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61889 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61890 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61891 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61892 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61893 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61894 +
61895 +#define security_learn(normal_msg,args...) \
61896 +({ \
61897 + read_lock(&grsec_exec_file_lock); \
61898 + gr_add_learn_entry(normal_msg "\n", ## args); \
61899 + read_unlock(&grsec_exec_file_lock); \
61900 +})
61901 +
61902 +enum {
61903 + GR_DO_AUDIT,
61904 + GR_DONT_AUDIT,
61905 + /* used for non-audit messages that we shouldn't kill the task on */
61906 + GR_DONT_AUDIT_GOOD
61907 +};
61908 +
61909 +enum {
61910 + GR_TTYSNIFF,
61911 + GR_RBAC,
61912 + GR_RBAC_STR,
61913 + GR_STR_RBAC,
61914 + GR_RBAC_MODE2,
61915 + GR_RBAC_MODE3,
61916 + GR_FILENAME,
61917 + GR_SYSCTL_HIDDEN,
61918 + GR_NOARGS,
61919 + GR_ONE_INT,
61920 + GR_ONE_INT_TWO_STR,
61921 + GR_ONE_STR,
61922 + GR_STR_INT,
61923 + GR_TWO_STR_INT,
61924 + GR_TWO_INT,
61925 + GR_TWO_U64,
61926 + GR_THREE_INT,
61927 + GR_FIVE_INT_TWO_STR,
61928 + GR_TWO_STR,
61929 + GR_THREE_STR,
61930 + GR_FOUR_STR,
61931 + GR_STR_FILENAME,
61932 + GR_FILENAME_STR,
61933 + GR_FILENAME_TWO_INT,
61934 + GR_FILENAME_TWO_INT_STR,
61935 + GR_TEXTREL,
61936 + GR_PTRACE,
61937 + GR_RESOURCE,
61938 + GR_CAP,
61939 + GR_SIG,
61940 + GR_SIG2,
61941 + GR_CRASH1,
61942 + GR_CRASH2,
61943 + GR_PSACCT,
61944 + GR_RWXMAP
61945 +};
61946 +
61947 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61948 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61949 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61950 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61951 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61952 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61953 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61954 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61955 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61956 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61957 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61958 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61959 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61960 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61961 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61962 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61963 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61964 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61965 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61966 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61967 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61968 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61969 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61970 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61971 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61972 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61973 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61974 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61975 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61976 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61977 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61978 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61979 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61980 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61981 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61982 +
61983 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61984 +
61985 +#endif
61986 +
61987 +#endif
61988 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61989 new file mode 100644
61990 index 0000000..ae576a1
61991 --- /dev/null
61992 +++ b/include/linux/grmsg.h
61993 @@ -0,0 +1,109 @@
61994 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61995 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61996 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61997 +#define GR_STOPMOD_MSG "denied modification of module state by "
61998 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61999 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
62000 +#define GR_IOPERM_MSG "denied use of ioperm() by "
62001 +#define GR_IOPL_MSG "denied use of iopl() by "
62002 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
62003 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
62004 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
62005 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
62006 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
62007 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
62008 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
62009 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
62010 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
62011 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
62012 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
62013 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
62014 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
62015 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
62016 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
62017 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
62018 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
62019 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
62020 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
62021 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
62022 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
62023 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
62024 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
62025 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
62026 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
62027 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
62028 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
62029 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
62030 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
62031 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
62032 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
62033 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
62034 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
62035 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
62036 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
62037 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
62038 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
62039 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
62040 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
62041 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
62042 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
62043 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
62044 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
62045 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
62046 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
62047 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
62048 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
62049 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
62050 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
62051 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
62052 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
62053 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
62054 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
62055 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
62056 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
62057 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
62058 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
62059 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
62060 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
62061 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
62062 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
62063 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
62064 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
62065 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
62066 +#define GR_NICE_CHROOT_MSG "denied priority change by "
62067 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
62068 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
62069 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
62070 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
62071 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
62072 +#define GR_TIME_MSG "time set by "
62073 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
62074 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
62075 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
62076 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
62077 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
62078 +#define GR_BIND_MSG "denied bind() by "
62079 +#define GR_CONNECT_MSG "denied connect() by "
62080 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
62081 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
62082 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
62083 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
62084 +#define GR_CAP_ACL_MSG "use of %s denied for "
62085 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
62086 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
62087 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
62088 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
62089 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
62090 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
62091 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
62092 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
62093 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
62094 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
62095 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
62096 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
62097 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
62098 +#define GR_VM86_MSG "denied use of vm86 by "
62099 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
62100 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
62101 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
62102 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
62103 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
62104 new file mode 100644
62105 index 0000000..acd05db
62106 --- /dev/null
62107 +++ b/include/linux/grsecurity.h
62108 @@ -0,0 +1,232 @@
62109 +#ifndef GR_SECURITY_H
62110 +#define GR_SECURITY_H
62111 +#include <linux/fs.h>
62112 +#include <linux/fs_struct.h>
62113 +#include <linux/binfmts.h>
62114 +#include <linux/gracl.h>
62115 +
62116 +/* notify of brain-dead configs */
62117 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62118 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
62119 +#endif
62120 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
62121 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
62122 +#endif
62123 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
62124 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
62125 +#endif
62126 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
62127 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
62128 +#endif
62129 +
62130 +#include <linux/compat.h>
62131 +
62132 +struct user_arg_ptr {
62133 +#ifdef CONFIG_COMPAT
62134 + bool is_compat;
62135 +#endif
62136 + union {
62137 + const char __user *const __user *native;
62138 +#ifdef CONFIG_COMPAT
62139 + compat_uptr_t __user *compat;
62140 +#endif
62141 + } ptr;
62142 +};
62143 +
62144 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
62145 +void gr_handle_brute_check(void);
62146 +void gr_handle_kernel_exploit(void);
62147 +int gr_process_user_ban(void);
62148 +
62149 +char gr_roletype_to_char(void);
62150 +
62151 +int gr_acl_enable_at_secure(void);
62152 +
62153 +int gr_check_user_change(int real, int effective, int fs);
62154 +int gr_check_group_change(int real, int effective, int fs);
62155 +
62156 +void gr_del_task_from_ip_table(struct task_struct *p);
62157 +
62158 +int gr_pid_is_chrooted(struct task_struct *p);
62159 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
62160 +int gr_handle_chroot_nice(void);
62161 +int gr_handle_chroot_sysctl(const int op);
62162 +int gr_handle_chroot_setpriority(struct task_struct *p,
62163 + const int niceval);
62164 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
62165 +int gr_handle_chroot_chroot(const struct dentry *dentry,
62166 + const struct vfsmount *mnt);
62167 +void gr_handle_chroot_chdir(struct path *path);
62168 +int gr_handle_chroot_chmod(const struct dentry *dentry,
62169 + const struct vfsmount *mnt, const int mode);
62170 +int gr_handle_chroot_mknod(const struct dentry *dentry,
62171 + const struct vfsmount *mnt, const int mode);
62172 +int gr_handle_chroot_mount(const struct dentry *dentry,
62173 + const struct vfsmount *mnt,
62174 + const char *dev_name);
62175 +int gr_handle_chroot_pivot(void);
62176 +int gr_handle_chroot_unix(const pid_t pid);
62177 +
62178 +int gr_handle_rawio(const struct inode *inode);
62179 +
62180 +void gr_handle_ioperm(void);
62181 +void gr_handle_iopl(void);
62182 +
62183 +umode_t gr_acl_umask(void);
62184 +
62185 +int gr_tpe_allow(const struct file *file);
62186 +
62187 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
62188 +void gr_clear_chroot_entries(struct task_struct *task);
62189 +
62190 +void gr_log_forkfail(const int retval);
62191 +void gr_log_timechange(void);
62192 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
62193 +void gr_log_chdir(const struct dentry *dentry,
62194 + const struct vfsmount *mnt);
62195 +void gr_log_chroot_exec(const struct dentry *dentry,
62196 + const struct vfsmount *mnt);
62197 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
62198 +void gr_log_remount(const char *devname, const int retval);
62199 +void gr_log_unmount(const char *devname, const int retval);
62200 +void gr_log_mount(const char *from, const char *to, const int retval);
62201 +void gr_log_textrel(struct vm_area_struct *vma);
62202 +void gr_log_rwxmmap(struct file *file);
62203 +void gr_log_rwxmprotect(struct file *file);
62204 +
62205 +int gr_handle_follow_link(const struct inode *parent,
62206 + const struct inode *inode,
62207 + const struct dentry *dentry,
62208 + const struct vfsmount *mnt);
62209 +int gr_handle_fifo(const struct dentry *dentry,
62210 + const struct vfsmount *mnt,
62211 + const struct dentry *dir, const int flag,
62212 + const int acc_mode);
62213 +int gr_handle_hardlink(const struct dentry *dentry,
62214 + const struct vfsmount *mnt,
62215 + struct inode *inode,
62216 + const int mode, const char *to);
62217 +
62218 +int gr_is_capable(const int cap);
62219 +int gr_is_capable_nolog(const int cap);
62220 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
62221 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
62222 +
62223 +void gr_learn_resource(const struct task_struct *task, const int limit,
62224 + const unsigned long wanted, const int gt);
62225 +void gr_copy_label(struct task_struct *tsk);
62226 +void gr_handle_crash(struct task_struct *task, const int sig);
62227 +int gr_handle_signal(const struct task_struct *p, const int sig);
62228 +int gr_check_crash_uid(const uid_t uid);
62229 +int gr_check_protected_task(const struct task_struct *task);
62230 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
62231 +int gr_acl_handle_mmap(const struct file *file,
62232 + const unsigned long prot);
62233 +int gr_acl_handle_mprotect(const struct file *file,
62234 + const unsigned long prot);
62235 +int gr_check_hidden_task(const struct task_struct *tsk);
62236 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
62237 + const struct vfsmount *mnt);
62238 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
62239 + const struct vfsmount *mnt);
62240 +__u32 gr_acl_handle_access(const struct dentry *dentry,
62241 + const struct vfsmount *mnt, const int fmode);
62242 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
62243 + const struct vfsmount *mnt, umode_t *mode);
62244 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
62245 + const struct vfsmount *mnt);
62246 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
62247 + const struct vfsmount *mnt);
62248 +int gr_handle_ptrace(struct task_struct *task, const long request);
62249 +int gr_handle_proc_ptrace(struct task_struct *task);
62250 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
62251 + const struct vfsmount *mnt);
62252 +int gr_check_crash_exec(const struct file *filp);
62253 +int gr_acl_is_enabled(void);
62254 +void gr_set_kernel_label(struct task_struct *task);
62255 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
62256 + const gid_t gid);
62257 +int gr_set_proc_label(const struct dentry *dentry,
62258 + const struct vfsmount *mnt,
62259 + const int unsafe_flags);
62260 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
62261 + const struct vfsmount *mnt);
62262 +__u32 gr_acl_handle_open(const struct dentry *dentry,
62263 + const struct vfsmount *mnt, int acc_mode);
62264 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
62265 + const struct dentry *p_dentry,
62266 + const struct vfsmount *p_mnt,
62267 + int open_flags, int acc_mode, const int imode);
62268 +void gr_handle_create(const struct dentry *dentry,
62269 + const struct vfsmount *mnt);
62270 +void gr_handle_proc_create(const struct dentry *dentry,
62271 + const struct inode *inode);
62272 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
62273 + const struct dentry *parent_dentry,
62274 + const struct vfsmount *parent_mnt,
62275 + const int mode);
62276 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
62277 + const struct dentry *parent_dentry,
62278 + const struct vfsmount *parent_mnt);
62279 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
62280 + const struct vfsmount *mnt);
62281 +void gr_handle_delete(const ino_t ino, const dev_t dev);
62282 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
62283 + const struct vfsmount *mnt);
62284 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
62285 + const struct dentry *parent_dentry,
62286 + const struct vfsmount *parent_mnt,
62287 + const char *from);
62288 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
62289 + const struct dentry *parent_dentry,
62290 + const struct vfsmount *parent_mnt,
62291 + const struct dentry *old_dentry,
62292 + const struct vfsmount *old_mnt, const char *to);
62293 +int gr_acl_handle_rename(struct dentry *new_dentry,
62294 + struct dentry *parent_dentry,
62295 + const struct vfsmount *parent_mnt,
62296 + struct dentry *old_dentry,
62297 + struct inode *old_parent_inode,
62298 + struct vfsmount *old_mnt, const char *newname);
62299 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62300 + struct dentry *old_dentry,
62301 + struct dentry *new_dentry,
62302 + struct vfsmount *mnt, const __u8 replace);
62303 +__u32 gr_check_link(const struct dentry *new_dentry,
62304 + const struct dentry *parent_dentry,
62305 + const struct vfsmount *parent_mnt,
62306 + const struct dentry *old_dentry,
62307 + const struct vfsmount *old_mnt);
62308 +int gr_acl_handle_filldir(const struct file *file, const char *name,
62309 + const unsigned int namelen, const ino_t ino);
62310 +
62311 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
62312 + const struct vfsmount *mnt);
62313 +void gr_acl_handle_exit(void);
62314 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
62315 +int gr_acl_handle_procpidmem(const struct task_struct *task);
62316 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
62317 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
62318 +void gr_audit_ptrace(struct task_struct *task);
62319 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
62320 +
62321 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
62322 +
62323 +#ifdef CONFIG_GRKERNSEC
62324 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
62325 +void gr_handle_vm86(void);
62326 +void gr_handle_mem_readwrite(u64 from, u64 to);
62327 +
62328 +void gr_log_badprocpid(const char *entry);
62329 +
62330 +extern int grsec_enable_dmesg;
62331 +extern int grsec_disable_privio;
62332 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62333 +extern int grsec_enable_chroot_findtask;
62334 +#endif
62335 +#ifdef CONFIG_GRKERNSEC_SETXID
62336 +extern int grsec_enable_setxid;
62337 +#endif
62338 +#endif
62339 +
62340 +#endif
62341 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
62342 new file mode 100644
62343 index 0000000..e7ffaaf
62344 --- /dev/null
62345 +++ b/include/linux/grsock.h
62346 @@ -0,0 +1,19 @@
62347 +#ifndef __GRSOCK_H
62348 +#define __GRSOCK_H
62349 +
62350 +extern void gr_attach_curr_ip(const struct sock *sk);
62351 +extern int gr_handle_sock_all(const int family, const int type,
62352 + const int protocol);
62353 +extern int gr_handle_sock_server(const struct sockaddr *sck);
62354 +extern int gr_handle_sock_server_other(const struct sock *sck);
62355 +extern int gr_handle_sock_client(const struct sockaddr *sck);
62356 +extern int gr_search_connect(struct socket * sock,
62357 + struct sockaddr_in * addr);
62358 +extern int gr_search_bind(struct socket * sock,
62359 + struct sockaddr_in * addr);
62360 +extern int gr_search_listen(struct socket * sock);
62361 +extern int gr_search_accept(struct socket * sock);
62362 +extern int gr_search_socket(const int domain, const int type,
62363 + const int protocol);
62364 +
62365 +#endif
62366 diff --git a/include/linux/hid.h b/include/linux/hid.h
62367 index 3a95da6..51986f1 100644
62368 --- a/include/linux/hid.h
62369 +++ b/include/linux/hid.h
62370 @@ -696,7 +696,7 @@ struct hid_ll_driver {
62371 unsigned int code, int value);
62372
62373 int (*parse)(struct hid_device *hdev);
62374 -};
62375 +} __no_const;
62376
62377 #define PM_HINT_FULLON 1<<5
62378 #define PM_HINT_NORMAL 1<<1
62379 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
62380 index 3a93f73..b19d0b3 100644
62381 --- a/include/linux/highmem.h
62382 +++ b/include/linux/highmem.h
62383 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct page *page)
62384 kunmap_atomic(kaddr, KM_USER0);
62385 }
62386
62387 +static inline void sanitize_highpage(struct page *page)
62388 +{
62389 + void *kaddr;
62390 + unsigned long flags;
62391 +
62392 + local_irq_save(flags);
62393 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
62394 + clear_page(kaddr);
62395 + kunmap_atomic(kaddr, KM_CLEARPAGE);
62396 + local_irq_restore(flags);
62397 +}
62398 +
62399 static inline void zero_user_segments(struct page *page,
62400 unsigned start1, unsigned end1,
62401 unsigned start2, unsigned end2)
62402 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
62403 index 8e25a91..551b161 100644
62404 --- a/include/linux/i2c.h
62405 +++ b/include/linux/i2c.h
62406 @@ -364,6 +364,7 @@ struct i2c_algorithm {
62407 /* To determine what the adapter supports */
62408 u32 (*functionality) (struct i2c_adapter *);
62409 };
62410 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
62411
62412 /*
62413 * i2c_adapter is the structure used to identify a physical i2c bus along
62414 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
62415 index a6deef4..c56a7f2 100644
62416 --- a/include/linux/i2o.h
62417 +++ b/include/linux/i2o.h
62418 @@ -564,7 +564,7 @@ struct i2o_controller {
62419 struct i2o_device *exec; /* Executive */
62420 #if BITS_PER_LONG == 64
62421 spinlock_t context_list_lock; /* lock for context_list */
62422 - atomic_t context_list_counter; /* needed for unique contexts */
62423 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
62424 struct list_head context_list; /* list of context id's
62425 and pointers */
62426 #endif
62427 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
62428 index 58404b0..439ed95 100644
62429 --- a/include/linux/if_team.h
62430 +++ b/include/linux/if_team.h
62431 @@ -64,6 +64,7 @@ struct team_mode_ops {
62432 void (*port_leave)(struct team *team, struct team_port *port);
62433 void (*port_change_mac)(struct team *team, struct team_port *port);
62434 };
62435 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
62436
62437 enum team_option_type {
62438 TEAM_OPTION_TYPE_U32,
62439 @@ -112,7 +113,7 @@ struct team {
62440 struct list_head option_list;
62441
62442 const struct team_mode *mode;
62443 - struct team_mode_ops ops;
62444 + team_mode_ops_no_const ops;
62445 long mode_priv[TEAM_MODE_PRIV_LONGS];
62446 };
62447
62448 diff --git a/include/linux/init.h b/include/linux/init.h
62449 index 6b95109..4aca62c 100644
62450 --- a/include/linux/init.h
62451 +++ b/include/linux/init.h
62452 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
62453
62454 /* Each module must use one module_init(). */
62455 #define module_init(initfn) \
62456 - static inline initcall_t __inittest(void) \
62457 + static inline __used initcall_t __inittest(void) \
62458 { return initfn; } \
62459 int init_module(void) __attribute__((alias(#initfn)));
62460
62461 /* This is only required if you want to be unloadable. */
62462 #define module_exit(exitfn) \
62463 - static inline exitcall_t __exittest(void) \
62464 + static inline __used exitcall_t __exittest(void) \
62465 { return exitfn; } \
62466 void cleanup_module(void) __attribute__((alias(#exitfn)));
62467
62468 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
62469 index 9c66b1a..a3fdded 100644
62470 --- a/include/linux/init_task.h
62471 +++ b/include/linux/init_task.h
62472 @@ -127,6 +127,12 @@ extern struct cred init_cred;
62473
62474 #define INIT_TASK_COMM "swapper"
62475
62476 +#ifdef CONFIG_X86
62477 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
62478 +#else
62479 +#define INIT_TASK_THREAD_INFO
62480 +#endif
62481 +
62482 /*
62483 * INIT_TASK is used to set up the first task table, touch at
62484 * your own risk!. Base=0, limit=0x1fffff (=2MB)
62485 @@ -165,6 +171,7 @@ extern struct cred init_cred;
62486 RCU_INIT_POINTER(.cred, &init_cred), \
62487 .comm = INIT_TASK_COMM, \
62488 .thread = INIT_THREAD, \
62489 + INIT_TASK_THREAD_INFO \
62490 .fs = &init_fs, \
62491 .files = &init_files, \
62492 .signal = &init_signals, \
62493 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
62494 index e6ca56d..8583707 100644
62495 --- a/include/linux/intel-iommu.h
62496 +++ b/include/linux/intel-iommu.h
62497 @@ -296,7 +296,7 @@ struct iommu_flush {
62498 u8 fm, u64 type);
62499 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62500 unsigned int size_order, u64 type);
62501 -};
62502 +} __no_const;
62503
62504 enum {
62505 SR_DMAR_FECTL_REG,
62506 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
62507 index a64b00e..464d8bc 100644
62508 --- a/include/linux/interrupt.h
62509 +++ b/include/linux/interrupt.h
62510 @@ -441,7 +441,7 @@ enum
62511 /* map softirq index to softirq name. update 'softirq_to_name' in
62512 * kernel/softirq.c when adding a new softirq.
62513 */
62514 -extern char *softirq_to_name[NR_SOFTIRQS];
62515 +extern const char * const softirq_to_name[NR_SOFTIRQS];
62516
62517 /* softirq mask and active fields moved to irq_cpustat_t in
62518 * asm/hardirq.h to get better cache usage. KAO
62519 @@ -449,12 +449,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
62520
62521 struct softirq_action
62522 {
62523 - void (*action)(struct softirq_action *);
62524 + void (*action)(void);
62525 };
62526
62527 asmlinkage void do_softirq(void);
62528 asmlinkage void __do_softirq(void);
62529 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62530 +extern void open_softirq(int nr, void (*action)(void));
62531 extern void softirq_init(void);
62532 static inline void __raise_softirq_irqoff(unsigned int nr)
62533 {
62534 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
62535 index 3875719..4cd454c 100644
62536 --- a/include/linux/kallsyms.h
62537 +++ b/include/linux/kallsyms.h
62538 @@ -15,7 +15,8 @@
62539
62540 struct module;
62541
62542 -#ifdef CONFIG_KALLSYMS
62543 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
62544 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62545 /* Lookup the address for a symbol. Returns 0 if not found. */
62546 unsigned long kallsyms_lookup_name(const char *name);
62547
62548 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
62549 /* Stupid that this does nothing, but I didn't create this mess. */
62550 #define __print_symbol(fmt, addr)
62551 #endif /*CONFIG_KALLSYMS*/
62552 +#else /* when included by kallsyms.c, vsnprintf.c, or
62553 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
62554 +extern void __print_symbol(const char *fmt, unsigned long address);
62555 +extern int sprint_backtrace(char *buffer, unsigned long address);
62556 +extern int sprint_symbol(char *buffer, unsigned long address);
62557 +const char *kallsyms_lookup(unsigned long addr,
62558 + unsigned long *symbolsize,
62559 + unsigned long *offset,
62560 + char **modname, char *namebuf);
62561 +#endif
62562
62563 /* This macro allows us to keep printk typechecking */
62564 static __printf(1, 2)
62565 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
62566 index c4d2fc1..5df9c19 100644
62567 --- a/include/linux/kgdb.h
62568 +++ b/include/linux/kgdb.h
62569 @@ -53,7 +53,7 @@ extern int kgdb_connected;
62570 extern int kgdb_io_module_registered;
62571
62572 extern atomic_t kgdb_setting_breakpoint;
62573 -extern atomic_t kgdb_cpu_doing_single_step;
62574 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62575
62576 extern struct task_struct *kgdb_usethread;
62577 extern struct task_struct *kgdb_contthread;
62578 @@ -252,7 +252,7 @@ struct kgdb_arch {
62579 void (*disable_hw_break)(struct pt_regs *regs);
62580 void (*remove_all_hw_break)(void);
62581 void (*correct_hw_break)(void);
62582 -};
62583 +} __do_const;
62584
62585 /**
62586 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
62587 @@ -277,7 +277,7 @@ struct kgdb_io {
62588 void (*pre_exception) (void);
62589 void (*post_exception) (void);
62590 int is_console;
62591 -};
62592 +} __do_const;
62593
62594 extern struct kgdb_arch arch_kgdb_ops;
62595
62596 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
62597 index 0fb48ef..1b680b2 100644
62598 --- a/include/linux/kmod.h
62599 +++ b/include/linux/kmod.h
62600 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
62601 * usually useless though. */
62602 extern __printf(2, 3)
62603 int __request_module(bool wait, const char *name, ...);
62604 +extern __printf(3, 4)
62605 +int ___request_module(bool wait, char *param_name, const char *name, ...);
62606 #define request_module(mod...) __request_module(true, mod)
62607 #define request_module_nowait(mod...) __request_module(false, mod)
62608 #define try_then_request_module(x, mod...) \
62609 diff --git a/include/linux/kref.h b/include/linux/kref.h
62610 index 9c07dce..a92fa71 100644
62611 --- a/include/linux/kref.h
62612 +++ b/include/linux/kref.h
62613 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62614 static inline int kref_sub(struct kref *kref, unsigned int count,
62615 void (*release)(struct kref *kref))
62616 {
62617 - WARN_ON(release == NULL);
62618 + BUG_ON(release == NULL);
62619
62620 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62621 release(kref);
62622 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
62623 index 4c4e83d..5f16617 100644
62624 --- a/include/linux/kvm_host.h
62625 +++ b/include/linux/kvm_host.h
62626 @@ -326,7 +326,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
62627 void vcpu_load(struct kvm_vcpu *vcpu);
62628 void vcpu_put(struct kvm_vcpu *vcpu);
62629
62630 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62631 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62632 struct module *module);
62633 void kvm_exit(void);
62634
62635 @@ -416,20 +416,20 @@ void kvm_get_pfn(pfn_t pfn);
62636 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
62637 int len);
62638 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
62639 - unsigned long len);
62640 -int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
62641 + unsigned long len) __size_overflow(4);
62642 +int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) __size_overflow(2,4);
62643 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62644 - void *data, unsigned long len);
62645 + void *data, unsigned long len) __size_overflow(4);
62646 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
62647 int offset, int len);
62648 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
62649 - unsigned long len);
62650 + unsigned long len) __size_overflow(2,4);
62651 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62652 - void *data, unsigned long len);
62653 + void *data, unsigned long len) __size_overflow(4);
62654 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
62655 gpa_t gpa);
62656 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
62657 -int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
62658 +int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) __size_overflow(2,3);
62659 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
62660 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
62661 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
62662 @@ -485,7 +485,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
62663 struct kvm_guest_debug *dbg);
62664 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62665
62666 -int kvm_arch_init(void *opaque);
62667 +int kvm_arch_init(const void *opaque);
62668 void kvm_arch_exit(void);
62669
62670 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
62671 @@ -727,7 +727,7 @@ int kvm_setup_default_irq_routing(struct kvm *kvm);
62672 int kvm_set_irq_routing(struct kvm *kvm,
62673 const struct kvm_irq_routing_entry *entries,
62674 unsigned nr,
62675 - unsigned flags);
62676 + unsigned flags) __size_overflow(3);
62677 void kvm_free_irq_routing(struct kvm *kvm);
62678
62679 #else
62680 diff --git a/include/linux/libata.h b/include/linux/libata.h
62681 index cafc09a..d7e7829 100644
62682 --- a/include/linux/libata.h
62683 +++ b/include/linux/libata.h
62684 @@ -909,7 +909,7 @@ struct ata_port_operations {
62685 * fields must be pointers.
62686 */
62687 const struct ata_port_operations *inherits;
62688 -};
62689 +} __do_const;
62690
62691 struct ata_port_info {
62692 unsigned long flags;
62693 diff --git a/include/linux/mca.h b/include/linux/mca.h
62694 index 3797270..7765ede 100644
62695 --- a/include/linux/mca.h
62696 +++ b/include/linux/mca.h
62697 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62698 int region);
62699 void * (*mca_transform_memory)(struct mca_device *,
62700 void *memory);
62701 -};
62702 +} __no_const;
62703
62704 struct mca_bus {
62705 u64 default_dma_mask;
62706 diff --git a/include/linux/memory.h b/include/linux/memory.h
62707 index 1ac7f6e..a5794d0 100644
62708 --- a/include/linux/memory.h
62709 +++ b/include/linux/memory.h
62710 @@ -143,7 +143,7 @@ struct memory_accessor {
62711 size_t count);
62712 ssize_t (*write)(struct memory_accessor *, const char *buf,
62713 off_t offset, size_t count);
62714 -};
62715 +} __no_const;
62716
62717 /*
62718 * Kernel text modification mutex, used for code patching. Users of this lock
62719 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
62720 index 9970337..9444122 100644
62721 --- a/include/linux/mfd/abx500.h
62722 +++ b/include/linux/mfd/abx500.h
62723 @@ -188,6 +188,7 @@ struct abx500_ops {
62724 int (*event_registers_startup_state_get) (struct device *, u8 *);
62725 int (*startup_irq_enabled) (struct device *, unsigned int);
62726 };
62727 +typedef struct abx500_ops __no_const abx500_ops_no_const;
62728
62729 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
62730 void abx500_remove_ops(struct device *dev);
62731 diff --git a/include/linux/mm.h b/include/linux/mm.h
62732 index 17b27cd..baea141 100644
62733 --- a/include/linux/mm.h
62734 +++ b/include/linux/mm.h
62735 @@ -115,7 +115,14 @@ extern unsigned int kobjsize(const void *objp);
62736
62737 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62738 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62739 +
62740 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62741 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62742 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62743 +#else
62744 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
62745 +#endif
62746 +
62747 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62748 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62749
62750 @@ -1012,34 +1019,6 @@ int set_page_dirty(struct page *page);
62751 int set_page_dirty_lock(struct page *page);
62752 int clear_page_dirty_for_io(struct page *page);
62753
62754 -/* Is the vma a continuation of the stack vma above it? */
62755 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
62756 -{
62757 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62758 -}
62759 -
62760 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
62761 - unsigned long addr)
62762 -{
62763 - return (vma->vm_flags & VM_GROWSDOWN) &&
62764 - (vma->vm_start == addr) &&
62765 - !vma_growsdown(vma->vm_prev, addr);
62766 -}
62767 -
62768 -/* Is the vma a continuation of the stack vma below it? */
62769 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62770 -{
62771 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62772 -}
62773 -
62774 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
62775 - unsigned long addr)
62776 -{
62777 - return (vma->vm_flags & VM_GROWSUP) &&
62778 - (vma->vm_end == addr) &&
62779 - !vma_growsup(vma->vm_next, addr);
62780 -}
62781 -
62782 extern unsigned long move_page_tables(struct vm_area_struct *vma,
62783 unsigned long old_addr, struct vm_area_struct *new_vma,
62784 unsigned long new_addr, unsigned long len);
62785 @@ -1134,6 +1113,15 @@ static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
62786 }
62787 #endif
62788
62789 +#ifdef CONFIG_MMU
62790 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
62791 +#else
62792 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
62793 +{
62794 + return __pgprot(0);
62795 +}
62796 +#endif
62797 +
62798 int vma_wants_writenotify(struct vm_area_struct *vma);
62799
62800 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
62801 @@ -1152,8 +1140,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
62802 {
62803 return 0;
62804 }
62805 +
62806 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
62807 + unsigned long address)
62808 +{
62809 + return 0;
62810 +}
62811 #else
62812 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62813 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62814 #endif
62815
62816 #ifdef __PAGETABLE_PMD_FOLDED
62817 @@ -1162,8 +1157,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
62818 {
62819 return 0;
62820 }
62821 +
62822 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
62823 + unsigned long address)
62824 +{
62825 + return 0;
62826 +}
62827 #else
62828 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
62829 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
62830 #endif
62831
62832 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
62833 @@ -1181,11 +1183,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
62834 NULL: pud_offset(pgd, address);
62835 }
62836
62837 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
62838 +{
62839 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
62840 + NULL: pud_offset(pgd, address);
62841 +}
62842 +
62843 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
62844 {
62845 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
62846 NULL: pmd_offset(pud, address);
62847 }
62848 +
62849 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
62850 +{
62851 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
62852 + NULL: pmd_offset(pud, address);
62853 +}
62854 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
62855
62856 #if USE_SPLIT_PTLOCKS
62857 @@ -1409,6 +1423,7 @@ out:
62858 }
62859
62860 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62861 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62862
62863 extern unsigned long do_brk(unsigned long, unsigned long);
62864
62865 @@ -1466,6 +1481,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62866 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62867 struct vm_area_struct **pprev);
62868
62869 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62870 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62871 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62872 +
62873 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62874 NULL if none. Assume start_addr < end_addr. */
62875 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62876 @@ -1494,15 +1513,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
62877 return vma;
62878 }
62879
62880 -#ifdef CONFIG_MMU
62881 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
62882 -#else
62883 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62884 -{
62885 - return __pgprot(0);
62886 -}
62887 -#endif
62888 -
62889 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62890 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62891 unsigned long pfn, unsigned long size, pgprot_t);
62892 @@ -1606,7 +1616,7 @@ extern int unpoison_memory(unsigned long pfn);
62893 extern int sysctl_memory_failure_early_kill;
62894 extern int sysctl_memory_failure_recovery;
62895 extern void shake_page(struct page *p, int access);
62896 -extern atomic_long_t mce_bad_pages;
62897 +extern atomic_long_unchecked_t mce_bad_pages;
62898 extern int soft_offline_page(struct page *page, int flags);
62899
62900 extern void dump_page(struct page *page);
62901 @@ -1637,5 +1647,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
62902 static inline bool page_is_guard(struct page *page) { return false; }
62903 #endif /* CONFIG_DEBUG_PAGEALLOC */
62904
62905 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62906 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62907 +#else
62908 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62909 +#endif
62910 +
62911 #endif /* __KERNEL__ */
62912 #endif /* _LINUX_MM_H */
62913 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62914 index 3cc3062..efeaeb7 100644
62915 --- a/include/linux/mm_types.h
62916 +++ b/include/linux/mm_types.h
62917 @@ -252,6 +252,8 @@ struct vm_area_struct {
62918 #ifdef CONFIG_NUMA
62919 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62920 #endif
62921 +
62922 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62923 };
62924
62925 struct core_thread {
62926 @@ -326,7 +328,7 @@ struct mm_struct {
62927 unsigned long def_flags;
62928 unsigned long nr_ptes; /* Page table pages */
62929 unsigned long start_code, end_code, start_data, end_data;
62930 - unsigned long start_brk, brk, start_stack;
62931 + unsigned long brk_gap, start_brk, brk, start_stack;
62932 unsigned long arg_start, arg_end, env_start, env_end;
62933
62934 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
62935 @@ -388,6 +390,24 @@ struct mm_struct {
62936 #ifdef CONFIG_CPUMASK_OFFSTACK
62937 struct cpumask cpumask_allocation;
62938 #endif
62939 +
62940 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62941 + unsigned long pax_flags;
62942 +#endif
62943 +
62944 +#ifdef CONFIG_PAX_DLRESOLVE
62945 + unsigned long call_dl_resolve;
62946 +#endif
62947 +
62948 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62949 + unsigned long call_syscall;
62950 +#endif
62951 +
62952 +#ifdef CONFIG_PAX_ASLR
62953 + unsigned long delta_mmap; /* randomized offset */
62954 + unsigned long delta_stack; /* randomized offset */
62955 +#endif
62956 +
62957 };
62958
62959 static inline void mm_init_cpumask(struct mm_struct *mm)
62960 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62961 index 1d1b1e1..2a13c78 100644
62962 --- a/include/linux/mmu_notifier.h
62963 +++ b/include/linux/mmu_notifier.h
62964 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62965 */
62966 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62967 ({ \
62968 - pte_t __pte; \
62969 + pte_t ___pte; \
62970 struct vm_area_struct *___vma = __vma; \
62971 unsigned long ___address = __address; \
62972 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62973 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62974 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62975 - __pte; \
62976 + ___pte; \
62977 })
62978
62979 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62980 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62981 index 650ba2f..af0a58c 100644
62982 --- a/include/linux/mmzone.h
62983 +++ b/include/linux/mmzone.h
62984 @@ -379,7 +379,7 @@ struct zone {
62985 unsigned long flags; /* zone flags, see below */
62986
62987 /* Zone statistics */
62988 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62989 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62990
62991 /*
62992 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62993 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62994 index 83ac071..2656e0e 100644
62995 --- a/include/linux/mod_devicetable.h
62996 +++ b/include/linux/mod_devicetable.h
62997 @@ -12,7 +12,7 @@
62998 typedef unsigned long kernel_ulong_t;
62999 #endif
63000
63001 -#define PCI_ANY_ID (~0)
63002 +#define PCI_ANY_ID ((__u16)~0)
63003
63004 struct pci_device_id {
63005 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
63006 @@ -131,7 +131,7 @@ struct usb_device_id {
63007 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
63008 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
63009
63010 -#define HID_ANY_ID (~0)
63011 +#define HID_ANY_ID (~0U)
63012
63013 struct hid_device_id {
63014 __u16 bus;
63015 diff --git a/include/linux/module.h b/include/linux/module.h
63016 index 4598bf0..e069d7f 100644
63017 --- a/include/linux/module.h
63018 +++ b/include/linux/module.h
63019 @@ -17,6 +17,7 @@
63020 #include <linux/moduleparam.h>
63021 #include <linux/tracepoint.h>
63022 #include <linux/export.h>
63023 +#include <linux/fs.h>
63024
63025 #include <linux/percpu.h>
63026 #include <asm/module.h>
63027 @@ -275,19 +276,16 @@ struct module
63028 int (*init)(void);
63029
63030 /* If this is non-NULL, vfree after init() returns */
63031 - void *module_init;
63032 + void *module_init_rx, *module_init_rw;
63033
63034 /* Here is the actual code + data, vfree'd on unload. */
63035 - void *module_core;
63036 + void *module_core_rx, *module_core_rw;
63037
63038 /* Here are the sizes of the init and core sections */
63039 - unsigned int init_size, core_size;
63040 + unsigned int init_size_rw, core_size_rw;
63041
63042 /* The size of the executable code in each section. */
63043 - unsigned int init_text_size, core_text_size;
63044 -
63045 - /* Size of RO sections of the module (text+rodata) */
63046 - unsigned int init_ro_size, core_ro_size;
63047 + unsigned int init_size_rx, core_size_rx;
63048
63049 /* Arch-specific module values */
63050 struct mod_arch_specific arch;
63051 @@ -343,6 +341,10 @@ struct module
63052 #ifdef CONFIG_EVENT_TRACING
63053 struct ftrace_event_call **trace_events;
63054 unsigned int num_trace_events;
63055 + struct file_operations trace_id;
63056 + struct file_operations trace_enable;
63057 + struct file_operations trace_format;
63058 + struct file_operations trace_filter;
63059 #endif
63060 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
63061 unsigned int num_ftrace_callsites;
63062 @@ -390,16 +392,46 @@ bool is_module_address(unsigned long addr);
63063 bool is_module_percpu_address(unsigned long addr);
63064 bool is_module_text_address(unsigned long addr);
63065
63066 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
63067 +{
63068 +
63069 +#ifdef CONFIG_PAX_KERNEXEC
63070 + if (ktla_ktva(addr) >= (unsigned long)start &&
63071 + ktla_ktva(addr) < (unsigned long)start + size)
63072 + return 1;
63073 +#endif
63074 +
63075 + return ((void *)addr >= start && (void *)addr < start + size);
63076 +}
63077 +
63078 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
63079 +{
63080 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
63081 +}
63082 +
63083 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
63084 +{
63085 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
63086 +}
63087 +
63088 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
63089 +{
63090 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
63091 +}
63092 +
63093 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
63094 +{
63095 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
63096 +}
63097 +
63098 static inline int within_module_core(unsigned long addr, struct module *mod)
63099 {
63100 - return (unsigned long)mod->module_core <= addr &&
63101 - addr < (unsigned long)mod->module_core + mod->core_size;
63102 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
63103 }
63104
63105 static inline int within_module_init(unsigned long addr, struct module *mod)
63106 {
63107 - return (unsigned long)mod->module_init <= addr &&
63108 - addr < (unsigned long)mod->module_init + mod->init_size;
63109 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
63110 }
63111
63112 /* Search for module by name: must hold module_mutex. */
63113 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
63114 index b2be02e..72d2f78 100644
63115 --- a/include/linux/moduleloader.h
63116 +++ b/include/linux/moduleloader.h
63117 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
63118
63119 /* Allocator used for allocating struct module, core sections and init
63120 sections. Returns NULL on failure. */
63121 -void *module_alloc(unsigned long size);
63122 +void *module_alloc(unsigned long size) __size_overflow(1);
63123 +
63124 +#ifdef CONFIG_PAX_KERNEXEC
63125 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
63126 +#else
63127 +#define module_alloc_exec(x) module_alloc(x)
63128 +#endif
63129
63130 /* Free memory returned from module_alloc. */
63131 void module_free(struct module *mod, void *module_region);
63132
63133 +#ifdef CONFIG_PAX_KERNEXEC
63134 +void module_free_exec(struct module *mod, void *module_region);
63135 +#else
63136 +#define module_free_exec(x, y) module_free((x), (y))
63137 +#endif
63138 +
63139 /* Apply the given relocation to the (simplified) ELF. Return -error
63140 or 0. */
63141 int apply_relocate(Elf_Shdr *sechdrs,
63142 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
63143 index c47f4d6..23f9bdb 100644
63144 --- a/include/linux/moduleparam.h
63145 +++ b/include/linux/moduleparam.h
63146 @@ -260,7 +260,7 @@ static inline void __kernel_param_unlock(void)
63147 * @len is usually just sizeof(string).
63148 */
63149 #define module_param_string(name, string, len, perm) \
63150 - static const struct kparam_string __param_string_##name \
63151 + static const struct kparam_string __param_string_##name __used \
63152 = { len, string }; \
63153 __module_param_call(MODULE_PARAM_PREFIX, name, \
63154 &param_ops_string, \
63155 @@ -396,7 +396,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
63156 */
63157 #define module_param_array_named(name, array, type, nump, perm) \
63158 param_check_##type(name, &(array)[0]); \
63159 - static const struct kparam_array __param_arr_##name \
63160 + static const struct kparam_array __param_arr_##name __used \
63161 = { .max = ARRAY_SIZE(array), .num = nump, \
63162 .ops = &param_ops_##type, \
63163 .elemsize = sizeof(array[0]), .elem = array }; \
63164 diff --git a/include/linux/namei.h b/include/linux/namei.h
63165 index ffc0213..2c1f2cb 100644
63166 --- a/include/linux/namei.h
63167 +++ b/include/linux/namei.h
63168 @@ -24,7 +24,7 @@ struct nameidata {
63169 unsigned seq;
63170 int last_type;
63171 unsigned depth;
63172 - char *saved_names[MAX_NESTED_LINKS + 1];
63173 + const char *saved_names[MAX_NESTED_LINKS + 1];
63174
63175 /* Intent data */
63176 union {
63177 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
63178 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
63179 extern void unlock_rename(struct dentry *, struct dentry *);
63180
63181 -static inline void nd_set_link(struct nameidata *nd, char *path)
63182 +static inline void nd_set_link(struct nameidata *nd, const char *path)
63183 {
63184 nd->saved_names[nd->depth] = path;
63185 }
63186
63187 -static inline char *nd_get_link(struct nameidata *nd)
63188 +static inline const char *nd_get_link(const struct nameidata *nd)
63189 {
63190 return nd->saved_names[nd->depth];
63191 }
63192 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
63193 index 7e472b7..212d381 100644
63194 --- a/include/linux/netdevice.h
63195 +++ b/include/linux/netdevice.h
63196 @@ -1002,6 +1002,7 @@ struct net_device_ops {
63197 int (*ndo_neigh_construct)(struct neighbour *n);
63198 void (*ndo_neigh_destroy)(struct neighbour *n);
63199 };
63200 +typedef struct net_device_ops __no_const net_device_ops_no_const;
63201
63202 /*
63203 * The DEVICE structure.
63204 @@ -1063,7 +1064,7 @@ struct net_device {
63205 int iflink;
63206
63207 struct net_device_stats stats;
63208 - atomic_long_t rx_dropped; /* dropped packets by core network
63209 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
63210 * Do not use this in drivers.
63211 */
63212
63213 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
63214 new file mode 100644
63215 index 0000000..33f4af8
63216 --- /dev/null
63217 +++ b/include/linux/netfilter/xt_gradm.h
63218 @@ -0,0 +1,9 @@
63219 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
63220 +#define _LINUX_NETFILTER_XT_GRADM_H 1
63221 +
63222 +struct xt_gradm_mtinfo {
63223 + __u16 flags;
63224 + __u16 invflags;
63225 +};
63226 +
63227 +#endif
63228 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
63229 index c65a18a..0c05f3a 100644
63230 --- a/include/linux/of_pdt.h
63231 +++ b/include/linux/of_pdt.h
63232 @@ -32,7 +32,7 @@ struct of_pdt_ops {
63233
63234 /* return 0 on success; fill in 'len' with number of bytes in path */
63235 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
63236 -};
63237 +} __no_const;
63238
63239 extern void *prom_early_alloc(unsigned long size);
63240
63241 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
63242 index a4c5624..2dabfb7 100644
63243 --- a/include/linux/oprofile.h
63244 +++ b/include/linux/oprofile.h
63245 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
63246 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
63247 char const * name, ulong * val);
63248
63249 -/** Create a file for read-only access to an atomic_t. */
63250 +/** Create a file for read-only access to an atomic_unchecked_t. */
63251 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
63252 - char const * name, atomic_t * val);
63253 + char const * name, atomic_unchecked_t * val);
63254
63255 /** create a directory */
63256 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
63257 @@ -163,7 +163,7 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user * buf, size_t co
63258 * Read an ASCII string for a number from a userspace buffer and fill *val on success.
63259 * Returns 0 on success, < 0 on error.
63260 */
63261 -int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count);
63262 +int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count) __size_overflow(3);
63263
63264 /** lock for read/write safety */
63265 extern raw_spinlock_t oprofilefs_lock;
63266 diff --git a/include/linux/padata.h b/include/linux/padata.h
63267 index 4633b2f..988bc08 100644
63268 --- a/include/linux/padata.h
63269 +++ b/include/linux/padata.h
63270 @@ -129,7 +129,7 @@ struct parallel_data {
63271 struct padata_instance *pinst;
63272 struct padata_parallel_queue __percpu *pqueue;
63273 struct padata_serial_queue __percpu *squeue;
63274 - atomic_t seq_nr;
63275 + atomic_unchecked_t seq_nr;
63276 atomic_t reorder_objects;
63277 atomic_t refcnt;
63278 unsigned int max_seq_nr;
63279 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
63280 index abb2776..d8b8e15 100644
63281 --- a/include/linux/perf_event.h
63282 +++ b/include/linux/perf_event.h
63283 @@ -750,8 +750,8 @@ struct perf_event {
63284
63285 enum perf_event_active_state state;
63286 unsigned int attach_state;
63287 - local64_t count;
63288 - atomic64_t child_count;
63289 + local64_t count; /* PaX: fix it one day */
63290 + atomic64_unchecked_t child_count;
63291
63292 /*
63293 * These are the total time in nanoseconds that the event
63294 @@ -802,8 +802,8 @@ struct perf_event {
63295 * These accumulate total time (in nanoseconds) that children
63296 * events have been enabled and running, respectively.
63297 */
63298 - atomic64_t child_total_time_enabled;
63299 - atomic64_t child_total_time_running;
63300 + atomic64_unchecked_t child_total_time_enabled;
63301 + atomic64_unchecked_t child_total_time_running;
63302
63303 /*
63304 * Protect attach/detach and child_list:
63305 diff --git a/include/linux/personality.h b/include/linux/personality.h
63306 index 8fc7dd1a..c19d89e 100644
63307 --- a/include/linux/personality.h
63308 +++ b/include/linux/personality.h
63309 @@ -44,6 +44,7 @@ enum {
63310 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
63311 ADDR_NO_RANDOMIZE | \
63312 ADDR_COMPAT_LAYOUT | \
63313 + ADDR_LIMIT_3GB | \
63314 MMAP_PAGE_ZERO)
63315
63316 /*
63317 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
63318 index 0072a53..c5dcca5 100644
63319 --- a/include/linux/pipe_fs_i.h
63320 +++ b/include/linux/pipe_fs_i.h
63321 @@ -47,9 +47,9 @@ struct pipe_buffer {
63322 struct pipe_inode_info {
63323 wait_queue_head_t wait;
63324 unsigned int nrbufs, curbuf, buffers;
63325 - unsigned int readers;
63326 - unsigned int writers;
63327 - unsigned int waiting_writers;
63328 + atomic_t readers;
63329 + atomic_t writers;
63330 + atomic_t waiting_writers;
63331 unsigned int r_counter;
63332 unsigned int w_counter;
63333 struct page *tmp_page;
63334 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
63335 index 609daae..5392427 100644
63336 --- a/include/linux/pm_runtime.h
63337 +++ b/include/linux/pm_runtime.h
63338 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
63339
63340 static inline void pm_runtime_mark_last_busy(struct device *dev)
63341 {
63342 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
63343 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
63344 }
63345
63346 #else /* !CONFIG_PM_RUNTIME */
63347 diff --git a/include/linux/poison.h b/include/linux/poison.h
63348 index 2110a81..13a11bb 100644
63349 --- a/include/linux/poison.h
63350 +++ b/include/linux/poison.h
63351 @@ -19,8 +19,8 @@
63352 * under normal circumstances, used to verify that nobody uses
63353 * non-initialized list entries.
63354 */
63355 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
63356 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
63357 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
63358 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
63359
63360 /********** include/linux/timer.h **********/
63361 /*
63362 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
63363 index 58969b2..ead129b 100644
63364 --- a/include/linux/preempt.h
63365 +++ b/include/linux/preempt.h
63366 @@ -123,7 +123,7 @@ struct preempt_ops {
63367 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
63368 void (*sched_out)(struct preempt_notifier *notifier,
63369 struct task_struct *next);
63370 -};
63371 +} __no_const;
63372
63373 /**
63374 * preempt_notifier - key for installing preemption notifiers
63375 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
63376 index 85c5073..51fac8b 100644
63377 --- a/include/linux/proc_fs.h
63378 +++ b/include/linux/proc_fs.h
63379 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
63380 return proc_create_data(name, mode, parent, proc_fops, NULL);
63381 }
63382
63383 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
63384 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
63385 +{
63386 +#ifdef CONFIG_GRKERNSEC_PROC_USER
63387 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
63388 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
63389 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
63390 +#else
63391 + return proc_create_data(name, mode, parent, proc_fops, NULL);
63392 +#endif
63393 +}
63394 +
63395 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
63396 umode_t mode, struct proc_dir_entry *base,
63397 read_proc_t *read_proc, void * data)
63398 @@ -258,7 +270,7 @@ union proc_op {
63399 int (*proc_show)(struct seq_file *m,
63400 struct pid_namespace *ns, struct pid *pid,
63401 struct task_struct *task);
63402 -};
63403 +} __no_const;
63404
63405 struct ctl_table_header;
63406 struct ctl_table;
63407 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
63408 index c2f1f6a..6fdb196 100644
63409 --- a/include/linux/ptrace.h
63410 +++ b/include/linux/ptrace.h
63411 @@ -199,9 +199,10 @@ static inline void ptrace_event(int event, unsigned long message)
63412 if (unlikely(ptrace_event_enabled(current, event))) {
63413 current->ptrace_message = message;
63414 ptrace_notify((event << 8) | SIGTRAP);
63415 - } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
63416 + } else if (event == PTRACE_EVENT_EXEC) {
63417 /* legacy EXEC report via SIGTRAP */
63418 - send_sig(SIGTRAP, current, 0);
63419 + if ((current->ptrace & (PT_PTRACED|PT_SEIZED)) == PT_PTRACED)
63420 + send_sig(SIGTRAP, current, 0);
63421 }
63422 }
63423
63424 diff --git a/include/linux/random.h b/include/linux/random.h
63425 index 8f74538..02a1012 100644
63426 --- a/include/linux/random.h
63427 +++ b/include/linux/random.h
63428 @@ -69,12 +69,17 @@ void srandom32(u32 seed);
63429
63430 u32 prandom32(struct rnd_state *);
63431
63432 +static inline unsigned long pax_get_random_long(void)
63433 +{
63434 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
63435 +}
63436 +
63437 /*
63438 * Handle minimum values for seeds
63439 */
63440 static inline u32 __seed(u32 x, u32 m)
63441 {
63442 - return (x < m) ? x + m : x;
63443 + return (x <= m) ? x + m + 1 : x;
63444 }
63445
63446 /**
63447 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
63448 index e0879a7..a12f962 100644
63449 --- a/include/linux/reboot.h
63450 +++ b/include/linux/reboot.h
63451 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
63452 * Architecture-specific implementations of sys_reboot commands.
63453 */
63454
63455 -extern void machine_restart(char *cmd);
63456 -extern void machine_halt(void);
63457 -extern void machine_power_off(void);
63458 +extern void machine_restart(char *cmd) __noreturn;
63459 +extern void machine_halt(void) __noreturn;
63460 +extern void machine_power_off(void) __noreturn;
63461
63462 extern void machine_shutdown(void);
63463 struct pt_regs;
63464 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
63465 */
63466
63467 extern void kernel_restart_prepare(char *cmd);
63468 -extern void kernel_restart(char *cmd);
63469 -extern void kernel_halt(void);
63470 -extern void kernel_power_off(void);
63471 +extern void kernel_restart(char *cmd) __noreturn;
63472 +extern void kernel_halt(void) __noreturn;
63473 +extern void kernel_power_off(void) __noreturn;
63474
63475 extern int C_A_D; /* for sysctl */
63476 void ctrl_alt_del(void);
63477 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
63478 * Emergency restart, callable from an interrupt handler.
63479 */
63480
63481 -extern void emergency_restart(void);
63482 +extern void emergency_restart(void) __noreturn;
63483 #include <asm/emergency-restart.h>
63484
63485 #endif
63486 diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
63487 index 2213ddc..650212a 100644
63488 --- a/include/linux/reiserfs_fs.h
63489 +++ b/include/linux/reiserfs_fs.h
63490 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
63491 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
63492
63493 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
63494 -#define get_generation(s) atomic_read (&fs_generation(s))
63495 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
63496 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
63497 #define __fs_changed(gen,s) (gen != get_generation (s))
63498 #define fs_changed(gen,s) \
63499 diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
63500 index 8c9e85c..1698e9a 100644
63501 --- a/include/linux/reiserfs_fs_sb.h
63502 +++ b/include/linux/reiserfs_fs_sb.h
63503 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
63504 /* Comment? -Hans */
63505 wait_queue_head_t s_wait;
63506 /* To be obsoleted soon by per buffer seals.. -Hans */
63507 - atomic_t s_generation_counter; // increased by one every time the
63508 + atomic_unchecked_t s_generation_counter; // increased by one every time the
63509 // tree gets re-balanced
63510 unsigned long s_properties; /* File system properties. Currently holds
63511 on-disk FS format */
63512 diff --git a/include/linux/relay.h b/include/linux/relay.h
63513 index a822fd7..62b70f6 100644
63514 --- a/include/linux/relay.h
63515 +++ b/include/linux/relay.h
63516 @@ -159,7 +159,7 @@ struct rchan_callbacks
63517 * The callback should return 0 if successful, negative if not.
63518 */
63519 int (*remove_buf_file)(struct dentry *dentry);
63520 -};
63521 +} __no_const;
63522
63523 /*
63524 * CONFIG_RELAY kernel API, kernel/relay.c
63525 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
63526 index c6c6084..5bf1212 100644
63527 --- a/include/linux/rfkill.h
63528 +++ b/include/linux/rfkill.h
63529 @@ -147,6 +147,7 @@ struct rfkill_ops {
63530 void (*query)(struct rfkill *rfkill, void *data);
63531 int (*set_block)(void *data, bool blocked);
63532 };
63533 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
63534
63535 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
63536 /**
63537 diff --git a/include/linux/rio.h b/include/linux/rio.h
63538 index 4d50611..c6858a2 100644
63539 --- a/include/linux/rio.h
63540 +++ b/include/linux/rio.h
63541 @@ -315,7 +315,7 @@ struct rio_ops {
63542 int mbox, void *buffer, size_t len);
63543 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
63544 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
63545 -};
63546 +} __no_const;
63547
63548 #define RIO_RESOURCE_MEM 0x00000100
63549 #define RIO_RESOURCE_DOORBELL 0x00000200
63550 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
63551 index 1cdd62a..e399f0d 100644
63552 --- a/include/linux/rmap.h
63553 +++ b/include/linux/rmap.h
63554 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
63555 void anon_vma_init(void); /* create anon_vma_cachep */
63556 int anon_vma_prepare(struct vm_area_struct *);
63557 void unlink_anon_vmas(struct vm_area_struct *);
63558 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
63559 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
63560 void anon_vma_moveto_tail(struct vm_area_struct *);
63561 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
63562 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
63563 void __anon_vma_link(struct vm_area_struct *);
63564
63565 static inline void anon_vma_merge(struct vm_area_struct *vma,
63566 diff --git a/include/linux/sched.h b/include/linux/sched.h
63567 index 0657368..765f70f 100644
63568 --- a/include/linux/sched.h
63569 +++ b/include/linux/sched.h
63570 @@ -101,6 +101,7 @@ struct bio_list;
63571 struct fs_struct;
63572 struct perf_event_context;
63573 struct blk_plug;
63574 +struct linux_binprm;
63575
63576 /*
63577 * List of flags we want to share for kernel threads,
63578 @@ -382,10 +383,13 @@ struct user_namespace;
63579 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63580
63581 extern int sysctl_max_map_count;
63582 +extern unsigned long sysctl_heap_stack_gap;
63583
63584 #include <linux/aio.h>
63585
63586 #ifdef CONFIG_MMU
63587 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63588 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
63589 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63590 extern unsigned long
63591 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
63592 @@ -631,6 +635,17 @@ struct signal_struct {
63593 #ifdef CONFIG_TASKSTATS
63594 struct taskstats *stats;
63595 #endif
63596 +
63597 +#ifdef CONFIG_GRKERNSEC
63598 + u32 curr_ip;
63599 + u32 saved_ip;
63600 + u32 gr_saddr;
63601 + u32 gr_daddr;
63602 + u16 gr_sport;
63603 + u16 gr_dport;
63604 + u8 used_accept:1;
63605 +#endif
63606 +
63607 #ifdef CONFIG_AUDIT
63608 unsigned audit_tty;
63609 struct tty_audit_buf *tty_audit_buf;
63610 @@ -714,6 +729,11 @@ struct user_struct {
63611 struct key *session_keyring; /* UID's default session keyring */
63612 #endif
63613
63614 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63615 + unsigned int banned;
63616 + unsigned long ban_expires;
63617 +#endif
63618 +
63619 /* Hash table maintenance information */
63620 struct hlist_node uidhash_node;
63621 uid_t uid;
63622 @@ -1354,8 +1374,8 @@ struct task_struct {
63623 struct list_head thread_group;
63624
63625 struct completion *vfork_done; /* for vfork() */
63626 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63627 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63628 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63629 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63630
63631 cputime_t utime, stime, utimescaled, stimescaled;
63632 cputime_t gtime;
63633 @@ -1371,13 +1391,6 @@ struct task_struct {
63634 struct task_cputime cputime_expires;
63635 struct list_head cpu_timers[3];
63636
63637 -/* process credentials */
63638 - const struct cred __rcu *real_cred; /* objective and real subjective task
63639 - * credentials (COW) */
63640 - const struct cred __rcu *cred; /* effective (overridable) subjective task
63641 - * credentials (COW) */
63642 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63643 -
63644 char comm[TASK_COMM_LEN]; /* executable name excluding path
63645 - access with [gs]et_task_comm (which lock
63646 it with task_lock())
63647 @@ -1394,8 +1407,16 @@ struct task_struct {
63648 #endif
63649 /* CPU-specific state of this task */
63650 struct thread_struct thread;
63651 +/* thread_info moved to task_struct */
63652 +#ifdef CONFIG_X86
63653 + struct thread_info tinfo;
63654 +#endif
63655 /* filesystem information */
63656 struct fs_struct *fs;
63657 +
63658 + const struct cred __rcu *cred; /* effective (overridable) subjective task
63659 + * credentials (COW) */
63660 +
63661 /* open file information */
63662 struct files_struct *files;
63663 /* namespaces */
63664 @@ -1442,6 +1463,11 @@ struct task_struct {
63665 struct rt_mutex_waiter *pi_blocked_on;
63666 #endif
63667
63668 +/* process credentials */
63669 + const struct cred __rcu *real_cred; /* objective and real subjective task
63670 + * credentials (COW) */
63671 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
63672 +
63673 #ifdef CONFIG_DEBUG_MUTEXES
63674 /* mutex deadlock detection */
63675 struct mutex_waiter *blocked_on;
63676 @@ -1558,6 +1584,27 @@ struct task_struct {
63677 unsigned long default_timer_slack_ns;
63678
63679 struct list_head *scm_work_list;
63680 +
63681 +#ifdef CONFIG_GRKERNSEC
63682 + /* grsecurity */
63683 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63684 + u64 exec_id;
63685 +#endif
63686 +#ifdef CONFIG_GRKERNSEC_SETXID
63687 + const struct cred *delayed_cred;
63688 +#endif
63689 + struct dentry *gr_chroot_dentry;
63690 + struct acl_subject_label *acl;
63691 + struct acl_role_label *role;
63692 + struct file *exec_file;
63693 + u16 acl_role_id;
63694 + /* is this the task that authenticated to the special role */
63695 + u8 acl_sp_role;
63696 + u8 is_writable;
63697 + u8 brute;
63698 + u8 gr_is_chrooted;
63699 +#endif
63700 +
63701 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
63702 /* Index of current stored address in ret_stack */
63703 int curr_ret_stack;
63704 @@ -1592,6 +1639,51 @@ struct task_struct {
63705 #endif
63706 };
63707
63708 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63709 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63710 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63711 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63712 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63713 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63714 +
63715 +#ifdef CONFIG_PAX_SOFTMODE
63716 +extern int pax_softmode;
63717 +#endif
63718 +
63719 +extern int pax_check_flags(unsigned long *);
63720 +
63721 +/* if tsk != current then task_lock must be held on it */
63722 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63723 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
63724 +{
63725 + if (likely(tsk->mm))
63726 + return tsk->mm->pax_flags;
63727 + else
63728 + return 0UL;
63729 +}
63730 +
63731 +/* if tsk != current then task_lock must be held on it */
63732 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63733 +{
63734 + if (likely(tsk->mm)) {
63735 + tsk->mm->pax_flags = flags;
63736 + return 0;
63737 + }
63738 + return -EINVAL;
63739 +}
63740 +#endif
63741 +
63742 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63743 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
63744 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63745 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63746 +#endif
63747 +
63748 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
63749 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
63750 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
63751 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
63752 +
63753 /* Future-safe accessor for struct task_struct's cpus_allowed. */
63754 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
63755
63756 @@ -2104,7 +2196,9 @@ void yield(void);
63757 extern struct exec_domain default_exec_domain;
63758
63759 union thread_union {
63760 +#ifndef CONFIG_X86
63761 struct thread_info thread_info;
63762 +#endif
63763 unsigned long stack[THREAD_SIZE/sizeof(long)];
63764 };
63765
63766 @@ -2137,6 +2231,7 @@ extern struct pid_namespace init_pid_ns;
63767 */
63768
63769 extern struct task_struct *find_task_by_vpid(pid_t nr);
63770 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63771 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63772 struct pid_namespace *ns);
63773
63774 @@ -2280,7 +2375,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
63775 extern void exit_itimers(struct signal_struct *);
63776 extern void flush_itimer_signals(void);
63777
63778 -extern void do_group_exit(int);
63779 +extern __noreturn void do_group_exit(int);
63780
63781 extern void daemonize(const char *, ...);
63782 extern int allow_signal(int);
63783 @@ -2478,13 +2573,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
63784
63785 #endif
63786
63787 -static inline int object_is_on_stack(void *obj)
63788 +static inline int object_starts_on_stack(void *obj)
63789 {
63790 - void *stack = task_stack_page(current);
63791 + const void *stack = task_stack_page(current);
63792
63793 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63794 }
63795
63796 +#ifdef CONFIG_PAX_USERCOPY
63797 +extern int object_is_on_stack(const void *obj, unsigned long len);
63798 +#endif
63799 +
63800 extern void thread_info_cache_init(void);
63801
63802 #ifdef CONFIG_DEBUG_STACK_USAGE
63803 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63804 index 899fbb4..1cb4138 100644
63805 --- a/include/linux/screen_info.h
63806 +++ b/include/linux/screen_info.h
63807 @@ -43,7 +43,8 @@ struct screen_info {
63808 __u16 pages; /* 0x32 */
63809 __u16 vesa_attributes; /* 0x34 */
63810 __u32 capabilities; /* 0x36 */
63811 - __u8 _reserved[6]; /* 0x3a */
63812 + __u16 vesapm_size; /* 0x3a */
63813 + __u8 _reserved[4]; /* 0x3c */
63814 } __attribute__((packed));
63815
63816 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
63817 diff --git a/include/linux/security.h b/include/linux/security.h
63818 index 83c18e8..2d98860 100644
63819 --- a/include/linux/security.h
63820 +++ b/include/linux/security.h
63821 @@ -37,6 +37,7 @@
63822 #include <linux/xfrm.h>
63823 #include <linux/slab.h>
63824 #include <linux/xattr.h>
63825 +#include <linux/grsecurity.h>
63826 #include <net/flow.h>
63827
63828 /* Maximum number of letters for an LSM name string */
63829 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
63830 index 44f1514..2bbf6c1 100644
63831 --- a/include/linux/seq_file.h
63832 +++ b/include/linux/seq_file.h
63833 @@ -24,6 +24,9 @@ struct seq_file {
63834 struct mutex lock;
63835 const struct seq_operations *op;
63836 int poll_event;
63837 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63838 + u64 exec_id;
63839 +#endif
63840 void *private;
63841 };
63842
63843 @@ -33,6 +36,7 @@ struct seq_operations {
63844 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63845 int (*show) (struct seq_file *m, void *v);
63846 };
63847 +typedef struct seq_operations __no_const seq_operations_no_const;
63848
63849 #define SEQ_SKIP 1
63850
63851 diff --git a/include/linux/shm.h b/include/linux/shm.h
63852 index 92808b8..c28cac4 100644
63853 --- a/include/linux/shm.h
63854 +++ b/include/linux/shm.h
63855 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63856
63857 /* The task created the shm object. NULL if the task is dead. */
63858 struct task_struct *shm_creator;
63859 +#ifdef CONFIG_GRKERNSEC
63860 + time_t shm_createtime;
63861 + pid_t shm_lapid;
63862 +#endif
63863 };
63864
63865 /* shm_mode upper byte flags */
63866 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63867 index 42854ce..3b7d3c8 100644
63868 --- a/include/linux/skbuff.h
63869 +++ b/include/linux/skbuff.h
63870 @@ -655,7 +655,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63871 */
63872 static inline int skb_queue_empty(const struct sk_buff_head *list)
63873 {
63874 - return list->next == (struct sk_buff *)list;
63875 + return list->next == (const struct sk_buff *)list;
63876 }
63877
63878 /**
63879 @@ -668,7 +668,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63880 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63881 const struct sk_buff *skb)
63882 {
63883 - return skb->next == (struct sk_buff *)list;
63884 + return skb->next == (const struct sk_buff *)list;
63885 }
63886
63887 /**
63888 @@ -681,7 +681,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63889 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63890 const struct sk_buff *skb)
63891 {
63892 - return skb->prev == (struct sk_buff *)list;
63893 + return skb->prev == (const struct sk_buff *)list;
63894 }
63895
63896 /**
63897 @@ -1558,7 +1558,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63898 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63899 */
63900 #ifndef NET_SKB_PAD
63901 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63902 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63903 #endif
63904
63905 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63906 diff --git a/include/linux/slab.h b/include/linux/slab.h
63907 index 573c809..07e1f43 100644
63908 --- a/include/linux/slab.h
63909 +++ b/include/linux/slab.h
63910 @@ -11,12 +11,20 @@
63911
63912 #include <linux/gfp.h>
63913 #include <linux/types.h>
63914 +#include <linux/err.h>
63915
63916 /*
63917 * Flags to pass to kmem_cache_create().
63918 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63919 */
63920 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63921 +
63922 +#ifdef CONFIG_PAX_USERCOPY
63923 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63924 +#else
63925 +#define SLAB_USERCOPY 0x00000000UL
63926 +#endif
63927 +
63928 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63929 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63930 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63931 @@ -87,10 +95,13 @@
63932 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63933 * Both make kfree a no-op.
63934 */
63935 -#define ZERO_SIZE_PTR ((void *)16)
63936 +#define ZERO_SIZE_PTR \
63937 +({ \
63938 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63939 + (void *)(-MAX_ERRNO-1L); \
63940 +})
63941
63942 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63943 - (unsigned long)ZERO_SIZE_PTR)
63944 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63945
63946 /*
63947 * struct kmem_cache related prototypes
63948 @@ -156,11 +167,12 @@ unsigned int kmem_cache_size(struct kmem_cache *);
63949 /*
63950 * Common kmalloc functions provided by all allocators
63951 */
63952 -void * __must_check __krealloc(const void *, size_t, gfp_t);
63953 -void * __must_check krealloc(const void *, size_t, gfp_t);
63954 +void * __must_check __krealloc(const void *, size_t, gfp_t) __size_overflow(2);
63955 +void * __must_check krealloc(const void *, size_t, gfp_t) __size_overflow(2);
63956 void kfree(const void *);
63957 void kzfree(const void *);
63958 size_t ksize(const void *);
63959 +void check_object_size(const void *ptr, unsigned long n, bool to);
63960
63961 /*
63962 * Allocator specific definitions. These are mainly used to establish optimized
63963 @@ -287,7 +299,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
63964 */
63965 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63966 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63967 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63968 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63969 #define kmalloc_track_caller(size, flags) \
63970 __kmalloc_track_caller(size, flags, _RET_IP_)
63971 #else
63972 @@ -306,7 +318,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63973 */
63974 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63975 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63976 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63977 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63978 #define kmalloc_node_track_caller(size, flags, node) \
63979 __kmalloc_node_track_caller(size, flags, node, \
63980 _RET_IP_)
63981 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63982 index fbd1117..c0bd874 100644
63983 --- a/include/linux/slab_def.h
63984 +++ b/include/linux/slab_def.h
63985 @@ -66,10 +66,10 @@ struct kmem_cache {
63986 unsigned long node_allocs;
63987 unsigned long node_frees;
63988 unsigned long node_overflow;
63989 - atomic_t allochit;
63990 - atomic_t allocmiss;
63991 - atomic_t freehit;
63992 - atomic_t freemiss;
63993 + atomic_unchecked_t allochit;
63994 + atomic_unchecked_t allocmiss;
63995 + atomic_unchecked_t freehit;
63996 + atomic_unchecked_t freemiss;
63997
63998 /*
63999 * If debugging is enabled, then the allocator can add additional
64000 @@ -107,7 +107,7 @@ struct cache_sizes {
64001 extern struct cache_sizes malloc_sizes[];
64002
64003 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64004 -void *__kmalloc(size_t size, gfp_t flags);
64005 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64006
64007 #ifdef CONFIG_TRACING
64008 extern void *kmem_cache_alloc_trace(size_t size,
64009 @@ -125,6 +125,7 @@ static inline size_t slab_buffer_size(struct kmem_cache *cachep)
64010 }
64011 #endif
64012
64013 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64014 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64015 {
64016 struct kmem_cache *cachep;
64017 @@ -160,7 +161,7 @@ found:
64018 }
64019
64020 #ifdef CONFIG_NUMA
64021 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
64022 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64023 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64024
64025 #ifdef CONFIG_TRACING
64026 @@ -179,6 +180,7 @@ kmem_cache_alloc_node_trace(size_t size,
64027 }
64028 #endif
64029
64030 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64031 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64032 {
64033 struct kmem_cache *cachep;
64034 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
64035 index 0ec00b3..65e7e0e 100644
64036 --- a/include/linux/slob_def.h
64037 +++ b/include/linux/slob_def.h
64038 @@ -9,8 +9,9 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
64039 return kmem_cache_alloc_node(cachep, flags, -1);
64040 }
64041
64042 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
64043 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64044
64045 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64046 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64047 {
64048 return __kmalloc_node(size, flags, node);
64049 @@ -24,11 +25,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64050 * kmalloc is the normal method of allocating memory
64051 * in the kernel.
64052 */
64053 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64054 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64055 {
64056 return __kmalloc_node(size, flags, -1);
64057 }
64058
64059 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64060 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
64061 {
64062 return kmalloc(size, flags);
64063 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
64064 index a32bcfd..d26bd6e 100644
64065 --- a/include/linux/slub_def.h
64066 +++ b/include/linux/slub_def.h
64067 @@ -89,7 +89,7 @@ struct kmem_cache {
64068 struct kmem_cache_order_objects max;
64069 struct kmem_cache_order_objects min;
64070 gfp_t allocflags; /* gfp flags to use on each alloc */
64071 - int refcount; /* Refcount for slab cache destroy */
64072 + atomic_t refcount; /* Refcount for slab cache destroy */
64073 void (*ctor)(void *);
64074 int inuse; /* Offset to metadata */
64075 int align; /* Alignment */
64076 @@ -204,6 +204,7 @@ static __always_inline int kmalloc_index(size_t size)
64077 * This ought to end up with a global pointer to the right cache
64078 * in kmalloc_caches.
64079 */
64080 +static __always_inline struct kmem_cache *kmalloc_slab(size_t size) __size_overflow(1);
64081 static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64082 {
64083 int index = kmalloc_index(size);
64084 @@ -215,9 +216,11 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
64085 }
64086
64087 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
64088 -void *__kmalloc(size_t size, gfp_t flags);
64089 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
64090
64091 static __always_inline void *
64092 +kmalloc_order(size_t size, gfp_t flags, unsigned int order) __size_overflow(1);
64093 +static __always_inline void *
64094 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
64095 {
64096 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
64097 @@ -256,12 +259,14 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
64098 }
64099 #endif
64100
64101 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
64102 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
64103 {
64104 unsigned int order = get_order(size);
64105 return kmalloc_order_trace(size, flags, order);
64106 }
64107
64108 +static __always_inline void *kmalloc(size_t size, gfp_t flags) __size_overflow(1);
64109 static __always_inline void *kmalloc(size_t size, gfp_t flags)
64110 {
64111 if (__builtin_constant_p(size)) {
64112 @@ -281,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
64113 }
64114
64115 #ifdef CONFIG_NUMA
64116 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
64117 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64118 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
64119
64120 #ifdef CONFIG_TRACING
64121 @@ -298,6 +303,7 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
64122 }
64123 #endif
64124
64125 +static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
64126 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
64127 {
64128 if (__builtin_constant_p(size) &&
64129 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
64130 index de8832d..0147b46 100644
64131 --- a/include/linux/sonet.h
64132 +++ b/include/linux/sonet.h
64133 @@ -61,7 +61,7 @@ struct sonet_stats {
64134 #include <linux/atomic.h>
64135
64136 struct k_sonet_stats {
64137 -#define __HANDLE_ITEM(i) atomic_t i
64138 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
64139 __SONET_ITEMS
64140 #undef __HANDLE_ITEM
64141 };
64142 diff --git a/include/linux/stddef.h b/include/linux/stddef.h
64143 index 6a40c76..1747b67 100644
64144 --- a/include/linux/stddef.h
64145 +++ b/include/linux/stddef.h
64146 @@ -3,14 +3,10 @@
64147
64148 #include <linux/compiler.h>
64149
64150 +#ifdef __KERNEL__
64151 +
64152 #undef NULL
64153 -#if defined(__cplusplus)
64154 -#define NULL 0
64155 -#else
64156 #define NULL ((void *)0)
64157 -#endif
64158 -
64159 -#ifdef __KERNEL__
64160
64161 enum {
64162 false = 0,
64163 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
64164 index 2c5993a..b0e79f0 100644
64165 --- a/include/linux/sunrpc/clnt.h
64166 +++ b/include/linux/sunrpc/clnt.h
64167 @@ -172,9 +172,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
64168 {
64169 switch (sap->sa_family) {
64170 case AF_INET:
64171 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
64172 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
64173 case AF_INET6:
64174 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
64175 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
64176 }
64177 return 0;
64178 }
64179 @@ -207,7 +207,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
64180 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
64181 const struct sockaddr *src)
64182 {
64183 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
64184 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
64185 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
64186
64187 dsin->sin_family = ssin->sin_family;
64188 @@ -310,7 +310,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
64189 if (sa->sa_family != AF_INET6)
64190 return 0;
64191
64192 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
64193 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
64194 }
64195
64196 #endif /* __KERNEL__ */
64197 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
64198 index e775689..9e206d9 100644
64199 --- a/include/linux/sunrpc/sched.h
64200 +++ b/include/linux/sunrpc/sched.h
64201 @@ -105,6 +105,7 @@ struct rpc_call_ops {
64202 void (*rpc_call_done)(struct rpc_task *, void *);
64203 void (*rpc_release)(void *);
64204 };
64205 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
64206
64207 struct rpc_task_setup {
64208 struct rpc_task *task;
64209 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
64210 index c14fe86..393245e 100644
64211 --- a/include/linux/sunrpc/svc_rdma.h
64212 +++ b/include/linux/sunrpc/svc_rdma.h
64213 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
64214 extern unsigned int svcrdma_max_requests;
64215 extern unsigned int svcrdma_max_req_size;
64216
64217 -extern atomic_t rdma_stat_recv;
64218 -extern atomic_t rdma_stat_read;
64219 -extern atomic_t rdma_stat_write;
64220 -extern atomic_t rdma_stat_sq_starve;
64221 -extern atomic_t rdma_stat_rq_starve;
64222 -extern atomic_t rdma_stat_rq_poll;
64223 -extern atomic_t rdma_stat_rq_prod;
64224 -extern atomic_t rdma_stat_sq_poll;
64225 -extern atomic_t rdma_stat_sq_prod;
64226 +extern atomic_unchecked_t rdma_stat_recv;
64227 +extern atomic_unchecked_t rdma_stat_read;
64228 +extern atomic_unchecked_t rdma_stat_write;
64229 +extern atomic_unchecked_t rdma_stat_sq_starve;
64230 +extern atomic_unchecked_t rdma_stat_rq_starve;
64231 +extern atomic_unchecked_t rdma_stat_rq_poll;
64232 +extern atomic_unchecked_t rdma_stat_rq_prod;
64233 +extern atomic_unchecked_t rdma_stat_sq_poll;
64234 +extern atomic_unchecked_t rdma_stat_sq_prod;
64235
64236 #define RPCRDMA_VERSION 1
64237
64238 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
64239 index bb9127d..34ab358 100644
64240 --- a/include/linux/sysctl.h
64241 +++ b/include/linux/sysctl.h
64242 @@ -155,7 +155,11 @@ enum
64243 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
64244 };
64245
64246 -
64247 +#ifdef CONFIG_PAX_SOFTMODE
64248 +enum {
64249 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
64250 +};
64251 +#endif
64252
64253 /* CTL_VM names: */
64254 enum
64255 @@ -968,6 +972,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
64256
64257 extern int proc_dostring(struct ctl_table *, int,
64258 void __user *, size_t *, loff_t *);
64259 +extern int proc_dostring_modpriv(struct ctl_table *, int,
64260 + void __user *, size_t *, loff_t *);
64261 extern int proc_dointvec(struct ctl_table *, int,
64262 void __user *, size_t *, loff_t *);
64263 extern int proc_dointvec_minmax(struct ctl_table *, int,
64264 diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
64265 index a71a292..51bd91d 100644
64266 --- a/include/linux/tracehook.h
64267 +++ b/include/linux/tracehook.h
64268 @@ -54,12 +54,12 @@ struct linux_binprm;
64269 /*
64270 * ptrace report for syscall entry and exit looks identical.
64271 */
64272 -static inline void ptrace_report_syscall(struct pt_regs *regs)
64273 +static inline int ptrace_report_syscall(struct pt_regs *regs)
64274 {
64275 int ptrace = current->ptrace;
64276
64277 if (!(ptrace & PT_PTRACED))
64278 - return;
64279 + return 0;
64280
64281 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
64282
64283 @@ -72,6 +72,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
64284 send_sig(current->exit_code, current, 1);
64285 current->exit_code = 0;
64286 }
64287 +
64288 + return fatal_signal_pending(current);
64289 }
64290
64291 /**
64292 @@ -96,8 +98,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
64293 static inline __must_check int tracehook_report_syscall_entry(
64294 struct pt_regs *regs)
64295 {
64296 - ptrace_report_syscall(regs);
64297 - return 0;
64298 + return ptrace_report_syscall(regs);
64299 }
64300
64301 /**
64302 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
64303 index ff7dc08..893e1bd 100644
64304 --- a/include/linux/tty_ldisc.h
64305 +++ b/include/linux/tty_ldisc.h
64306 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
64307
64308 struct module *owner;
64309
64310 - int refcount;
64311 + atomic_t refcount;
64312 };
64313
64314 struct tty_ldisc {
64315 diff --git a/include/linux/types.h b/include/linux/types.h
64316 index e5fa503..df6e8a4 100644
64317 --- a/include/linux/types.h
64318 +++ b/include/linux/types.h
64319 @@ -214,10 +214,26 @@ typedef struct {
64320 int counter;
64321 } atomic_t;
64322
64323 +#ifdef CONFIG_PAX_REFCOUNT
64324 +typedef struct {
64325 + int counter;
64326 +} atomic_unchecked_t;
64327 +#else
64328 +typedef atomic_t atomic_unchecked_t;
64329 +#endif
64330 +
64331 #ifdef CONFIG_64BIT
64332 typedef struct {
64333 long counter;
64334 } atomic64_t;
64335 +
64336 +#ifdef CONFIG_PAX_REFCOUNT
64337 +typedef struct {
64338 + long counter;
64339 +} atomic64_unchecked_t;
64340 +#else
64341 +typedef atomic64_t atomic64_unchecked_t;
64342 +#endif
64343 #endif
64344
64345 struct list_head {
64346 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
64347 index 5ca0951..53a2fff 100644
64348 --- a/include/linux/uaccess.h
64349 +++ b/include/linux/uaccess.h
64350 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
64351 long ret; \
64352 mm_segment_t old_fs = get_fs(); \
64353 \
64354 - set_fs(KERNEL_DS); \
64355 pagefault_disable(); \
64356 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
64357 - pagefault_enable(); \
64358 + set_fs(KERNEL_DS); \
64359 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
64360 set_fs(old_fs); \
64361 + pagefault_enable(); \
64362 ret; \
64363 })
64364
64365 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *dst, const void *src, size_t size);
64366 * Safely write to address @dst from the buffer at @src. If a kernel fault
64367 * happens, handle that and return -EFAULT.
64368 */
64369 -extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
64370 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size) __size_overflow(3);
64371 extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
64372
64373 #endif /* __LINUX_UACCESS_H__ */
64374 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
64375 index 99c1b4d..bb94261 100644
64376 --- a/include/linux/unaligned/access_ok.h
64377 +++ b/include/linux/unaligned/access_ok.h
64378 @@ -6,32 +6,32 @@
64379
64380 static inline u16 get_unaligned_le16(const void *p)
64381 {
64382 - return le16_to_cpup((__le16 *)p);
64383 + return le16_to_cpup((const __le16 *)p);
64384 }
64385
64386 static inline u32 get_unaligned_le32(const void *p)
64387 {
64388 - return le32_to_cpup((__le32 *)p);
64389 + return le32_to_cpup((const __le32 *)p);
64390 }
64391
64392 static inline u64 get_unaligned_le64(const void *p)
64393 {
64394 - return le64_to_cpup((__le64 *)p);
64395 + return le64_to_cpup((const __le64 *)p);
64396 }
64397
64398 static inline u16 get_unaligned_be16(const void *p)
64399 {
64400 - return be16_to_cpup((__be16 *)p);
64401 + return be16_to_cpup((const __be16 *)p);
64402 }
64403
64404 static inline u32 get_unaligned_be32(const void *p)
64405 {
64406 - return be32_to_cpup((__be32 *)p);
64407 + return be32_to_cpup((const __be32 *)p);
64408 }
64409
64410 static inline u64 get_unaligned_be64(const void *p)
64411 {
64412 - return be64_to_cpup((__be64 *)p);
64413 + return be64_to_cpup((const __be64 *)p);
64414 }
64415
64416 static inline void put_unaligned_le16(u16 val, void *p)
64417 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
64418 index 0d3f988..000f101 100644
64419 --- a/include/linux/usb/renesas_usbhs.h
64420 +++ b/include/linux/usb/renesas_usbhs.h
64421 @@ -39,7 +39,7 @@ enum {
64422 */
64423 struct renesas_usbhs_driver_callback {
64424 int (*notify_hotplug)(struct platform_device *pdev);
64425 -};
64426 +} __no_const;
64427
64428 /*
64429 * callback functions for platform
64430 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
64431 * VBUS control is needed for Host
64432 */
64433 int (*set_vbus)(struct platform_device *pdev, int enable);
64434 -};
64435 +} __no_const;
64436
64437 /*
64438 * parameters for renesas usbhs
64439 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
64440 index 6f8fbcf..8259001 100644
64441 --- a/include/linux/vermagic.h
64442 +++ b/include/linux/vermagic.h
64443 @@ -25,9 +25,35 @@
64444 #define MODULE_ARCH_VERMAGIC ""
64445 #endif
64446
64447 +#ifdef CONFIG_PAX_REFCOUNT
64448 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
64449 +#else
64450 +#define MODULE_PAX_REFCOUNT ""
64451 +#endif
64452 +
64453 +#ifdef CONSTIFY_PLUGIN
64454 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
64455 +#else
64456 +#define MODULE_CONSTIFY_PLUGIN ""
64457 +#endif
64458 +
64459 +#ifdef STACKLEAK_PLUGIN
64460 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
64461 +#else
64462 +#define MODULE_STACKLEAK_PLUGIN ""
64463 +#endif
64464 +
64465 +#ifdef CONFIG_GRKERNSEC
64466 +#define MODULE_GRSEC "GRSEC "
64467 +#else
64468 +#define MODULE_GRSEC ""
64469 +#endif
64470 +
64471 #define VERMAGIC_STRING \
64472 UTS_RELEASE " " \
64473 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
64474 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
64475 - MODULE_ARCH_VERMAGIC
64476 + MODULE_ARCH_VERMAGIC \
64477 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
64478 + MODULE_GRSEC
64479
64480 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
64481 index dcdfc2b..cce598d 100644
64482 --- a/include/linux/vmalloc.h
64483 +++ b/include/linux/vmalloc.h
64484 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
64485 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
64486 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
64487 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
64488 +
64489 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64490 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
64491 +#endif
64492 +
64493 /* bits [20..32] reserved for arch specific ioremap internals */
64494
64495 /*
64496 @@ -51,18 +56,18 @@ static inline void vmalloc_init(void)
64497 }
64498 #endif
64499
64500 -extern void *vmalloc(unsigned long size);
64501 -extern void *vzalloc(unsigned long size);
64502 -extern void *vmalloc_user(unsigned long size);
64503 -extern void *vmalloc_node(unsigned long size, int node);
64504 -extern void *vzalloc_node(unsigned long size, int node);
64505 -extern void *vmalloc_exec(unsigned long size);
64506 -extern void *vmalloc_32(unsigned long size);
64507 -extern void *vmalloc_32_user(unsigned long size);
64508 -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
64509 +extern void *vmalloc(unsigned long size) __size_overflow(1);
64510 +extern void *vzalloc(unsigned long size) __size_overflow(1);
64511 +extern void *vmalloc_user(unsigned long size) __size_overflow(1);
64512 +extern void *vmalloc_node(unsigned long size, int node) __size_overflow(1);
64513 +extern void *vzalloc_node(unsigned long size, int node) __size_overflow(1);
64514 +extern void *vmalloc_exec(unsigned long size) __size_overflow(1);
64515 +extern void *vmalloc_32(unsigned long size) __size_overflow(1);
64516 +extern void *vmalloc_32_user(unsigned long size) __size_overflow(1);
64517 +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) __size_overflow(1);
64518 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
64519 unsigned long start, unsigned long end, gfp_t gfp_mask,
64520 - pgprot_t prot, int node, void *caller);
64521 + pgprot_t prot, int node, void *caller) __size_overflow(1);
64522 extern void vfree(const void *addr);
64523
64524 extern void *vmap(struct page **pages, unsigned int count,
64525 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
64526 extern void free_vm_area(struct vm_struct *area);
64527
64528 /* for /dev/kmem */
64529 -extern long vread(char *buf, char *addr, unsigned long count);
64530 -extern long vwrite(char *buf, char *addr, unsigned long count);
64531 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
64532 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
64533
64534 /*
64535 * Internals. Dont't use..
64536 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
64537 index 65efb92..137adbb 100644
64538 --- a/include/linux/vmstat.h
64539 +++ b/include/linux/vmstat.h
64540 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
64541 /*
64542 * Zone based page accounting with per cpu differentials.
64543 */
64544 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64545 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64546
64547 static inline void zone_page_state_add(long x, struct zone *zone,
64548 enum zone_stat_item item)
64549 {
64550 - atomic_long_add(x, &zone->vm_stat[item]);
64551 - atomic_long_add(x, &vm_stat[item]);
64552 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
64553 + atomic_long_add_unchecked(x, &vm_stat[item]);
64554 }
64555
64556 static inline unsigned long global_page_state(enum zone_stat_item item)
64557 {
64558 - long x = atomic_long_read(&vm_stat[item]);
64559 + long x = atomic_long_read_unchecked(&vm_stat[item]);
64560 #ifdef CONFIG_SMP
64561 if (x < 0)
64562 x = 0;
64563 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
64564 static inline unsigned long zone_page_state(struct zone *zone,
64565 enum zone_stat_item item)
64566 {
64567 - long x = atomic_long_read(&zone->vm_stat[item]);
64568 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64569 #ifdef CONFIG_SMP
64570 if (x < 0)
64571 x = 0;
64572 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
64573 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
64574 enum zone_stat_item item)
64575 {
64576 - long x = atomic_long_read(&zone->vm_stat[item]);
64577 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
64578
64579 #ifdef CONFIG_SMP
64580 int cpu;
64581 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
64582
64583 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
64584 {
64585 - atomic_long_inc(&zone->vm_stat[item]);
64586 - atomic_long_inc(&vm_stat[item]);
64587 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
64588 + atomic_long_inc_unchecked(&vm_stat[item]);
64589 }
64590
64591 static inline void __inc_zone_page_state(struct page *page,
64592 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
64593
64594 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
64595 {
64596 - atomic_long_dec(&zone->vm_stat[item]);
64597 - atomic_long_dec(&vm_stat[item]);
64598 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
64599 + atomic_long_dec_unchecked(&vm_stat[item]);
64600 }
64601
64602 static inline void __dec_zone_page_state(struct page *page,
64603 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
64604 index e5d1220..ef6e406 100644
64605 --- a/include/linux/xattr.h
64606 +++ b/include/linux/xattr.h
64607 @@ -57,6 +57,11 @@
64608 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
64609 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
64610
64611 +/* User namespace */
64612 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
64613 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
64614 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
64615 +
64616 #ifdef __KERNEL__
64617
64618 #include <linux/types.h>
64619 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
64620 index 4aeff96..b378cdc 100644
64621 --- a/include/media/saa7146_vv.h
64622 +++ b/include/media/saa7146_vv.h
64623 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
64624 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
64625
64626 /* the extension can override this */
64627 - struct v4l2_ioctl_ops ops;
64628 + v4l2_ioctl_ops_no_const ops;
64629 /* pointer to the saa7146 core ops */
64630 const struct v4l2_ioctl_ops *core_ops;
64631
64632 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
64633 index c7c40f1..4f01585 100644
64634 --- a/include/media/v4l2-dev.h
64635 +++ b/include/media/v4l2-dev.h
64636 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
64637
64638
64639 struct v4l2_file_operations {
64640 - struct module *owner;
64641 + struct module * const owner;
64642 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
64643 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
64644 unsigned int (*poll) (struct file *, struct poll_table_struct *);
64645 @@ -68,6 +68,7 @@ struct v4l2_file_operations {
64646 int (*open) (struct file *);
64647 int (*release) (struct file *);
64648 };
64649 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
64650
64651 /*
64652 * Newer version of video_device, handled by videodev2.c
64653 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
64654 index 3f5d60f..44210ed 100644
64655 --- a/include/media/v4l2-ioctl.h
64656 +++ b/include/media/v4l2-ioctl.h
64657 @@ -278,7 +278,7 @@ struct v4l2_ioctl_ops {
64658 long (*vidioc_default) (struct file *file, void *fh,
64659 bool valid_prio, int cmd, void *arg);
64660 };
64661 -
64662 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
64663
64664 /* v4l debugging and diagnostics */
64665
64666 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
64667 index 8d55251..dfe5b0a 100644
64668 --- a/include/net/caif/caif_hsi.h
64669 +++ b/include/net/caif/caif_hsi.h
64670 @@ -98,7 +98,7 @@ struct cfhsi_drv {
64671 void (*rx_done_cb) (struct cfhsi_drv *drv);
64672 void (*wake_up_cb) (struct cfhsi_drv *drv);
64673 void (*wake_down_cb) (struct cfhsi_drv *drv);
64674 -};
64675 +} __no_const;
64676
64677 /* Structure implemented by HSI device. */
64678 struct cfhsi_dev {
64679 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64680 index 9e5425b..8136ffc 100644
64681 --- a/include/net/caif/cfctrl.h
64682 +++ b/include/net/caif/cfctrl.h
64683 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
64684 void (*radioset_rsp)(void);
64685 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64686 struct cflayer *client_layer);
64687 -};
64688 +} __no_const;
64689
64690 /* Link Setup Parameters for CAIF-Links. */
64691 struct cfctrl_link_param {
64692 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
64693 struct cfctrl {
64694 struct cfsrvl serv;
64695 struct cfctrl_rsp res;
64696 - atomic_t req_seq_no;
64697 - atomic_t rsp_seq_no;
64698 + atomic_unchecked_t req_seq_no;
64699 + atomic_unchecked_t rsp_seq_no;
64700 struct list_head list;
64701 /* Protects from simultaneous access to first_req list */
64702 spinlock_t info_list_lock;
64703 diff --git a/include/net/flow.h b/include/net/flow.h
64704 index 6c469db..7743b8e 100644
64705 --- a/include/net/flow.h
64706 +++ b/include/net/flow.h
64707 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
64708
64709 extern void flow_cache_flush(void);
64710 extern void flow_cache_flush_deferred(void);
64711 -extern atomic_t flow_cache_genid;
64712 +extern atomic_unchecked_t flow_cache_genid;
64713
64714 #endif
64715 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
64716 index b94765e..053f68b 100644
64717 --- a/include/net/inetpeer.h
64718 +++ b/include/net/inetpeer.h
64719 @@ -48,8 +48,8 @@ struct inet_peer {
64720 */
64721 union {
64722 struct {
64723 - atomic_t rid; /* Frag reception counter */
64724 - atomic_t ip_id_count; /* IP ID for the next packet */
64725 + atomic_unchecked_t rid; /* Frag reception counter */
64726 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64727 __u32 tcp_ts;
64728 __u32 tcp_ts_stamp;
64729 };
64730 @@ -115,11 +115,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
64731 more++;
64732 inet_peer_refcheck(p);
64733 do {
64734 - old = atomic_read(&p->ip_id_count);
64735 + old = atomic_read_unchecked(&p->ip_id_count);
64736 new = old + more;
64737 if (!new)
64738 new = 1;
64739 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64740 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64741 return new;
64742 }
64743
64744 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64745 index 10422ef..662570f 100644
64746 --- a/include/net/ip_fib.h
64747 +++ b/include/net/ip_fib.h
64748 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
64749
64750 #define FIB_RES_SADDR(net, res) \
64751 ((FIB_RES_NH(res).nh_saddr_genid == \
64752 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64753 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64754 FIB_RES_NH(res).nh_saddr : \
64755 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64756 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
64757 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
64758 index ebe517f..1bd286b 100644
64759 --- a/include/net/ip_vs.h
64760 +++ b/include/net/ip_vs.h
64761 @@ -509,7 +509,7 @@ struct ip_vs_conn {
64762 struct ip_vs_conn *control; /* Master control connection */
64763 atomic_t n_control; /* Number of controlled ones */
64764 struct ip_vs_dest *dest; /* real server */
64765 - atomic_t in_pkts; /* incoming packet counter */
64766 + atomic_unchecked_t in_pkts; /* incoming packet counter */
64767
64768 /* packet transmitter for different forwarding methods. If it
64769 mangles the packet, it must return NF_DROP or better NF_STOLEN,
64770 @@ -647,7 +647,7 @@ struct ip_vs_dest {
64771 __be16 port; /* port number of the server */
64772 union nf_inet_addr addr; /* IP address of the server */
64773 volatile unsigned flags; /* dest status flags */
64774 - atomic_t conn_flags; /* flags to copy to conn */
64775 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
64776 atomic_t weight; /* server weight */
64777
64778 atomic_t refcnt; /* reference counter */
64779 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64780 index 69b610a..fe3962c 100644
64781 --- a/include/net/irda/ircomm_core.h
64782 +++ b/include/net/irda/ircomm_core.h
64783 @@ -51,7 +51,7 @@ typedef struct {
64784 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64785 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64786 struct ircomm_info *);
64787 -} call_t;
64788 +} __no_const call_t;
64789
64790 struct ircomm_cb {
64791 irda_queue_t queue;
64792 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64793 index 59ba38bc..d515662 100644
64794 --- a/include/net/irda/ircomm_tty.h
64795 +++ b/include/net/irda/ircomm_tty.h
64796 @@ -35,6 +35,7 @@
64797 #include <linux/termios.h>
64798 #include <linux/timer.h>
64799 #include <linux/tty.h> /* struct tty_struct */
64800 +#include <asm/local.h>
64801
64802 #include <net/irda/irias_object.h>
64803 #include <net/irda/ircomm_core.h>
64804 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
64805 unsigned short close_delay;
64806 unsigned short closing_wait; /* time to wait before closing */
64807
64808 - int open_count;
64809 - int blocked_open; /* # of blocked opens */
64810 + local_t open_count;
64811 + local_t blocked_open; /* # of blocked opens */
64812
64813 /* Protect concurent access to :
64814 * o self->open_count
64815 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
64816 index 0954ec9..7413562 100644
64817 --- a/include/net/iucv/af_iucv.h
64818 +++ b/include/net/iucv/af_iucv.h
64819 @@ -138,7 +138,7 @@ struct iucv_sock {
64820 struct iucv_sock_list {
64821 struct hlist_head head;
64822 rwlock_t lock;
64823 - atomic_t autobind_name;
64824 + atomic_unchecked_t autobind_name;
64825 };
64826
64827 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
64828 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
64829 index 34c996f..bb3b4d4 100644
64830 --- a/include/net/neighbour.h
64831 +++ b/include/net/neighbour.h
64832 @@ -123,7 +123,7 @@ struct neigh_ops {
64833 void (*error_report)(struct neighbour *, struct sk_buff *);
64834 int (*output)(struct neighbour *, struct sk_buff *);
64835 int (*connected_output)(struct neighbour *, struct sk_buff *);
64836 -};
64837 +} __do_const;
64838
64839 struct pneigh_entry {
64840 struct pneigh_entry *next;
64841 diff --git a/include/net/netlink.h b/include/net/netlink.h
64842 index cb1f350..3279d2c 100644
64843 --- a/include/net/netlink.h
64844 +++ b/include/net/netlink.h
64845 @@ -569,7 +569,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
64846 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64847 {
64848 if (mark)
64849 - skb_trim(skb, (unsigned char *) mark - skb->data);
64850 + skb_trim(skb, (const unsigned char *) mark - skb->data);
64851 }
64852
64853 /**
64854 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
64855 index bbd023a..97c6d0d 100644
64856 --- a/include/net/netns/ipv4.h
64857 +++ b/include/net/netns/ipv4.h
64858 @@ -57,8 +57,8 @@ struct netns_ipv4 {
64859 unsigned int sysctl_ping_group_range[2];
64860 long sysctl_tcp_mem[3];
64861
64862 - atomic_t rt_genid;
64863 - atomic_t dev_addr_genid;
64864 + atomic_unchecked_t rt_genid;
64865 + atomic_unchecked_t dev_addr_genid;
64866
64867 #ifdef CONFIG_IP_MROUTE
64868 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
64869 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
64870 index d368561..96aaa17 100644
64871 --- a/include/net/sctp/sctp.h
64872 +++ b/include/net/sctp/sctp.h
64873 @@ -318,9 +318,9 @@ do { \
64874
64875 #else /* SCTP_DEBUG */
64876
64877 -#define SCTP_DEBUG_PRINTK(whatever...)
64878 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
64879 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64880 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
64881 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
64882 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64883 #define SCTP_ENABLE_DEBUG
64884 #define SCTP_DISABLE_DEBUG
64885 #define SCTP_ASSERT(expr, str, func)
64886 diff --git a/include/net/sock.h b/include/net/sock.h
64887 index 91c1c8b..15ae923 100644
64888 --- a/include/net/sock.h
64889 +++ b/include/net/sock.h
64890 @@ -299,7 +299,7 @@ struct sock {
64891 #ifdef CONFIG_RPS
64892 __u32 sk_rxhash;
64893 #endif
64894 - atomic_t sk_drops;
64895 + atomic_unchecked_t sk_drops;
64896 int sk_rcvbuf;
64897
64898 struct sk_filter __rcu *sk_filter;
64899 @@ -1660,7 +1660,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
64900 }
64901
64902 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64903 - char __user *from, char *to,
64904 + char __user *from, unsigned char *to,
64905 int copy, int offset)
64906 {
64907 if (skb->ip_summed == CHECKSUM_NONE) {
64908 diff --git a/include/net/tcp.h b/include/net/tcp.h
64909 index 2d80c29..aa07caf 100644
64910 --- a/include/net/tcp.h
64911 +++ b/include/net/tcp.h
64912 @@ -1426,7 +1426,7 @@ struct tcp_seq_afinfo {
64913 char *name;
64914 sa_family_t family;
64915 const struct file_operations *seq_fops;
64916 - struct seq_operations seq_ops;
64917 + seq_operations_no_const seq_ops;
64918 };
64919
64920 struct tcp_iter_state {
64921 diff --git a/include/net/udp.h b/include/net/udp.h
64922 index e39592f..fef9680 100644
64923 --- a/include/net/udp.h
64924 +++ b/include/net/udp.h
64925 @@ -243,7 +243,7 @@ struct udp_seq_afinfo {
64926 sa_family_t family;
64927 struct udp_table *udp_table;
64928 const struct file_operations *seq_fops;
64929 - struct seq_operations seq_ops;
64930 + seq_operations_no_const seq_ops;
64931 };
64932
64933 struct udp_iter_state {
64934 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64935 index 89174e2..1f82598 100644
64936 --- a/include/net/xfrm.h
64937 +++ b/include/net/xfrm.h
64938 @@ -505,7 +505,7 @@ struct xfrm_policy {
64939 struct timer_list timer;
64940
64941 struct flow_cache_object flo;
64942 - atomic_t genid;
64943 + atomic_unchecked_t genid;
64944 u32 priority;
64945 u32 index;
64946 struct xfrm_mark mark;
64947 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64948 index 1a046b1..ee0bef0 100644
64949 --- a/include/rdma/iw_cm.h
64950 +++ b/include/rdma/iw_cm.h
64951 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
64952 int backlog);
64953
64954 int (*destroy_listen)(struct iw_cm_id *cm_id);
64955 -};
64956 +} __no_const;
64957
64958 /**
64959 * iw_create_cm_id - Create an IW CM identifier.
64960 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64961 index 6a3922f..0b73022 100644
64962 --- a/include/scsi/libfc.h
64963 +++ b/include/scsi/libfc.h
64964 @@ -748,6 +748,7 @@ struct libfc_function_template {
64965 */
64966 void (*disc_stop_final) (struct fc_lport *);
64967 };
64968 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64969
64970 /**
64971 * struct fc_disc - Discovery context
64972 @@ -851,7 +852,7 @@ struct fc_lport {
64973 struct fc_vport *vport;
64974
64975 /* Operational Information */
64976 - struct libfc_function_template tt;
64977 + libfc_function_template_no_const tt;
64978 u8 link_up;
64979 u8 qfull;
64980 enum fc_lport_state state;
64981 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64982 index 77273f2..dd4031f 100644
64983 --- a/include/scsi/scsi_device.h
64984 +++ b/include/scsi/scsi_device.h
64985 @@ -161,9 +161,9 @@ struct scsi_device {
64986 unsigned int max_device_blocked; /* what device_blocked counts down from */
64987 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64988
64989 - atomic_t iorequest_cnt;
64990 - atomic_t iodone_cnt;
64991 - atomic_t ioerr_cnt;
64992 + atomic_unchecked_t iorequest_cnt;
64993 + atomic_unchecked_t iodone_cnt;
64994 + atomic_unchecked_t ioerr_cnt;
64995
64996 struct device sdev_gendev,
64997 sdev_dev;
64998 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64999 index 2a65167..91e01f8 100644
65000 --- a/include/scsi/scsi_transport_fc.h
65001 +++ b/include/scsi/scsi_transport_fc.h
65002 @@ -711,7 +711,7 @@ struct fc_function_template {
65003 unsigned long show_host_system_hostname:1;
65004
65005 unsigned long disable_target_scan:1;
65006 -};
65007 +} __do_const;
65008
65009
65010 /**
65011 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
65012 index 030b87c..98a6954 100644
65013 --- a/include/sound/ak4xxx-adda.h
65014 +++ b/include/sound/ak4xxx-adda.h
65015 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
65016 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
65017 unsigned char val);
65018 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
65019 -};
65020 +} __no_const;
65021
65022 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
65023
65024 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
65025 index 8c05e47..2b5df97 100644
65026 --- a/include/sound/hwdep.h
65027 +++ b/include/sound/hwdep.h
65028 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
65029 struct snd_hwdep_dsp_status *status);
65030 int (*dsp_load)(struct snd_hwdep *hw,
65031 struct snd_hwdep_dsp_image *image);
65032 -};
65033 +} __no_const;
65034
65035 struct snd_hwdep {
65036 struct snd_card *card;
65037 diff --git a/include/sound/info.h b/include/sound/info.h
65038 index 9ca1a49..aba1728 100644
65039 --- a/include/sound/info.h
65040 +++ b/include/sound/info.h
65041 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
65042 struct snd_info_buffer *buffer);
65043 void (*write)(struct snd_info_entry *entry,
65044 struct snd_info_buffer *buffer);
65045 -};
65046 +} __no_const;
65047
65048 struct snd_info_entry_ops {
65049 int (*open)(struct snd_info_entry *entry,
65050 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
65051 index 0cf91b2..b70cae4 100644
65052 --- a/include/sound/pcm.h
65053 +++ b/include/sound/pcm.h
65054 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
65055 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
65056 int (*ack)(struct snd_pcm_substream *substream);
65057 };
65058 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
65059
65060 /*
65061 *
65062 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
65063 index af1b49e..a5d55a5 100644
65064 --- a/include/sound/sb16_csp.h
65065 +++ b/include/sound/sb16_csp.h
65066 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
65067 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
65068 int (*csp_stop) (struct snd_sb_csp * p);
65069 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
65070 -};
65071 +} __no_const;
65072
65073 /*
65074 * CSP private data
65075 diff --git a/include/sound/soc.h b/include/sound/soc.h
65076 index 0992dff..bb366fe 100644
65077 --- a/include/sound/soc.h
65078 +++ b/include/sound/soc.h
65079 @@ -682,7 +682,7 @@ struct snd_soc_platform_driver {
65080 /* platform IO - used for platform DAPM */
65081 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
65082 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
65083 -};
65084 +} __do_const;
65085
65086 struct snd_soc_platform {
65087 const char *name;
65088 @@ -852,7 +852,7 @@ struct snd_soc_pcm_runtime {
65089 struct snd_soc_dai_link *dai_link;
65090 struct mutex pcm_mutex;
65091 enum snd_soc_pcm_subclass pcm_subclass;
65092 - struct snd_pcm_ops ops;
65093 + snd_pcm_ops_no_const ops;
65094
65095 unsigned int complete:1;
65096 unsigned int dev_registered:1;
65097 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
65098 index 444cd6b..3327cc5 100644
65099 --- a/include/sound/ymfpci.h
65100 +++ b/include/sound/ymfpci.h
65101 @@ -358,7 +358,7 @@ struct snd_ymfpci {
65102 spinlock_t reg_lock;
65103 spinlock_t voice_lock;
65104 wait_queue_head_t interrupt_sleep;
65105 - atomic_t interrupt_sleep_count;
65106 + atomic_unchecked_t interrupt_sleep_count;
65107 struct snd_info_entry *proc_entry;
65108 const struct firmware *dsp_microcode;
65109 const struct firmware *controller_microcode;
65110 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
65111 index fe73eb8..56388b1 100644
65112 --- a/include/target/target_core_base.h
65113 +++ b/include/target/target_core_base.h
65114 @@ -443,7 +443,7 @@ struct t10_reservation_ops {
65115 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
65116 int (*t10_pr_register)(struct se_cmd *);
65117 int (*t10_pr_clear)(struct se_cmd *);
65118 -};
65119 +} __no_const;
65120
65121 struct t10_reservation {
65122 /* Reservation effects all target ports */
65123 @@ -561,8 +561,8 @@ struct se_cmd {
65124 atomic_t t_se_count;
65125 atomic_t t_task_cdbs_left;
65126 atomic_t t_task_cdbs_ex_left;
65127 - atomic_t t_task_cdbs_sent;
65128 - atomic_t t_transport_aborted;
65129 + atomic_unchecked_t t_task_cdbs_sent;
65130 + atomic_unchecked_t t_transport_aborted;
65131 atomic_t t_transport_active;
65132 atomic_t t_transport_complete;
65133 atomic_t t_transport_queue_active;
65134 @@ -799,7 +799,7 @@ struct se_device {
65135 spinlock_t stats_lock;
65136 /* Active commands on this virtual SE device */
65137 atomic_t simple_cmds;
65138 - atomic_t dev_ordered_id;
65139 + atomic_unchecked_t dev_ordered_id;
65140 atomic_t execute_tasks;
65141 atomic_t dev_ordered_sync;
65142 atomic_t dev_qf_count;
65143 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
65144 index 1c09820..7f5ec79 100644
65145 --- a/include/trace/events/irq.h
65146 +++ b/include/trace/events/irq.h
65147 @@ -36,7 +36,7 @@ struct softirq_action;
65148 */
65149 TRACE_EVENT(irq_handler_entry,
65150
65151 - TP_PROTO(int irq, struct irqaction *action),
65152 + TP_PROTO(int irq, const struct irqaction *action),
65153
65154 TP_ARGS(irq, action),
65155
65156 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
65157 */
65158 TRACE_EVENT(irq_handler_exit,
65159
65160 - TP_PROTO(int irq, struct irqaction *action, int ret),
65161 + TP_PROTO(int irq, const struct irqaction *action, int ret),
65162
65163 TP_ARGS(irq, action, ret),
65164
65165 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
65166 index c41f308..6918de3 100644
65167 --- a/include/video/udlfb.h
65168 +++ b/include/video/udlfb.h
65169 @@ -52,10 +52,10 @@ struct dlfb_data {
65170 u32 pseudo_palette[256];
65171 int blank_mode; /*one of FB_BLANK_ */
65172 /* blit-only rendering path metrics, exposed through sysfs */
65173 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65174 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
65175 - atomic_t bytes_sent; /* to usb, after compression including overhead */
65176 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
65177 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
65178 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
65179 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
65180 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
65181 };
65182
65183 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
65184 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
65185 index 0993a22..32ba2fe 100644
65186 --- a/include/video/uvesafb.h
65187 +++ b/include/video/uvesafb.h
65188 @@ -177,6 +177,7 @@ struct uvesafb_par {
65189 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
65190 u8 pmi_setpal; /* PMI for palette changes */
65191 u16 *pmi_base; /* protected mode interface location */
65192 + u8 *pmi_code; /* protected mode code location */
65193 void *pmi_start;
65194 void *pmi_pal;
65195 u8 *vbe_state_orig; /*
65196 diff --git a/init/Kconfig b/init/Kconfig
65197 index 3f42cd6..613f41d 100644
65198 --- a/init/Kconfig
65199 +++ b/init/Kconfig
65200 @@ -799,6 +799,7 @@ endif # CGROUPS
65201
65202 config CHECKPOINT_RESTORE
65203 bool "Checkpoint/restore support" if EXPERT
65204 + depends on !GRKERNSEC
65205 default n
65206 help
65207 Enables additional kernel features in a sake of checkpoint/restore.
65208 @@ -1249,7 +1250,7 @@ config SLUB_DEBUG
65209
65210 config COMPAT_BRK
65211 bool "Disable heap randomization"
65212 - default y
65213 + default n
65214 help
65215 Randomizing heap placement makes heap exploits harder, but it
65216 also breaks ancient binaries (including anything libc5 based).
65217 diff --git a/init/do_mounts.c b/init/do_mounts.c
65218 index 2974c8b..0b863ae 100644
65219 --- a/init/do_mounts.c
65220 +++ b/init/do_mounts.c
65221 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
65222 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
65223 {
65224 struct super_block *s;
65225 - int err = sys_mount(name, "/root", fs, flags, data);
65226 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
65227 if (err)
65228 return err;
65229
65230 - sys_chdir((const char __user __force *)"/root");
65231 + sys_chdir((const char __force_user *)"/root");
65232 s = current->fs->pwd.dentry->d_sb;
65233 ROOT_DEV = s->s_dev;
65234 printk(KERN_INFO
65235 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
65236 va_start(args, fmt);
65237 vsprintf(buf, fmt, args);
65238 va_end(args);
65239 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
65240 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
65241 if (fd >= 0) {
65242 sys_ioctl(fd, FDEJECT, 0);
65243 sys_close(fd);
65244 }
65245 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
65246 - fd = sys_open("/dev/console", O_RDWR, 0);
65247 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
65248 if (fd >= 0) {
65249 sys_ioctl(fd, TCGETS, (long)&termios);
65250 termios.c_lflag &= ~ICANON;
65251 sys_ioctl(fd, TCSETSF, (long)&termios);
65252 - sys_read(fd, &c, 1);
65253 + sys_read(fd, (char __user *)&c, 1);
65254 termios.c_lflag |= ICANON;
65255 sys_ioctl(fd, TCSETSF, (long)&termios);
65256 sys_close(fd);
65257 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
65258 mount_root();
65259 out:
65260 devtmpfs_mount("dev");
65261 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
65262 - sys_chroot((const char __user __force *)".");
65263 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65264 + sys_chroot((const char __force_user *)".");
65265 }
65266 diff --git a/init/do_mounts.h b/init/do_mounts.h
65267 index f5b978a..69dbfe8 100644
65268 --- a/init/do_mounts.h
65269 +++ b/init/do_mounts.h
65270 @@ -15,15 +15,15 @@ extern int root_mountflags;
65271
65272 static inline int create_dev(char *name, dev_t dev)
65273 {
65274 - sys_unlink(name);
65275 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
65276 + sys_unlink((char __force_user *)name);
65277 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
65278 }
65279
65280 #if BITS_PER_LONG == 32
65281 static inline u32 bstat(char *name)
65282 {
65283 struct stat64 stat;
65284 - if (sys_stat64(name, &stat) != 0)
65285 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
65286 return 0;
65287 if (!S_ISBLK(stat.st_mode))
65288 return 0;
65289 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
65290 static inline u32 bstat(char *name)
65291 {
65292 struct stat stat;
65293 - if (sys_newstat(name, &stat) != 0)
65294 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
65295 return 0;
65296 if (!S_ISBLK(stat.st_mode))
65297 return 0;
65298 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
65299 index 3098a38..253064e 100644
65300 --- a/init/do_mounts_initrd.c
65301 +++ b/init/do_mounts_initrd.c
65302 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
65303 create_dev("/dev/root.old", Root_RAM0);
65304 /* mount initrd on rootfs' /root */
65305 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
65306 - sys_mkdir("/old", 0700);
65307 - root_fd = sys_open("/", 0, 0);
65308 - old_fd = sys_open("/old", 0, 0);
65309 + sys_mkdir((const char __force_user *)"/old", 0700);
65310 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
65311 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
65312 /* move initrd over / and chdir/chroot in initrd root */
65313 - sys_chdir("/root");
65314 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
65315 - sys_chroot(".");
65316 + sys_chdir((const char __force_user *)"/root");
65317 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
65318 + sys_chroot((const char __force_user *)".");
65319
65320 /*
65321 * In case that a resume from disk is carried out by linuxrc or one of
65322 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
65323
65324 /* move initrd to rootfs' /old */
65325 sys_fchdir(old_fd);
65326 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
65327 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
65328 /* switch root and cwd back to / of rootfs */
65329 sys_fchdir(root_fd);
65330 - sys_chroot(".");
65331 + sys_chroot((const char __force_user *)".");
65332 sys_close(old_fd);
65333 sys_close(root_fd);
65334
65335 if (new_decode_dev(real_root_dev) == Root_RAM0) {
65336 - sys_chdir("/old");
65337 + sys_chdir((const char __force_user *)"/old");
65338 return;
65339 }
65340
65341 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
65342 mount_root();
65343
65344 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
65345 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
65346 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
65347 if (!error)
65348 printk("okay\n");
65349 else {
65350 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
65351 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
65352 if (error == -ENOENT)
65353 printk("/initrd does not exist. Ignored.\n");
65354 else
65355 printk("failed\n");
65356 printk(KERN_NOTICE "Unmounting old root\n");
65357 - sys_umount("/old", MNT_DETACH);
65358 + sys_umount((char __force_user *)"/old", MNT_DETACH);
65359 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
65360 if (fd < 0) {
65361 error = fd;
65362 @@ -116,11 +116,11 @@ int __init initrd_load(void)
65363 * mounted in the normal path.
65364 */
65365 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
65366 - sys_unlink("/initrd.image");
65367 + sys_unlink((const char __force_user *)"/initrd.image");
65368 handle_initrd();
65369 return 1;
65370 }
65371 }
65372 - sys_unlink("/initrd.image");
65373 + sys_unlink((const char __force_user *)"/initrd.image");
65374 return 0;
65375 }
65376 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
65377 index 32c4799..c27ee74 100644
65378 --- a/init/do_mounts_md.c
65379 +++ b/init/do_mounts_md.c
65380 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
65381 partitioned ? "_d" : "", minor,
65382 md_setup_args[ent].device_names);
65383
65384 - fd = sys_open(name, 0, 0);
65385 + fd = sys_open((char __force_user *)name, 0, 0);
65386 if (fd < 0) {
65387 printk(KERN_ERR "md: open failed - cannot start "
65388 "array %s\n", name);
65389 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
65390 * array without it
65391 */
65392 sys_close(fd);
65393 - fd = sys_open(name, 0, 0);
65394 + fd = sys_open((char __force_user *)name, 0, 0);
65395 sys_ioctl(fd, BLKRRPART, 0);
65396 }
65397 sys_close(fd);
65398 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
65399
65400 wait_for_device_probe();
65401
65402 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
65403 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
65404 if (fd >= 0) {
65405 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
65406 sys_close(fd);
65407 diff --git a/init/initramfs.c b/init/initramfs.c
65408 index 8216c30..25e8e32 100644
65409 --- a/init/initramfs.c
65410 +++ b/init/initramfs.c
65411 @@ -74,7 +74,7 @@ static void __init free_hash(void)
65412 }
65413 }
65414
65415 -static long __init do_utime(char __user *filename, time_t mtime)
65416 +static long __init do_utime(__force char __user *filename, time_t mtime)
65417 {
65418 struct timespec t[2];
65419
65420 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
65421 struct dir_entry *de, *tmp;
65422 list_for_each_entry_safe(de, tmp, &dir_list, list) {
65423 list_del(&de->list);
65424 - do_utime(de->name, de->mtime);
65425 + do_utime((char __force_user *)de->name, de->mtime);
65426 kfree(de->name);
65427 kfree(de);
65428 }
65429 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
65430 if (nlink >= 2) {
65431 char *old = find_link(major, minor, ino, mode, collected);
65432 if (old)
65433 - return (sys_link(old, collected) < 0) ? -1 : 1;
65434 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
65435 }
65436 return 0;
65437 }
65438 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
65439 {
65440 struct stat st;
65441
65442 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
65443 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
65444 if (S_ISDIR(st.st_mode))
65445 - sys_rmdir(path);
65446 + sys_rmdir((char __force_user *)path);
65447 else
65448 - sys_unlink(path);
65449 + sys_unlink((char __force_user *)path);
65450 }
65451 }
65452
65453 @@ -305,7 +305,7 @@ static int __init do_name(void)
65454 int openflags = O_WRONLY|O_CREAT;
65455 if (ml != 1)
65456 openflags |= O_TRUNC;
65457 - wfd = sys_open(collected, openflags, mode);
65458 + wfd = sys_open((char __force_user *)collected, openflags, mode);
65459
65460 if (wfd >= 0) {
65461 sys_fchown(wfd, uid, gid);
65462 @@ -317,17 +317,17 @@ static int __init do_name(void)
65463 }
65464 }
65465 } else if (S_ISDIR(mode)) {
65466 - sys_mkdir(collected, mode);
65467 - sys_chown(collected, uid, gid);
65468 - sys_chmod(collected, mode);
65469 + sys_mkdir((char __force_user *)collected, mode);
65470 + sys_chown((char __force_user *)collected, uid, gid);
65471 + sys_chmod((char __force_user *)collected, mode);
65472 dir_add(collected, mtime);
65473 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
65474 S_ISFIFO(mode) || S_ISSOCK(mode)) {
65475 if (maybe_link() == 0) {
65476 - sys_mknod(collected, mode, rdev);
65477 - sys_chown(collected, uid, gid);
65478 - sys_chmod(collected, mode);
65479 - do_utime(collected, mtime);
65480 + sys_mknod((char __force_user *)collected, mode, rdev);
65481 + sys_chown((char __force_user *)collected, uid, gid);
65482 + sys_chmod((char __force_user *)collected, mode);
65483 + do_utime((char __force_user *)collected, mtime);
65484 }
65485 }
65486 return 0;
65487 @@ -336,15 +336,15 @@ static int __init do_name(void)
65488 static int __init do_copy(void)
65489 {
65490 if (count >= body_len) {
65491 - sys_write(wfd, victim, body_len);
65492 + sys_write(wfd, (char __force_user *)victim, body_len);
65493 sys_close(wfd);
65494 - do_utime(vcollected, mtime);
65495 + do_utime((char __force_user *)vcollected, mtime);
65496 kfree(vcollected);
65497 eat(body_len);
65498 state = SkipIt;
65499 return 0;
65500 } else {
65501 - sys_write(wfd, victim, count);
65502 + sys_write(wfd, (char __force_user *)victim, count);
65503 body_len -= count;
65504 eat(count);
65505 return 1;
65506 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
65507 {
65508 collected[N_ALIGN(name_len) + body_len] = '\0';
65509 clean_path(collected, 0);
65510 - sys_symlink(collected + N_ALIGN(name_len), collected);
65511 - sys_lchown(collected, uid, gid);
65512 - do_utime(collected, mtime);
65513 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
65514 + sys_lchown((char __force_user *)collected, uid, gid);
65515 + do_utime((char __force_user *)collected, mtime);
65516 state = SkipIt;
65517 next_state = Reset;
65518 return 0;
65519 diff --git a/init/main.c b/init/main.c
65520 index ff49a6d..5fa0429 100644
65521 --- a/init/main.c
65522 +++ b/init/main.c
65523 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
65524 extern void tc_init(void);
65525 #endif
65526
65527 +extern void grsecurity_init(void);
65528 +
65529 /*
65530 * Debug helper: via this flag we know that we are in 'early bootup code'
65531 * where only the boot processor is running with IRQ disabled. This means
65532 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char *str)
65533
65534 __setup("reset_devices", set_reset_devices);
65535
65536 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
65537 +extern char pax_enter_kernel_user[];
65538 +extern char pax_exit_kernel_user[];
65539 +extern pgdval_t clone_pgd_mask;
65540 +#endif
65541 +
65542 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
65543 +static int __init setup_pax_nouderef(char *str)
65544 +{
65545 +#ifdef CONFIG_X86_32
65546 + unsigned int cpu;
65547 + struct desc_struct *gdt;
65548 +
65549 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
65550 + gdt = get_cpu_gdt_table(cpu);
65551 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
65552 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
65553 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
65554 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
65555 + }
65556 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
65557 +#else
65558 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
65559 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
65560 + clone_pgd_mask = ~(pgdval_t)0UL;
65561 +#endif
65562 +
65563 + return 0;
65564 +}
65565 +early_param("pax_nouderef", setup_pax_nouderef);
65566 +#endif
65567 +
65568 +#ifdef CONFIG_PAX_SOFTMODE
65569 +int pax_softmode;
65570 +
65571 +static int __init setup_pax_softmode(char *str)
65572 +{
65573 + get_option(&str, &pax_softmode);
65574 + return 1;
65575 +}
65576 +__setup("pax_softmode=", setup_pax_softmode);
65577 +#endif
65578 +
65579 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65580 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
65581 static const char *panic_later, *panic_param;
65582 @@ -675,6 +720,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
65583 {
65584 int count = preempt_count();
65585 int ret;
65586 + const char *msg1 = "", *msg2 = "";
65587
65588 if (initcall_debug)
65589 ret = do_one_initcall_debug(fn);
65590 @@ -687,15 +733,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
65591 sprintf(msgbuf, "error code %d ", ret);
65592
65593 if (preempt_count() != count) {
65594 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65595 + msg1 = " preemption imbalance";
65596 preempt_count() = count;
65597 }
65598 if (irqs_disabled()) {
65599 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65600 + msg2 = " disabled interrupts";
65601 local_irq_enable();
65602 }
65603 - if (msgbuf[0]) {
65604 - printk("initcall %pF returned with %s\n", fn, msgbuf);
65605 + if (msgbuf[0] || *msg1 || *msg2) {
65606 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65607 }
65608
65609 return ret;
65610 @@ -814,7 +860,7 @@ static int __init kernel_init(void * unused)
65611 do_basic_setup();
65612
65613 /* Open the /dev/console on the rootfs, this should never fail */
65614 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
65615 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
65616 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65617
65618 (void) sys_dup(0);
65619 @@ -827,11 +873,13 @@ static int __init kernel_init(void * unused)
65620 if (!ramdisk_execute_command)
65621 ramdisk_execute_command = "/init";
65622
65623 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
65624 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
65625 ramdisk_execute_command = NULL;
65626 prepare_namespace();
65627 }
65628
65629 + grsecurity_init();
65630 +
65631 /*
65632 * Ok, we have completed the initial bootup, and
65633 * we're essentially up and running. Get rid of the
65634 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
65635 index 86ee272..773d937 100644
65636 --- a/ipc/mqueue.c
65637 +++ b/ipc/mqueue.c
65638 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
65639 mq_bytes = (mq_msg_tblsz +
65640 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
65641
65642 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
65643 spin_lock(&mq_lock);
65644 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
65645 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
65646 diff --git a/ipc/msg.c b/ipc/msg.c
65647 index 7385de2..a8180e08 100644
65648 --- a/ipc/msg.c
65649 +++ b/ipc/msg.c
65650 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
65651 return security_msg_queue_associate(msq, msgflg);
65652 }
65653
65654 +static struct ipc_ops msg_ops = {
65655 + .getnew = newque,
65656 + .associate = msg_security,
65657 + .more_checks = NULL
65658 +};
65659 +
65660 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
65661 {
65662 struct ipc_namespace *ns;
65663 - struct ipc_ops msg_ops;
65664 struct ipc_params msg_params;
65665
65666 ns = current->nsproxy->ipc_ns;
65667
65668 - msg_ops.getnew = newque;
65669 - msg_ops.associate = msg_security;
65670 - msg_ops.more_checks = NULL;
65671 -
65672 msg_params.key = key;
65673 msg_params.flg = msgflg;
65674
65675 diff --git a/ipc/sem.c b/ipc/sem.c
65676 index 5215a81..cfc0cac 100644
65677 --- a/ipc/sem.c
65678 +++ b/ipc/sem.c
65679 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
65680 return 0;
65681 }
65682
65683 +static struct ipc_ops sem_ops = {
65684 + .getnew = newary,
65685 + .associate = sem_security,
65686 + .more_checks = sem_more_checks
65687 +};
65688 +
65689 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65690 {
65691 struct ipc_namespace *ns;
65692 - struct ipc_ops sem_ops;
65693 struct ipc_params sem_params;
65694
65695 ns = current->nsproxy->ipc_ns;
65696 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65697 if (nsems < 0 || nsems > ns->sc_semmsl)
65698 return -EINVAL;
65699
65700 - sem_ops.getnew = newary;
65701 - sem_ops.associate = sem_security;
65702 - sem_ops.more_checks = sem_more_checks;
65703 -
65704 sem_params.key = key;
65705 sem_params.flg = semflg;
65706 sem_params.u.nsems = nsems;
65707 diff --git a/ipc/shm.c b/ipc/shm.c
65708 index b76be5b..859e750 100644
65709 --- a/ipc/shm.c
65710 +++ b/ipc/shm.c
65711 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
65712 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65713 #endif
65714
65715 +#ifdef CONFIG_GRKERNSEC
65716 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65717 + const time_t shm_createtime, const uid_t cuid,
65718 + const int shmid);
65719 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65720 + const time_t shm_createtime);
65721 +#endif
65722 +
65723 void shm_init_ns(struct ipc_namespace *ns)
65724 {
65725 ns->shm_ctlmax = SHMMAX;
65726 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
65727 shp->shm_lprid = 0;
65728 shp->shm_atim = shp->shm_dtim = 0;
65729 shp->shm_ctim = get_seconds();
65730 +#ifdef CONFIG_GRKERNSEC
65731 + {
65732 + struct timespec timeval;
65733 + do_posix_clock_monotonic_gettime(&timeval);
65734 +
65735 + shp->shm_createtime = timeval.tv_sec;
65736 + }
65737 +#endif
65738 shp->shm_segsz = size;
65739 shp->shm_nattch = 0;
65740 shp->shm_file = file;
65741 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
65742 return 0;
65743 }
65744
65745 +static struct ipc_ops shm_ops = {
65746 + .getnew = newseg,
65747 + .associate = shm_security,
65748 + .more_checks = shm_more_checks
65749 +};
65750 +
65751 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65752 {
65753 struct ipc_namespace *ns;
65754 - struct ipc_ops shm_ops;
65755 struct ipc_params shm_params;
65756
65757 ns = current->nsproxy->ipc_ns;
65758
65759 - shm_ops.getnew = newseg;
65760 - shm_ops.associate = shm_security;
65761 - shm_ops.more_checks = shm_more_checks;
65762 -
65763 shm_params.key = key;
65764 shm_params.flg = shmflg;
65765 shm_params.u.size = size;
65766 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65767 f_mode = FMODE_READ | FMODE_WRITE;
65768 }
65769 if (shmflg & SHM_EXEC) {
65770 +
65771 +#ifdef CONFIG_PAX_MPROTECT
65772 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
65773 + goto out;
65774 +#endif
65775 +
65776 prot |= PROT_EXEC;
65777 acc_mode |= S_IXUGO;
65778 }
65779 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65780 if (err)
65781 goto out_unlock;
65782
65783 +#ifdef CONFIG_GRKERNSEC
65784 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65785 + shp->shm_perm.cuid, shmid) ||
65786 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65787 + err = -EACCES;
65788 + goto out_unlock;
65789 + }
65790 +#endif
65791 +
65792 path = shp->shm_file->f_path;
65793 path_get(&path);
65794 shp->shm_nattch++;
65795 +#ifdef CONFIG_GRKERNSEC
65796 + shp->shm_lapid = current->pid;
65797 +#endif
65798 size = i_size_read(path.dentry->d_inode);
65799 shm_unlock(shp);
65800
65801 diff --git a/kernel/acct.c b/kernel/acct.c
65802 index 02e6167..54824f7 100644
65803 --- a/kernel/acct.c
65804 +++ b/kernel/acct.c
65805 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
65806 */
65807 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65808 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65809 - file->f_op->write(file, (char *)&ac,
65810 + file->f_op->write(file, (char __force_user *)&ac,
65811 sizeof(acct_t), &file->f_pos);
65812 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65813 set_fs(fs);
65814 diff --git a/kernel/audit.c b/kernel/audit.c
65815 index bb0eb5b..cf2a03a 100644
65816 --- a/kernel/audit.c
65817 +++ b/kernel/audit.c
65818 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
65819 3) suppressed due to audit_rate_limit
65820 4) suppressed due to audit_backlog_limit
65821 */
65822 -static atomic_t audit_lost = ATOMIC_INIT(0);
65823 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65824
65825 /* The netlink socket. */
65826 static struct sock *audit_sock;
65827 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65828 unsigned long now;
65829 int print;
65830
65831 - atomic_inc(&audit_lost);
65832 + atomic_inc_unchecked(&audit_lost);
65833
65834 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65835
65836 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65837 printk(KERN_WARNING
65838 "audit: audit_lost=%d audit_rate_limit=%d "
65839 "audit_backlog_limit=%d\n",
65840 - atomic_read(&audit_lost),
65841 + atomic_read_unchecked(&audit_lost),
65842 audit_rate_limit,
65843 audit_backlog_limit);
65844 audit_panic(message);
65845 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65846 status_set.pid = audit_pid;
65847 status_set.rate_limit = audit_rate_limit;
65848 status_set.backlog_limit = audit_backlog_limit;
65849 - status_set.lost = atomic_read(&audit_lost);
65850 + status_set.lost = atomic_read_unchecked(&audit_lost);
65851 status_set.backlog = skb_queue_len(&audit_skb_queue);
65852 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65853 &status_set, sizeof(status_set));
65854 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65855 index af1de0f..06dfe57 100644
65856 --- a/kernel/auditsc.c
65857 +++ b/kernel/auditsc.c
65858 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65859 }
65860
65861 /* global counter which is incremented every time something logs in */
65862 -static atomic_t session_id = ATOMIC_INIT(0);
65863 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65864
65865 /**
65866 * audit_set_loginuid - set current task's audit_context loginuid
65867 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65868 return -EPERM;
65869 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
65870
65871 - sessionid = atomic_inc_return(&session_id);
65872 + sessionid = atomic_inc_return_unchecked(&session_id);
65873 if (context && context->in_syscall) {
65874 struct audit_buffer *ab;
65875
65876 diff --git a/kernel/capability.c b/kernel/capability.c
65877 index 3f1adb6..c564db0 100644
65878 --- a/kernel/capability.c
65879 +++ b/kernel/capability.c
65880 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65881 * before modification is attempted and the application
65882 * fails.
65883 */
65884 + if (tocopy > ARRAY_SIZE(kdata))
65885 + return -EFAULT;
65886 +
65887 if (copy_to_user(dataptr, kdata, tocopy
65888 * sizeof(struct __user_cap_data_struct))) {
65889 return -EFAULT;
65890 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65891 int ret;
65892
65893 rcu_read_lock();
65894 - ret = security_capable(__task_cred(t), ns, cap);
65895 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65896 + gr_task_is_capable(t, __task_cred(t), cap);
65897 rcu_read_unlock();
65898
65899 - return (ret == 0);
65900 + return ret;
65901 }
65902
65903 /**
65904 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65905 int ret;
65906
65907 rcu_read_lock();
65908 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
65909 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65910 rcu_read_unlock();
65911
65912 - return (ret == 0);
65913 + return ret;
65914 }
65915
65916 /**
65917 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65918 BUG();
65919 }
65920
65921 - if (security_capable(current_cred(), ns, cap) == 0) {
65922 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
65923 current->flags |= PF_SUPERPRIV;
65924 return true;
65925 }
65926 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
65927 }
65928 EXPORT_SYMBOL(ns_capable);
65929
65930 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
65931 +{
65932 + if (unlikely(!cap_valid(cap))) {
65933 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65934 + BUG();
65935 + }
65936 +
65937 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
65938 + current->flags |= PF_SUPERPRIV;
65939 + return true;
65940 + }
65941 + return false;
65942 +}
65943 +EXPORT_SYMBOL(ns_capable_nolog);
65944 +
65945 /**
65946 * capable - Determine if the current task has a superior capability in effect
65947 * @cap: The capability to be tested for
65948 @@ -408,6 +427,12 @@ bool capable(int cap)
65949 }
65950 EXPORT_SYMBOL(capable);
65951
65952 +bool capable_nolog(int cap)
65953 +{
65954 + return ns_capable_nolog(&init_user_ns, cap);
65955 +}
65956 +EXPORT_SYMBOL(capable_nolog);
65957 +
65958 /**
65959 * nsown_capable - Check superior capability to one's own user_ns
65960 * @cap: The capability in question
65961 diff --git a/kernel/compat.c b/kernel/compat.c
65962 index f346ced..aa2b1f4 100644
65963 --- a/kernel/compat.c
65964 +++ b/kernel/compat.c
65965 @@ -13,6 +13,7 @@
65966
65967 #include <linux/linkage.h>
65968 #include <linux/compat.h>
65969 +#include <linux/module.h>
65970 #include <linux/errno.h>
65971 #include <linux/time.h>
65972 #include <linux/signal.h>
65973 @@ -168,7 +169,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65974 mm_segment_t oldfs;
65975 long ret;
65976
65977 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65978 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65979 oldfs = get_fs();
65980 set_fs(KERNEL_DS);
65981 ret = hrtimer_nanosleep_restart(restart);
65982 @@ -200,7 +201,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65983 oldfs = get_fs();
65984 set_fs(KERNEL_DS);
65985 ret = hrtimer_nanosleep(&tu,
65986 - rmtp ? (struct timespec __user *)&rmt : NULL,
65987 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
65988 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65989 set_fs(oldfs);
65990
65991 @@ -309,7 +310,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65992 mm_segment_t old_fs = get_fs();
65993
65994 set_fs(KERNEL_DS);
65995 - ret = sys_sigpending((old_sigset_t __user *) &s);
65996 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
65997 set_fs(old_fs);
65998 if (ret == 0)
65999 ret = put_user(s, set);
66000 @@ -332,8 +333,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
66001 old_fs = get_fs();
66002 set_fs(KERNEL_DS);
66003 ret = sys_sigprocmask(how,
66004 - set ? (old_sigset_t __user *) &s : NULL,
66005 - oset ? (old_sigset_t __user *) &s : NULL);
66006 + set ? (old_sigset_t __force_user *) &s : NULL,
66007 + oset ? (old_sigset_t __force_user *) &s : NULL);
66008 set_fs(old_fs);
66009 if (ret == 0)
66010 if (oset)
66011 @@ -370,7 +371,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
66012 mm_segment_t old_fs = get_fs();
66013
66014 set_fs(KERNEL_DS);
66015 - ret = sys_old_getrlimit(resource, &r);
66016 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
66017 set_fs(old_fs);
66018
66019 if (!ret) {
66020 @@ -442,7 +443,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
66021 mm_segment_t old_fs = get_fs();
66022
66023 set_fs(KERNEL_DS);
66024 - ret = sys_getrusage(who, (struct rusage __user *) &r);
66025 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
66026 set_fs(old_fs);
66027
66028 if (ret)
66029 @@ -469,8 +470,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
66030 set_fs (KERNEL_DS);
66031 ret = sys_wait4(pid,
66032 (stat_addr ?
66033 - (unsigned int __user *) &status : NULL),
66034 - options, (struct rusage __user *) &r);
66035 + (unsigned int __force_user *) &status : NULL),
66036 + options, (struct rusage __force_user *) &r);
66037 set_fs (old_fs);
66038
66039 if (ret > 0) {
66040 @@ -495,8 +496,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
66041 memset(&info, 0, sizeof(info));
66042
66043 set_fs(KERNEL_DS);
66044 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
66045 - uru ? (struct rusage __user *)&ru : NULL);
66046 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
66047 + uru ? (struct rusage __force_user *)&ru : NULL);
66048 set_fs(old_fs);
66049
66050 if ((ret < 0) || (info.si_signo == 0))
66051 @@ -626,8 +627,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
66052 oldfs = get_fs();
66053 set_fs(KERNEL_DS);
66054 err = sys_timer_settime(timer_id, flags,
66055 - (struct itimerspec __user *) &newts,
66056 - (struct itimerspec __user *) &oldts);
66057 + (struct itimerspec __force_user *) &newts,
66058 + (struct itimerspec __force_user *) &oldts);
66059 set_fs(oldfs);
66060 if (!err && old && put_compat_itimerspec(old, &oldts))
66061 return -EFAULT;
66062 @@ -644,7 +645,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
66063 oldfs = get_fs();
66064 set_fs(KERNEL_DS);
66065 err = sys_timer_gettime(timer_id,
66066 - (struct itimerspec __user *) &ts);
66067 + (struct itimerspec __force_user *) &ts);
66068 set_fs(oldfs);
66069 if (!err && put_compat_itimerspec(setting, &ts))
66070 return -EFAULT;
66071 @@ -663,7 +664,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
66072 oldfs = get_fs();
66073 set_fs(KERNEL_DS);
66074 err = sys_clock_settime(which_clock,
66075 - (struct timespec __user *) &ts);
66076 + (struct timespec __force_user *) &ts);
66077 set_fs(oldfs);
66078 return err;
66079 }
66080 @@ -678,7 +679,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
66081 oldfs = get_fs();
66082 set_fs(KERNEL_DS);
66083 err = sys_clock_gettime(which_clock,
66084 - (struct timespec __user *) &ts);
66085 + (struct timespec __force_user *) &ts);
66086 set_fs(oldfs);
66087 if (!err && put_compat_timespec(&ts, tp))
66088 return -EFAULT;
66089 @@ -698,7 +699,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
66090
66091 oldfs = get_fs();
66092 set_fs(KERNEL_DS);
66093 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
66094 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
66095 set_fs(oldfs);
66096
66097 err = compat_put_timex(utp, &txc);
66098 @@ -718,7 +719,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
66099 oldfs = get_fs();
66100 set_fs(KERNEL_DS);
66101 err = sys_clock_getres(which_clock,
66102 - (struct timespec __user *) &ts);
66103 + (struct timespec __force_user *) &ts);
66104 set_fs(oldfs);
66105 if (!err && tp && put_compat_timespec(&ts, tp))
66106 return -EFAULT;
66107 @@ -730,9 +731,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
66108 long err;
66109 mm_segment_t oldfs;
66110 struct timespec tu;
66111 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
66112 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
66113
66114 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
66115 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
66116 oldfs = get_fs();
66117 set_fs(KERNEL_DS);
66118 err = clock_nanosleep_restart(restart);
66119 @@ -764,8 +765,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
66120 oldfs = get_fs();
66121 set_fs(KERNEL_DS);
66122 err = sys_clock_nanosleep(which_clock, flags,
66123 - (struct timespec __user *) &in,
66124 - (struct timespec __user *) &out);
66125 + (struct timespec __force_user *) &in,
66126 + (struct timespec __force_user *) &out);
66127 set_fs(oldfs);
66128
66129 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
66130 diff --git a/kernel/configs.c b/kernel/configs.c
66131 index 42e8fa0..9e7406b 100644
66132 --- a/kernel/configs.c
66133 +++ b/kernel/configs.c
66134 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
66135 struct proc_dir_entry *entry;
66136
66137 /* create the current config file */
66138 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
66139 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
66140 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
66141 + &ikconfig_file_ops);
66142 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66143 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
66144 + &ikconfig_file_ops);
66145 +#endif
66146 +#else
66147 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
66148 &ikconfig_file_ops);
66149 +#endif
66150 +
66151 if (!entry)
66152 return -ENOMEM;
66153
66154 diff --git a/kernel/cred.c b/kernel/cred.c
66155 index 48c6fd3..8398912 100644
66156 --- a/kernel/cred.c
66157 +++ b/kernel/cred.c
66158 @@ -204,6 +204,15 @@ void exit_creds(struct task_struct *tsk)
66159 validate_creds(cred);
66160 put_cred(cred);
66161 }
66162 +
66163 +#ifdef CONFIG_GRKERNSEC_SETXID
66164 + cred = (struct cred *) tsk->delayed_cred;
66165 + if (cred) {
66166 + tsk->delayed_cred = NULL;
66167 + validate_creds(cred);
66168 + put_cred(cred);
66169 + }
66170 +#endif
66171 }
66172
66173 /**
66174 @@ -472,7 +481,7 @@ error_put:
66175 * Always returns 0 thus allowing this function to be tail-called at the end
66176 * of, say, sys_setgid().
66177 */
66178 -int commit_creds(struct cred *new)
66179 +static int __commit_creds(struct cred *new)
66180 {
66181 struct task_struct *task = current;
66182 const struct cred *old = task->real_cred;
66183 @@ -491,6 +500,8 @@ int commit_creds(struct cred *new)
66184
66185 get_cred(new); /* we will require a ref for the subj creds too */
66186
66187 + gr_set_role_label(task, new->uid, new->gid);
66188 +
66189 /* dumpability changes */
66190 if (old->euid != new->euid ||
66191 old->egid != new->egid ||
66192 @@ -540,6 +551,101 @@ int commit_creds(struct cred *new)
66193 put_cred(old);
66194 return 0;
66195 }
66196 +#ifdef CONFIG_GRKERNSEC_SETXID
66197 +extern int set_user(struct cred *new);
66198 +
66199 +void gr_delayed_cred_worker(void)
66200 +{
66201 + const struct cred *new = current->delayed_cred;
66202 + struct cred *ncred;
66203 +
66204 + current->delayed_cred = NULL;
66205 +
66206 + if (current_uid() && new != NULL) {
66207 + // from doing get_cred on it when queueing this
66208 + put_cred(new);
66209 + return;
66210 + } else if (new == NULL)
66211 + return;
66212 +
66213 + ncred = prepare_creds();
66214 + if (!ncred)
66215 + goto die;
66216 + // uids
66217 + ncred->uid = new->uid;
66218 + ncred->euid = new->euid;
66219 + ncred->suid = new->suid;
66220 + ncred->fsuid = new->fsuid;
66221 + // gids
66222 + ncred->gid = new->gid;
66223 + ncred->egid = new->egid;
66224 + ncred->sgid = new->sgid;
66225 + ncred->fsgid = new->fsgid;
66226 + // groups
66227 + if (set_groups(ncred, new->group_info) < 0) {
66228 + abort_creds(ncred);
66229 + goto die;
66230 + }
66231 + // caps
66232 + ncred->securebits = new->securebits;
66233 + ncred->cap_inheritable = new->cap_inheritable;
66234 + ncred->cap_permitted = new->cap_permitted;
66235 + ncred->cap_effective = new->cap_effective;
66236 + ncred->cap_bset = new->cap_bset;
66237 +
66238 + if (set_user(ncred)) {
66239 + abort_creds(ncred);
66240 + goto die;
66241 + }
66242 +
66243 + // from doing get_cred on it when queueing this
66244 + put_cred(new);
66245 +
66246 + __commit_creds(ncred);
66247 + return;
66248 +die:
66249 + // from doing get_cred on it when queueing this
66250 + put_cred(new);
66251 + do_group_exit(SIGKILL);
66252 +}
66253 +#endif
66254 +
66255 +int commit_creds(struct cred *new)
66256 +{
66257 +#ifdef CONFIG_GRKERNSEC_SETXID
66258 + int ret;
66259 + int schedule_it = 0;
66260 + struct task_struct *t;
66261 +
66262 + /* we won't get called with tasklist_lock held for writing
66263 + and interrupts disabled as the cred struct in that case is
66264 + init_cred
66265 + */
66266 + if (grsec_enable_setxid && !current_is_single_threaded() &&
66267 + !current_uid() && new->uid) {
66268 + schedule_it = 1;
66269 + }
66270 + ret = __commit_creds(new);
66271 + if (schedule_it) {
66272 + rcu_read_lock();
66273 + read_lock(&tasklist_lock);
66274 + for (t = next_thread(current); t != current;
66275 + t = next_thread(t)) {
66276 + if (t->delayed_cred == NULL) {
66277 + t->delayed_cred = get_cred(new);
66278 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
66279 + set_tsk_need_resched(t);
66280 + }
66281 + }
66282 + read_unlock(&tasklist_lock);
66283 + rcu_read_unlock();
66284 + }
66285 + return ret;
66286 +#else
66287 + return __commit_creds(new);
66288 +#endif
66289 +}
66290 +
66291 EXPORT_SYMBOL(commit_creds);
66292
66293 /**
66294 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
66295 index 7fda904..59f620c 100644
66296 --- a/kernel/debug/debug_core.c
66297 +++ b/kernel/debug/debug_core.c
66298 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
66299 */
66300 static atomic_t masters_in_kgdb;
66301 static atomic_t slaves_in_kgdb;
66302 -static atomic_t kgdb_break_tasklet_var;
66303 +static atomic_unchecked_t kgdb_break_tasklet_var;
66304 atomic_t kgdb_setting_breakpoint;
66305
66306 struct task_struct *kgdb_usethread;
66307 @@ -129,7 +129,7 @@ int kgdb_single_step;
66308 static pid_t kgdb_sstep_pid;
66309
66310 /* to keep track of the CPU which is doing the single stepping*/
66311 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66312 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
66313
66314 /*
66315 * If you are debugging a problem where roundup (the collection of
66316 @@ -537,7 +537,7 @@ return_normal:
66317 * kernel will only try for the value of sstep_tries before
66318 * giving up and continuing on.
66319 */
66320 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
66321 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
66322 (kgdb_info[cpu].task &&
66323 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
66324 atomic_set(&kgdb_active, -1);
66325 @@ -631,8 +631,8 @@ cpu_master_loop:
66326 }
66327
66328 kgdb_restore:
66329 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
66330 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
66331 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
66332 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
66333 if (kgdb_info[sstep_cpu].task)
66334 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
66335 else
66336 @@ -829,18 +829,18 @@ static void kgdb_unregister_callbacks(void)
66337 static void kgdb_tasklet_bpt(unsigned long ing)
66338 {
66339 kgdb_breakpoint();
66340 - atomic_set(&kgdb_break_tasklet_var, 0);
66341 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
66342 }
66343
66344 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
66345
66346 void kgdb_schedule_breakpoint(void)
66347 {
66348 - if (atomic_read(&kgdb_break_tasklet_var) ||
66349 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
66350 atomic_read(&kgdb_active) != -1 ||
66351 atomic_read(&kgdb_setting_breakpoint))
66352 return;
66353 - atomic_inc(&kgdb_break_tasklet_var);
66354 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
66355 tasklet_schedule(&kgdb_tasklet_breakpoint);
66356 }
66357 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
66358 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
66359 index e2ae734..08a4c5c 100644
66360 --- a/kernel/debug/kdb/kdb_main.c
66361 +++ b/kernel/debug/kdb/kdb_main.c
66362 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const char **argv)
66363 list_for_each_entry(mod, kdb_modules, list) {
66364
66365 kdb_printf("%-20s%8u 0x%p ", mod->name,
66366 - mod->core_size, (void *)mod);
66367 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
66368 #ifdef CONFIG_MODULE_UNLOAD
66369 kdb_printf("%4ld ", module_refcount(mod));
66370 #endif
66371 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const char **argv)
66372 kdb_printf(" (Loading)");
66373 else
66374 kdb_printf(" (Live)");
66375 - kdb_printf(" 0x%p", mod->module_core);
66376 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
66377
66378 #ifdef CONFIG_MODULE_UNLOAD
66379 {
66380 diff --git a/kernel/events/core.c b/kernel/events/core.c
66381 index 1b5c081..c375f83 100644
66382 --- a/kernel/events/core.c
66383 +++ b/kernel/events/core.c
66384 @@ -173,7 +173,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
66385 return 0;
66386 }
66387
66388 -static atomic64_t perf_event_id;
66389 +static atomic64_unchecked_t perf_event_id;
66390
66391 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
66392 enum event_type_t event_type);
66393 @@ -2581,7 +2581,7 @@ static void __perf_event_read(void *info)
66394
66395 static inline u64 perf_event_count(struct perf_event *event)
66396 {
66397 - return local64_read(&event->count) + atomic64_read(&event->child_count);
66398 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
66399 }
66400
66401 static u64 perf_event_read(struct perf_event *event)
66402 @@ -2897,9 +2897,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
66403 mutex_lock(&event->child_mutex);
66404 total += perf_event_read(event);
66405 *enabled += event->total_time_enabled +
66406 - atomic64_read(&event->child_total_time_enabled);
66407 + atomic64_read_unchecked(&event->child_total_time_enabled);
66408 *running += event->total_time_running +
66409 - atomic64_read(&event->child_total_time_running);
66410 + atomic64_read_unchecked(&event->child_total_time_running);
66411
66412 list_for_each_entry(child, &event->child_list, child_list) {
66413 total += perf_event_read(child);
66414 @@ -3306,10 +3306,10 @@ void perf_event_update_userpage(struct perf_event *event)
66415 userpg->offset -= local64_read(&event->hw.prev_count);
66416
66417 userpg->time_enabled = enabled +
66418 - atomic64_read(&event->child_total_time_enabled);
66419 + atomic64_read_unchecked(&event->child_total_time_enabled);
66420
66421 userpg->time_running = running +
66422 - atomic64_read(&event->child_total_time_running);
66423 + atomic64_read_unchecked(&event->child_total_time_running);
66424
66425 barrier();
66426 ++userpg->lock;
66427 @@ -3738,11 +3738,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
66428 values[n++] = perf_event_count(event);
66429 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
66430 values[n++] = enabled +
66431 - atomic64_read(&event->child_total_time_enabled);
66432 + atomic64_read_unchecked(&event->child_total_time_enabled);
66433 }
66434 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
66435 values[n++] = running +
66436 - atomic64_read(&event->child_total_time_running);
66437 + atomic64_read_unchecked(&event->child_total_time_running);
66438 }
66439 if (read_format & PERF_FORMAT_ID)
66440 values[n++] = primary_event_id(event);
66441 @@ -4393,12 +4393,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
66442 * need to add enough zero bytes after the string to handle
66443 * the 64bit alignment we do later.
66444 */
66445 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
66446 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
66447 if (!buf) {
66448 name = strncpy(tmp, "//enomem", sizeof(tmp));
66449 goto got_name;
66450 }
66451 - name = d_path(&file->f_path, buf, PATH_MAX);
66452 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
66453 if (IS_ERR(name)) {
66454 name = strncpy(tmp, "//toolong", sizeof(tmp));
66455 goto got_name;
66456 @@ -5765,7 +5765,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
66457 event->parent = parent_event;
66458
66459 event->ns = get_pid_ns(current->nsproxy->pid_ns);
66460 - event->id = atomic64_inc_return(&perf_event_id);
66461 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
66462
66463 event->state = PERF_EVENT_STATE_INACTIVE;
66464
66465 @@ -6287,10 +6287,10 @@ static void sync_child_event(struct perf_event *child_event,
66466 /*
66467 * Add back the child's count to the parent's count:
66468 */
66469 - atomic64_add(child_val, &parent_event->child_count);
66470 - atomic64_add(child_event->total_time_enabled,
66471 + atomic64_add_unchecked(child_val, &parent_event->child_count);
66472 + atomic64_add_unchecked(child_event->total_time_enabled,
66473 &parent_event->child_total_time_enabled);
66474 - atomic64_add(child_event->total_time_running,
66475 + atomic64_add_unchecked(child_event->total_time_running,
66476 &parent_event->child_total_time_running);
66477
66478 /*
66479 diff --git a/kernel/exit.c b/kernel/exit.c
66480 index 46c8b14..d868958 100644
66481 --- a/kernel/exit.c
66482 +++ b/kernel/exit.c
66483 @@ -58,6 +58,10 @@
66484 #include <asm/pgtable.h>
66485 #include <asm/mmu_context.h>
66486
66487 +#ifdef CONFIG_GRKERNSEC
66488 +extern rwlock_t grsec_exec_file_lock;
66489 +#endif
66490 +
66491 static void exit_mm(struct task_struct * tsk);
66492
66493 static void __unhash_process(struct task_struct *p, bool group_dead)
66494 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p)
66495 struct task_struct *leader;
66496 int zap_leader;
66497 repeat:
66498 +#ifdef CONFIG_NET
66499 + gr_del_task_from_ip_table(p);
66500 +#endif
66501 +
66502 /* don't need to get the RCU readlock here - the process is dead and
66503 * can't be modifying its own credentials. But shut RCU-lockdep up */
66504 rcu_read_lock();
66505 @@ -381,7 +389,7 @@ int allow_signal(int sig)
66506 * know it'll be handled, so that they don't get converted to
66507 * SIGKILL or just silently dropped.
66508 */
66509 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
66510 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
66511 recalc_sigpending();
66512 spin_unlock_irq(&current->sighand->siglock);
66513 return 0;
66514 @@ -417,6 +425,17 @@ void daemonize(const char *name, ...)
66515 vsnprintf(current->comm, sizeof(current->comm), name, args);
66516 va_end(args);
66517
66518 +#ifdef CONFIG_GRKERNSEC
66519 + write_lock(&grsec_exec_file_lock);
66520 + if (current->exec_file) {
66521 + fput(current->exec_file);
66522 + current->exec_file = NULL;
66523 + }
66524 + write_unlock(&grsec_exec_file_lock);
66525 +#endif
66526 +
66527 + gr_set_kernel_label(current);
66528 +
66529 /*
66530 * If we were started as result of loading a module, close all of the
66531 * user space pages. We don't need them, and if we didn't close them
66532 @@ -873,6 +892,8 @@ void do_exit(long code)
66533 struct task_struct *tsk = current;
66534 int group_dead;
66535
66536 + set_fs(USER_DS);
66537 +
66538 profile_task_exit(tsk);
66539
66540 WARN_ON(blk_needs_flush_plug(tsk));
66541 @@ -889,7 +910,6 @@ void do_exit(long code)
66542 * mm_release()->clear_child_tid() from writing to a user-controlled
66543 * kernel address.
66544 */
66545 - set_fs(USER_DS);
66546
66547 ptrace_event(PTRACE_EVENT_EXIT, code);
66548
66549 @@ -950,6 +970,9 @@ void do_exit(long code)
66550 tsk->exit_code = code;
66551 taskstats_exit(tsk, group_dead);
66552
66553 + gr_acl_handle_psacct(tsk, code);
66554 + gr_acl_handle_exit();
66555 +
66556 exit_mm(tsk);
66557
66558 if (group_dead)
66559 @@ -1066,7 +1089,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
66560 * Take down every thread in the group. This is called by fatal signals
66561 * as well as by sys_exit_group (below).
66562 */
66563 -void
66564 +__noreturn void
66565 do_group_exit(int exit_code)
66566 {
66567 struct signal_struct *sig = current->signal;
66568 diff --git a/kernel/fork.c b/kernel/fork.c
66569 index 26a7a67..a1053f9 100644
66570 --- a/kernel/fork.c
66571 +++ b/kernel/fork.c
66572 @@ -284,7 +284,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
66573 *stackend = STACK_END_MAGIC; /* for overflow detection */
66574
66575 #ifdef CONFIG_CC_STACKPROTECTOR
66576 - tsk->stack_canary = get_random_int();
66577 + tsk->stack_canary = pax_get_random_long();
66578 #endif
66579
66580 /*
66581 @@ -308,13 +308,77 @@ out:
66582 }
66583
66584 #ifdef CONFIG_MMU
66585 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
66586 +{
66587 + struct vm_area_struct *tmp;
66588 + unsigned long charge;
66589 + struct mempolicy *pol;
66590 + struct file *file;
66591 +
66592 + charge = 0;
66593 + if (mpnt->vm_flags & VM_ACCOUNT) {
66594 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66595 + if (security_vm_enough_memory(len))
66596 + goto fail_nomem;
66597 + charge = len;
66598 + }
66599 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66600 + if (!tmp)
66601 + goto fail_nomem;
66602 + *tmp = *mpnt;
66603 + tmp->vm_mm = mm;
66604 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
66605 + pol = mpol_dup(vma_policy(mpnt));
66606 + if (IS_ERR(pol))
66607 + goto fail_nomem_policy;
66608 + vma_set_policy(tmp, pol);
66609 + if (anon_vma_fork(tmp, mpnt))
66610 + goto fail_nomem_anon_vma_fork;
66611 + tmp->vm_flags &= ~VM_LOCKED;
66612 + tmp->vm_next = tmp->vm_prev = NULL;
66613 + tmp->vm_mirror = NULL;
66614 + file = tmp->vm_file;
66615 + if (file) {
66616 + struct inode *inode = file->f_path.dentry->d_inode;
66617 + struct address_space *mapping = file->f_mapping;
66618 +
66619 + get_file(file);
66620 + if (tmp->vm_flags & VM_DENYWRITE)
66621 + atomic_dec(&inode->i_writecount);
66622 + mutex_lock(&mapping->i_mmap_mutex);
66623 + if (tmp->vm_flags & VM_SHARED)
66624 + mapping->i_mmap_writable++;
66625 + flush_dcache_mmap_lock(mapping);
66626 + /* insert tmp into the share list, just after mpnt */
66627 + vma_prio_tree_add(tmp, mpnt);
66628 + flush_dcache_mmap_unlock(mapping);
66629 + mutex_unlock(&mapping->i_mmap_mutex);
66630 + }
66631 +
66632 + /*
66633 + * Clear hugetlb-related page reserves for children. This only
66634 + * affects MAP_PRIVATE mappings. Faults generated by the child
66635 + * are not guaranteed to succeed, even if read-only
66636 + */
66637 + if (is_vm_hugetlb_page(tmp))
66638 + reset_vma_resv_huge_pages(tmp);
66639 +
66640 + return tmp;
66641 +
66642 +fail_nomem_anon_vma_fork:
66643 + mpol_put(pol);
66644 +fail_nomem_policy:
66645 + kmem_cache_free(vm_area_cachep, tmp);
66646 +fail_nomem:
66647 + vm_unacct_memory(charge);
66648 + return NULL;
66649 +}
66650 +
66651 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66652 {
66653 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66654 struct rb_node **rb_link, *rb_parent;
66655 int retval;
66656 - unsigned long charge;
66657 - struct mempolicy *pol;
66658
66659 down_write(&oldmm->mmap_sem);
66660 flush_cache_dup_mm(oldmm);
66661 @@ -326,8 +390,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66662 mm->locked_vm = 0;
66663 mm->mmap = NULL;
66664 mm->mmap_cache = NULL;
66665 - mm->free_area_cache = oldmm->mmap_base;
66666 - mm->cached_hole_size = ~0UL;
66667 + mm->free_area_cache = oldmm->free_area_cache;
66668 + mm->cached_hole_size = oldmm->cached_hole_size;
66669 mm->map_count = 0;
66670 cpumask_clear(mm_cpumask(mm));
66671 mm->mm_rb = RB_ROOT;
66672 @@ -343,8 +407,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66673
66674 prev = NULL;
66675 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66676 - struct file *file;
66677 -
66678 if (mpnt->vm_flags & VM_DONTCOPY) {
66679 long pages = vma_pages(mpnt);
66680 mm->total_vm -= pages;
66681 @@ -352,53 +414,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66682 -pages);
66683 continue;
66684 }
66685 - charge = 0;
66686 - if (mpnt->vm_flags & VM_ACCOUNT) {
66687 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66688 - if (security_vm_enough_memory(len))
66689 - goto fail_nomem;
66690 - charge = len;
66691 + tmp = dup_vma(mm, mpnt);
66692 + if (!tmp) {
66693 + retval = -ENOMEM;
66694 + goto out;
66695 }
66696 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66697 - if (!tmp)
66698 - goto fail_nomem;
66699 - *tmp = *mpnt;
66700 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
66701 - pol = mpol_dup(vma_policy(mpnt));
66702 - retval = PTR_ERR(pol);
66703 - if (IS_ERR(pol))
66704 - goto fail_nomem_policy;
66705 - vma_set_policy(tmp, pol);
66706 - tmp->vm_mm = mm;
66707 - if (anon_vma_fork(tmp, mpnt))
66708 - goto fail_nomem_anon_vma_fork;
66709 - tmp->vm_flags &= ~VM_LOCKED;
66710 - tmp->vm_next = tmp->vm_prev = NULL;
66711 - file = tmp->vm_file;
66712 - if (file) {
66713 - struct inode *inode = file->f_path.dentry->d_inode;
66714 - struct address_space *mapping = file->f_mapping;
66715 -
66716 - get_file(file);
66717 - if (tmp->vm_flags & VM_DENYWRITE)
66718 - atomic_dec(&inode->i_writecount);
66719 - mutex_lock(&mapping->i_mmap_mutex);
66720 - if (tmp->vm_flags & VM_SHARED)
66721 - mapping->i_mmap_writable++;
66722 - flush_dcache_mmap_lock(mapping);
66723 - /* insert tmp into the share list, just after mpnt */
66724 - vma_prio_tree_add(tmp, mpnt);
66725 - flush_dcache_mmap_unlock(mapping);
66726 - mutex_unlock(&mapping->i_mmap_mutex);
66727 - }
66728 -
66729 - /*
66730 - * Clear hugetlb-related page reserves for children. This only
66731 - * affects MAP_PRIVATE mappings. Faults generated by the child
66732 - * are not guaranteed to succeed, even if read-only
66733 - */
66734 - if (is_vm_hugetlb_page(tmp))
66735 - reset_vma_resv_huge_pages(tmp);
66736
66737 /*
66738 * Link in the new vma and copy the page table entries.
66739 @@ -421,6 +441,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66740 if (retval)
66741 goto out;
66742 }
66743 +
66744 +#ifdef CONFIG_PAX_SEGMEXEC
66745 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66746 + struct vm_area_struct *mpnt_m;
66747 +
66748 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66749 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66750 +
66751 + if (!mpnt->vm_mirror)
66752 + continue;
66753 +
66754 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66755 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66756 + mpnt->vm_mirror = mpnt_m;
66757 + } else {
66758 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66759 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66760 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66761 + mpnt->vm_mirror->vm_mirror = mpnt;
66762 + }
66763 + }
66764 + BUG_ON(mpnt_m);
66765 + }
66766 +#endif
66767 +
66768 /* a new mm has just been created */
66769 arch_dup_mmap(oldmm, mm);
66770 retval = 0;
66771 @@ -429,14 +474,6 @@ out:
66772 flush_tlb_mm(oldmm);
66773 up_write(&oldmm->mmap_sem);
66774 return retval;
66775 -fail_nomem_anon_vma_fork:
66776 - mpol_put(pol);
66777 -fail_nomem_policy:
66778 - kmem_cache_free(vm_area_cachep, tmp);
66779 -fail_nomem:
66780 - retval = -ENOMEM;
66781 - vm_unacct_memory(charge);
66782 - goto out;
66783 }
66784
66785 static inline int mm_alloc_pgd(struct mm_struct *mm)
66786 @@ -658,8 +695,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
66787 return ERR_PTR(err);
66788
66789 mm = get_task_mm(task);
66790 - if (mm && mm != current->mm &&
66791 - !ptrace_may_access(task, mode)) {
66792 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
66793 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
66794 mmput(mm);
66795 mm = ERR_PTR(-EACCES);
66796 }
66797 @@ -881,13 +918,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
66798 spin_unlock(&fs->lock);
66799 return -EAGAIN;
66800 }
66801 - fs->users++;
66802 + atomic_inc(&fs->users);
66803 spin_unlock(&fs->lock);
66804 return 0;
66805 }
66806 tsk->fs = copy_fs_struct(fs);
66807 if (!tsk->fs)
66808 return -ENOMEM;
66809 + gr_set_chroot_entries(tsk, &tsk->fs->root);
66810 return 0;
66811 }
66812
66813 @@ -1151,6 +1189,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66814 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66815 #endif
66816 retval = -EAGAIN;
66817 +
66818 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66819 +
66820 if (atomic_read(&p->real_cred->user->processes) >=
66821 task_rlimit(p, RLIMIT_NPROC)) {
66822 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66823 @@ -1306,6 +1347,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66824 if (clone_flags & CLONE_THREAD)
66825 p->tgid = current->tgid;
66826
66827 + gr_copy_label(p);
66828 +
66829 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
66830 /*
66831 * Clear TID on mm_release()?
66832 @@ -1472,6 +1515,8 @@ bad_fork_cleanup_count:
66833 bad_fork_free:
66834 free_task(p);
66835 fork_out:
66836 + gr_log_forkfail(retval);
66837 +
66838 return ERR_PTR(retval);
66839 }
66840
66841 @@ -1572,6 +1617,8 @@ long do_fork(unsigned long clone_flags,
66842 if (clone_flags & CLONE_PARENT_SETTID)
66843 put_user(nr, parent_tidptr);
66844
66845 + gr_handle_brute_check();
66846 +
66847 if (clone_flags & CLONE_VFORK) {
66848 p->vfork_done = &vfork;
66849 init_completion(&vfork);
66850 @@ -1670,7 +1717,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66851 return 0;
66852
66853 /* don't need lock here; in the worst case we'll do useless copy */
66854 - if (fs->users == 1)
66855 + if (atomic_read(&fs->users) == 1)
66856 return 0;
66857
66858 *new_fsp = copy_fs_struct(fs);
66859 @@ -1759,7 +1806,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66860 fs = current->fs;
66861 spin_lock(&fs->lock);
66862 current->fs = new_fs;
66863 - if (--fs->users)
66864 + gr_set_chroot_entries(current, &current->fs->root);
66865 + if (atomic_dec_return(&fs->users))
66866 new_fs = NULL;
66867 else
66868 new_fs = fs;
66869 diff --git a/kernel/futex.c b/kernel/futex.c
66870 index 866c9d5..5c5f828 100644
66871 --- a/kernel/futex.c
66872 +++ b/kernel/futex.c
66873 @@ -54,6 +54,7 @@
66874 #include <linux/mount.h>
66875 #include <linux/pagemap.h>
66876 #include <linux/syscalls.h>
66877 +#include <linux/ptrace.h>
66878 #include <linux/signal.h>
66879 #include <linux/export.h>
66880 #include <linux/magic.h>
66881 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66882 struct page *page, *page_head;
66883 int err, ro = 0;
66884
66885 +#ifdef CONFIG_PAX_SEGMEXEC
66886 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66887 + return -EFAULT;
66888 +#endif
66889 +
66890 /*
66891 * The futex address must be "naturally" aligned.
66892 */
66893 @@ -2721,6 +2727,7 @@ static int __init futex_init(void)
66894 {
66895 u32 curval;
66896 int i;
66897 + mm_segment_t oldfs;
66898
66899 /*
66900 * This will fail and we want it. Some arch implementations do
66901 @@ -2732,8 +2739,11 @@ static int __init futex_init(void)
66902 * implementation, the non-functional ones will return
66903 * -ENOSYS.
66904 */
66905 + oldfs = get_fs();
66906 + set_fs(USER_DS);
66907 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66908 futex_cmpxchg_enabled = 1;
66909 + set_fs(oldfs);
66910
66911 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66912 plist_head_init(&futex_queues[i].chain);
66913 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66914 index 9b22d03..6295b62 100644
66915 --- a/kernel/gcov/base.c
66916 +++ b/kernel/gcov/base.c
66917 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
66918 }
66919
66920 #ifdef CONFIG_MODULES
66921 -static inline int within(void *addr, void *start, unsigned long size)
66922 -{
66923 - return ((addr >= start) && (addr < start + size));
66924 -}
66925 -
66926 /* Update list and generate events when modules are unloaded. */
66927 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66928 void *data)
66929 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66930 prev = NULL;
66931 /* Remove entries located in module from linked list. */
66932 for (info = gcov_info_head; info; info = info->next) {
66933 - if (within(info, mod->module_core, mod->core_size)) {
66934 + if (within_module_core_rw((unsigned long)info, mod)) {
66935 if (prev)
66936 prev->next = info->next;
66937 else
66938 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66939 index ae34bf5..4e2f3d0 100644
66940 --- a/kernel/hrtimer.c
66941 +++ b/kernel/hrtimer.c
66942 @@ -1393,7 +1393,7 @@ void hrtimer_peek_ahead_timers(void)
66943 local_irq_restore(flags);
66944 }
66945
66946 -static void run_hrtimer_softirq(struct softirq_action *h)
66947 +static void run_hrtimer_softirq(void)
66948 {
66949 hrtimer_peek_ahead_timers();
66950 }
66951 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66952 index 01d3b70..9e4d098 100644
66953 --- a/kernel/jump_label.c
66954 +++ b/kernel/jump_label.c
66955 @@ -55,7 +55,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66956
66957 size = (((unsigned long)stop - (unsigned long)start)
66958 / sizeof(struct jump_entry));
66959 + pax_open_kernel();
66960 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66961 + pax_close_kernel();
66962 }
66963
66964 static void jump_label_update(struct jump_label_key *key, int enable);
66965 @@ -340,10 +342,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66966 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66967 struct jump_entry *iter;
66968
66969 + pax_open_kernel();
66970 for (iter = iter_start; iter < iter_stop; iter++) {
66971 if (within_module_init(iter->code, mod))
66972 iter->code = 0;
66973 }
66974 + pax_close_kernel();
66975 }
66976
66977 static int
66978 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66979 index 079f1d3..a407562 100644
66980 --- a/kernel/kallsyms.c
66981 +++ b/kernel/kallsyms.c
66982 @@ -11,6 +11,9 @@
66983 * Changed the compression method from stem compression to "table lookup"
66984 * compression (see scripts/kallsyms.c for a more complete description)
66985 */
66986 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66987 +#define __INCLUDED_BY_HIDESYM 1
66988 +#endif
66989 #include <linux/kallsyms.h>
66990 #include <linux/module.h>
66991 #include <linux/init.h>
66992 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66993
66994 static inline int is_kernel_inittext(unsigned long addr)
66995 {
66996 + if (system_state != SYSTEM_BOOTING)
66997 + return 0;
66998 +
66999 if (addr >= (unsigned long)_sinittext
67000 && addr <= (unsigned long)_einittext)
67001 return 1;
67002 return 0;
67003 }
67004
67005 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67006 +#ifdef CONFIG_MODULES
67007 +static inline int is_module_text(unsigned long addr)
67008 +{
67009 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
67010 + return 1;
67011 +
67012 + addr = ktla_ktva(addr);
67013 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
67014 +}
67015 +#else
67016 +static inline int is_module_text(unsigned long addr)
67017 +{
67018 + return 0;
67019 +}
67020 +#endif
67021 +#endif
67022 +
67023 static inline int is_kernel_text(unsigned long addr)
67024 {
67025 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
67026 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
67027
67028 static inline int is_kernel(unsigned long addr)
67029 {
67030 +
67031 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67032 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
67033 + return 1;
67034 +
67035 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
67036 +#else
67037 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
67038 +#endif
67039 +
67040 return 1;
67041 return in_gate_area_no_mm(addr);
67042 }
67043
67044 static int is_ksym_addr(unsigned long addr)
67045 {
67046 +
67047 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67048 + if (is_module_text(addr))
67049 + return 0;
67050 +#endif
67051 +
67052 if (all_var)
67053 return is_kernel(addr);
67054
67055 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
67056
67057 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
67058 {
67059 - iter->name[0] = '\0';
67060 iter->nameoff = get_symbol_offset(new_pos);
67061 iter->pos = new_pos;
67062 }
67063 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
67064 {
67065 struct kallsym_iter *iter = m->private;
67066
67067 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67068 + if (current_uid())
67069 + return 0;
67070 +#endif
67071 +
67072 /* Some debugging symbols have no name. Ignore them. */
67073 if (!iter->name[0])
67074 return 0;
67075 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
67076 struct kallsym_iter *iter;
67077 int ret;
67078
67079 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
67080 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
67081 if (!iter)
67082 return -ENOMEM;
67083 reset_iter(iter, 0);
67084 diff --git a/kernel/kexec.c b/kernel/kexec.c
67085 index 7b08867..3bac516 100644
67086 --- a/kernel/kexec.c
67087 +++ b/kernel/kexec.c
67088 @@ -1047,7 +1047,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
67089 unsigned long flags)
67090 {
67091 struct compat_kexec_segment in;
67092 - struct kexec_segment out, __user *ksegments;
67093 + struct kexec_segment out;
67094 + struct kexec_segment __user *ksegments;
67095 unsigned long i, result;
67096
67097 /* Don't allow clients that don't understand the native
67098 diff --git a/kernel/kmod.c b/kernel/kmod.c
67099 index a3a46cb..f2e42f8 100644
67100 --- a/kernel/kmod.c
67101 +++ b/kernel/kmod.c
67102 @@ -75,13 +75,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
67103 * If module auto-loading support is disabled then this function
67104 * becomes a no-operation.
67105 */
67106 -int __request_module(bool wait, const char *fmt, ...)
67107 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
67108 {
67109 - va_list args;
67110 char module_name[MODULE_NAME_LEN];
67111 unsigned int max_modprobes;
67112 int ret;
67113 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
67114 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
67115 static char *envp[] = { "HOME=/",
67116 "TERM=linux",
67117 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
67118 @@ -90,9 +89,7 @@ int __request_module(bool wait, const char *fmt, ...)
67119 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
67120 static int kmod_loop_msg;
67121
67122 - va_start(args, fmt);
67123 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
67124 - va_end(args);
67125 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
67126 if (ret >= MODULE_NAME_LEN)
67127 return -ENAMETOOLONG;
67128
67129 @@ -100,6 +97,20 @@ int __request_module(bool wait, const char *fmt, ...)
67130 if (ret)
67131 return ret;
67132
67133 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67134 + if (!current_uid()) {
67135 + /* hack to workaround consolekit/udisks stupidity */
67136 + read_lock(&tasklist_lock);
67137 + if (!strcmp(current->comm, "mount") &&
67138 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
67139 + read_unlock(&tasklist_lock);
67140 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
67141 + return -EPERM;
67142 + }
67143 + read_unlock(&tasklist_lock);
67144 + }
67145 +#endif
67146 +
67147 /* If modprobe needs a service that is in a module, we get a recursive
67148 * loop. Limit the number of running kmod threads to max_threads/2 or
67149 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
67150 @@ -135,6 +146,47 @@ int __request_module(bool wait, const char *fmt, ...)
67151 atomic_dec(&kmod_concurrent);
67152 return ret;
67153 }
67154 +
67155 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
67156 +{
67157 + va_list args;
67158 + int ret;
67159 +
67160 + va_start(args, fmt);
67161 + ret = ____request_module(wait, module_param, fmt, args);
67162 + va_end(args);
67163 +
67164 + return ret;
67165 +}
67166 +
67167 +int __request_module(bool wait, const char *fmt, ...)
67168 +{
67169 + va_list args;
67170 + int ret;
67171 +
67172 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67173 + if (current_uid()) {
67174 + char module_param[MODULE_NAME_LEN];
67175 +
67176 + memset(module_param, 0, sizeof(module_param));
67177 +
67178 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
67179 +
67180 + va_start(args, fmt);
67181 + ret = ____request_module(wait, module_param, fmt, args);
67182 + va_end(args);
67183 +
67184 + return ret;
67185 + }
67186 +#endif
67187 +
67188 + va_start(args, fmt);
67189 + ret = ____request_module(wait, NULL, fmt, args);
67190 + va_end(args);
67191 +
67192 + return ret;
67193 +}
67194 +
67195 EXPORT_SYMBOL(__request_module);
67196 #endif /* CONFIG_MODULES */
67197
67198 @@ -224,7 +276,7 @@ static int wait_for_helper(void *data)
67199 *
67200 * Thus the __user pointer cast is valid here.
67201 */
67202 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
67203 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
67204
67205 /*
67206 * If ret is 0, either ____call_usermodehelper failed and the
67207 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
67208 index c62b854..cb67968 100644
67209 --- a/kernel/kprobes.c
67210 +++ b/kernel/kprobes.c
67211 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
67212 * kernel image and loaded module images reside. This is required
67213 * so x86_64 can correctly handle the %rip-relative fixups.
67214 */
67215 - kip->insns = module_alloc(PAGE_SIZE);
67216 + kip->insns = module_alloc_exec(PAGE_SIZE);
67217 if (!kip->insns) {
67218 kfree(kip);
67219 return NULL;
67220 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
67221 */
67222 if (!list_is_singular(&kip->list)) {
67223 list_del(&kip->list);
67224 - module_free(NULL, kip->insns);
67225 + module_free_exec(NULL, kip->insns);
67226 kfree(kip);
67227 }
67228 return 1;
67229 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
67230 {
67231 int i, err = 0;
67232 unsigned long offset = 0, size = 0;
67233 - char *modname, namebuf[128];
67234 + char *modname, namebuf[KSYM_NAME_LEN];
67235 const char *symbol_name;
67236 void *addr;
67237 struct kprobe_blackpoint *kb;
67238 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
67239 const char *sym = NULL;
67240 unsigned int i = *(loff_t *) v;
67241 unsigned long offset = 0;
67242 - char *modname, namebuf[128];
67243 + char *modname, namebuf[KSYM_NAME_LEN];
67244
67245 head = &kprobe_table[i];
67246 preempt_disable();
67247 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
67248 index 4e316e1..5501eef 100644
67249 --- a/kernel/ksysfs.c
67250 +++ b/kernel/ksysfs.c
67251 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
67252 {
67253 if (count+1 > UEVENT_HELPER_PATH_LEN)
67254 return -ENOENT;
67255 + if (!capable(CAP_SYS_ADMIN))
67256 + return -EPERM;
67257 memcpy(uevent_helper, buf, count);
67258 uevent_helper[count] = '\0';
67259 if (count && uevent_helper[count-1] == '\n')
67260 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
67261 index 8889f7d..95319b7 100644
67262 --- a/kernel/lockdep.c
67263 +++ b/kernel/lockdep.c
67264 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
67265 end = (unsigned long) &_end,
67266 addr = (unsigned long) obj;
67267
67268 +#ifdef CONFIG_PAX_KERNEXEC
67269 + start = ktla_ktva(start);
67270 +#endif
67271 +
67272 /*
67273 * static variable?
67274 */
67275 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
67276 if (!static_obj(lock->key)) {
67277 debug_locks_off();
67278 printk("INFO: trying to register non-static key.\n");
67279 + printk("lock:%pS key:%pS.\n", lock, lock->key);
67280 printk("the code is fine but needs lockdep annotation.\n");
67281 printk("turning off the locking correctness validator.\n");
67282 dump_stack();
67283 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
67284 if (!class)
67285 return 0;
67286 }
67287 - atomic_inc((atomic_t *)&class->ops);
67288 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
67289 if (very_verbose(class)) {
67290 printk("\nacquire class [%p] %s", class->key, class->name);
67291 if (class->name_version > 1)
67292 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
67293 index 91c32a0..b2c71c5 100644
67294 --- a/kernel/lockdep_proc.c
67295 +++ b/kernel/lockdep_proc.c
67296 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
67297
67298 static void print_name(struct seq_file *m, struct lock_class *class)
67299 {
67300 - char str[128];
67301 + char str[KSYM_NAME_LEN];
67302 const char *name = class->name;
67303
67304 if (!name) {
67305 diff --git a/kernel/module.c b/kernel/module.c
67306 index 3d56b6f..2a22bd0 100644
67307 --- a/kernel/module.c
67308 +++ b/kernel/module.c
67309 @@ -58,6 +58,7 @@
67310 #include <linux/jump_label.h>
67311 #include <linux/pfn.h>
67312 #include <linux/bsearch.h>
67313 +#include <linux/grsecurity.h>
67314
67315 #define CREATE_TRACE_POINTS
67316 #include <trace/events/module.h>
67317 @@ -113,7 +114,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
67318
67319 /* Bounds of module allocation, for speeding __module_address.
67320 * Protected by module_mutex. */
67321 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
67322 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
67323 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
67324
67325 int register_module_notifier(struct notifier_block * nb)
67326 {
67327 @@ -277,7 +279,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67328 return true;
67329
67330 list_for_each_entry_rcu(mod, &modules, list) {
67331 - struct symsearch arr[] = {
67332 + struct symsearch modarr[] = {
67333 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
67334 NOT_GPL_ONLY, false },
67335 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
67336 @@ -299,7 +301,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
67337 #endif
67338 };
67339
67340 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
67341 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
67342 return true;
67343 }
67344 return false;
67345 @@ -431,7 +433,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
67346 static int percpu_modalloc(struct module *mod,
67347 unsigned long size, unsigned long align)
67348 {
67349 - if (align > PAGE_SIZE) {
67350 + if (align-1 >= PAGE_SIZE) {
67351 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
67352 mod->name, align, PAGE_SIZE);
67353 align = PAGE_SIZE;
67354 @@ -1001,7 +1003,7 @@ struct module_attribute module_uevent =
67355 static ssize_t show_coresize(struct module_attribute *mattr,
67356 struct module_kobject *mk, char *buffer)
67357 {
67358 - return sprintf(buffer, "%u\n", mk->mod->core_size);
67359 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
67360 }
67361
67362 static struct module_attribute modinfo_coresize =
67363 @@ -1010,7 +1012,7 @@ static struct module_attribute modinfo_coresize =
67364 static ssize_t show_initsize(struct module_attribute *mattr,
67365 struct module_kobject *mk, char *buffer)
67366 {
67367 - return sprintf(buffer, "%u\n", mk->mod->init_size);
67368 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
67369 }
67370
67371 static struct module_attribute modinfo_initsize =
67372 @@ -1224,7 +1226,7 @@ resolve_symbol_wait(struct module *mod,
67373 */
67374 #ifdef CONFIG_SYSFS
67375
67376 -#ifdef CONFIG_KALLSYMS
67377 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67378 static inline bool sect_empty(const Elf_Shdr *sect)
67379 {
67380 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
67381 @@ -1690,21 +1692,21 @@ static void set_section_ro_nx(void *base,
67382
67383 static void unset_module_core_ro_nx(struct module *mod)
67384 {
67385 - set_page_attributes(mod->module_core + mod->core_text_size,
67386 - mod->module_core + mod->core_size,
67387 + set_page_attributes(mod->module_core_rw,
67388 + mod->module_core_rw + mod->core_size_rw,
67389 set_memory_x);
67390 - set_page_attributes(mod->module_core,
67391 - mod->module_core + mod->core_ro_size,
67392 + set_page_attributes(mod->module_core_rx,
67393 + mod->module_core_rx + mod->core_size_rx,
67394 set_memory_rw);
67395 }
67396
67397 static void unset_module_init_ro_nx(struct module *mod)
67398 {
67399 - set_page_attributes(mod->module_init + mod->init_text_size,
67400 - mod->module_init + mod->init_size,
67401 + set_page_attributes(mod->module_init_rw,
67402 + mod->module_init_rw + mod->init_size_rw,
67403 set_memory_x);
67404 - set_page_attributes(mod->module_init,
67405 - mod->module_init + mod->init_ro_size,
67406 + set_page_attributes(mod->module_init_rx,
67407 + mod->module_init_rx + mod->init_size_rx,
67408 set_memory_rw);
67409 }
67410
67411 @@ -1715,14 +1717,14 @@ void set_all_modules_text_rw(void)
67412
67413 mutex_lock(&module_mutex);
67414 list_for_each_entry_rcu(mod, &modules, list) {
67415 - if ((mod->module_core) && (mod->core_text_size)) {
67416 - set_page_attributes(mod->module_core,
67417 - mod->module_core + mod->core_text_size,
67418 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
67419 + set_page_attributes(mod->module_core_rx,
67420 + mod->module_core_rx + mod->core_size_rx,
67421 set_memory_rw);
67422 }
67423 - if ((mod->module_init) && (mod->init_text_size)) {
67424 - set_page_attributes(mod->module_init,
67425 - mod->module_init + mod->init_text_size,
67426 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
67427 + set_page_attributes(mod->module_init_rx,
67428 + mod->module_init_rx + mod->init_size_rx,
67429 set_memory_rw);
67430 }
67431 }
67432 @@ -1736,14 +1738,14 @@ void set_all_modules_text_ro(void)
67433
67434 mutex_lock(&module_mutex);
67435 list_for_each_entry_rcu(mod, &modules, list) {
67436 - if ((mod->module_core) && (mod->core_text_size)) {
67437 - set_page_attributes(mod->module_core,
67438 - mod->module_core + mod->core_text_size,
67439 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
67440 + set_page_attributes(mod->module_core_rx,
67441 + mod->module_core_rx + mod->core_size_rx,
67442 set_memory_ro);
67443 }
67444 - if ((mod->module_init) && (mod->init_text_size)) {
67445 - set_page_attributes(mod->module_init,
67446 - mod->module_init + mod->init_text_size,
67447 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
67448 + set_page_attributes(mod->module_init_rx,
67449 + mod->module_init_rx + mod->init_size_rx,
67450 set_memory_ro);
67451 }
67452 }
67453 @@ -1789,16 +1791,19 @@ static void free_module(struct module *mod)
67454
67455 /* This may be NULL, but that's OK */
67456 unset_module_init_ro_nx(mod);
67457 - module_free(mod, mod->module_init);
67458 + module_free(mod, mod->module_init_rw);
67459 + module_free_exec(mod, mod->module_init_rx);
67460 kfree(mod->args);
67461 percpu_modfree(mod);
67462
67463 /* Free lock-classes: */
67464 - lockdep_free_key_range(mod->module_core, mod->core_size);
67465 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
67466 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
67467
67468 /* Finally, free the core (containing the module structure) */
67469 unset_module_core_ro_nx(mod);
67470 - module_free(mod, mod->module_core);
67471 + module_free_exec(mod, mod->module_core_rx);
67472 + module_free(mod, mod->module_core_rw);
67473
67474 #ifdef CONFIG_MPU
67475 update_protections(current->mm);
67476 @@ -1867,10 +1872,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67477 unsigned int i;
67478 int ret = 0;
67479 const struct kernel_symbol *ksym;
67480 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67481 + int is_fs_load = 0;
67482 + int register_filesystem_found = 0;
67483 + char *p;
67484 +
67485 + p = strstr(mod->args, "grsec_modharden_fs");
67486 + if (p) {
67487 + char *endptr = p + strlen("grsec_modharden_fs");
67488 + /* copy \0 as well */
67489 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
67490 + is_fs_load = 1;
67491 + }
67492 +#endif
67493
67494 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
67495 const char *name = info->strtab + sym[i].st_name;
67496
67497 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67498 + /* it's a real shame this will never get ripped and copied
67499 + upstream! ;(
67500 + */
67501 + if (is_fs_load && !strcmp(name, "register_filesystem"))
67502 + register_filesystem_found = 1;
67503 +#endif
67504 +
67505 switch (sym[i].st_shndx) {
67506 case SHN_COMMON:
67507 /* We compiled with -fno-common. These are not
67508 @@ -1891,7 +1917,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67509 ksym = resolve_symbol_wait(mod, info, name);
67510 /* Ok if resolved. */
67511 if (ksym && !IS_ERR(ksym)) {
67512 + pax_open_kernel();
67513 sym[i].st_value = ksym->value;
67514 + pax_close_kernel();
67515 break;
67516 }
67517
67518 @@ -1910,11 +1938,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
67519 secbase = (unsigned long)mod_percpu(mod);
67520 else
67521 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
67522 + pax_open_kernel();
67523 sym[i].st_value += secbase;
67524 + pax_close_kernel();
67525 break;
67526 }
67527 }
67528
67529 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67530 + if (is_fs_load && !register_filesystem_found) {
67531 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67532 + ret = -EPERM;
67533 + }
67534 +#endif
67535 +
67536 return ret;
67537 }
67538
67539 @@ -2018,22 +2055,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
67540 || s->sh_entsize != ~0UL
67541 || strstarts(sname, ".init"))
67542 continue;
67543 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67544 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67545 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67546 + else
67547 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
67548 pr_debug("\t%s\n", sname);
67549 }
67550 - switch (m) {
67551 - case 0: /* executable */
67552 - mod->core_size = debug_align(mod->core_size);
67553 - mod->core_text_size = mod->core_size;
67554 - break;
67555 - case 1: /* RO: text and ro-data */
67556 - mod->core_size = debug_align(mod->core_size);
67557 - mod->core_ro_size = mod->core_size;
67558 - break;
67559 - case 3: /* whole core */
67560 - mod->core_size = debug_align(mod->core_size);
67561 - break;
67562 - }
67563 }
67564
67565 pr_debug("Init section allocation order:\n");
67566 @@ -2047,23 +2074,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
67567 || s->sh_entsize != ~0UL
67568 || !strstarts(sname, ".init"))
67569 continue;
67570 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67571 - | INIT_OFFSET_MASK);
67572 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67573 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67574 + else
67575 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67576 + s->sh_entsize |= INIT_OFFSET_MASK;
67577 pr_debug("\t%s\n", sname);
67578 }
67579 - switch (m) {
67580 - case 0: /* executable */
67581 - mod->init_size = debug_align(mod->init_size);
67582 - mod->init_text_size = mod->init_size;
67583 - break;
67584 - case 1: /* RO: text and ro-data */
67585 - mod->init_size = debug_align(mod->init_size);
67586 - mod->init_ro_size = mod->init_size;
67587 - break;
67588 - case 3: /* whole init */
67589 - mod->init_size = debug_align(mod->init_size);
67590 - break;
67591 - }
67592 }
67593 }
67594
67595 @@ -2235,7 +2252,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67596
67597 /* Put symbol section at end of init part of module. */
67598 symsect->sh_flags |= SHF_ALLOC;
67599 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67600 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
67601 info->index.sym) | INIT_OFFSET_MASK;
67602 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
67603
67604 @@ -2250,13 +2267,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
67605 }
67606
67607 /* Append room for core symbols at end of core part. */
67608 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
67609 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67610 - mod->core_size += strtab_size;
67611 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
67612 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67613 + mod->core_size_rx += strtab_size;
67614
67615 /* Put string table section at end of init part of module. */
67616 strsect->sh_flags |= SHF_ALLOC;
67617 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67618 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
67619 info->index.str) | INIT_OFFSET_MASK;
67620 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
67621 }
67622 @@ -2274,12 +2291,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67623 /* Make sure we get permanent strtab: don't use info->strtab. */
67624 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
67625
67626 + pax_open_kernel();
67627 +
67628 /* Set types up while we still have access to sections. */
67629 for (i = 0; i < mod->num_symtab; i++)
67630 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
67631
67632 - mod->core_symtab = dst = mod->module_core + info->symoffs;
67633 - mod->core_strtab = s = mod->module_core + info->stroffs;
67634 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
67635 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
67636 src = mod->symtab;
67637 *dst = *src;
67638 *s++ = 0;
67639 @@ -2292,6 +2311,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
67640 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
67641 }
67642 mod->core_num_syms = ndst;
67643 +
67644 + pax_close_kernel();
67645 }
67646 #else
67647 static inline void layout_symtab(struct module *mod, struct load_info *info)
67648 @@ -2325,17 +2346,33 @@ void * __weak module_alloc(unsigned long size)
67649 return size == 0 ? NULL : vmalloc_exec(size);
67650 }
67651
67652 -static void *module_alloc_update_bounds(unsigned long size)
67653 +static void *module_alloc_update_bounds_rw(unsigned long size)
67654 {
67655 void *ret = module_alloc(size);
67656
67657 if (ret) {
67658 mutex_lock(&module_mutex);
67659 /* Update module bounds. */
67660 - if ((unsigned long)ret < module_addr_min)
67661 - module_addr_min = (unsigned long)ret;
67662 - if ((unsigned long)ret + size > module_addr_max)
67663 - module_addr_max = (unsigned long)ret + size;
67664 + if ((unsigned long)ret < module_addr_min_rw)
67665 + module_addr_min_rw = (unsigned long)ret;
67666 + if ((unsigned long)ret + size > module_addr_max_rw)
67667 + module_addr_max_rw = (unsigned long)ret + size;
67668 + mutex_unlock(&module_mutex);
67669 + }
67670 + return ret;
67671 +}
67672 +
67673 +static void *module_alloc_update_bounds_rx(unsigned long size)
67674 +{
67675 + void *ret = module_alloc_exec(size);
67676 +
67677 + if (ret) {
67678 + mutex_lock(&module_mutex);
67679 + /* Update module bounds. */
67680 + if ((unsigned long)ret < module_addr_min_rx)
67681 + module_addr_min_rx = (unsigned long)ret;
67682 + if ((unsigned long)ret + size > module_addr_max_rx)
67683 + module_addr_max_rx = (unsigned long)ret + size;
67684 mutex_unlock(&module_mutex);
67685 }
67686 return ret;
67687 @@ -2512,8 +2549,14 @@ static struct module *setup_load_info(struct load_info *info)
67688 static int check_modinfo(struct module *mod, struct load_info *info)
67689 {
67690 const char *modmagic = get_modinfo(info, "vermagic");
67691 + const char *license = get_modinfo(info, "license");
67692 int err;
67693
67694 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67695 + if (!license || !license_is_gpl_compatible(license))
67696 + return -ENOEXEC;
67697 +#endif
67698 +
67699 /* This is allowed: modprobe --force will invalidate it. */
67700 if (!modmagic) {
67701 err = try_to_force_load(mod, "bad vermagic");
67702 @@ -2536,7 +2579,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
67703 }
67704
67705 /* Set up license info based on the info section */
67706 - set_license(mod, get_modinfo(info, "license"));
67707 + set_license(mod, license);
67708
67709 return 0;
67710 }
67711 @@ -2630,7 +2673,7 @@ static int move_module(struct module *mod, struct load_info *info)
67712 void *ptr;
67713
67714 /* Do the allocs. */
67715 - ptr = module_alloc_update_bounds(mod->core_size);
67716 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67717 /*
67718 * The pointer to this block is stored in the module structure
67719 * which is inside the block. Just mark it as not being a
67720 @@ -2640,23 +2683,50 @@ static int move_module(struct module *mod, struct load_info *info)
67721 if (!ptr)
67722 return -ENOMEM;
67723
67724 - memset(ptr, 0, mod->core_size);
67725 - mod->module_core = ptr;
67726 + memset(ptr, 0, mod->core_size_rw);
67727 + mod->module_core_rw = ptr;
67728
67729 - ptr = module_alloc_update_bounds(mod->init_size);
67730 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67731 /*
67732 * The pointer to this block is stored in the module structure
67733 * which is inside the block. This block doesn't need to be
67734 * scanned as it contains data and code that will be freed
67735 * after the module is initialized.
67736 */
67737 - kmemleak_ignore(ptr);
67738 - if (!ptr && mod->init_size) {
67739 - module_free(mod, mod->module_core);
67740 + kmemleak_not_leak(ptr);
67741 + if (!ptr && mod->init_size_rw) {
67742 + module_free(mod, mod->module_core_rw);
67743 return -ENOMEM;
67744 }
67745 - memset(ptr, 0, mod->init_size);
67746 - mod->module_init = ptr;
67747 + memset(ptr, 0, mod->init_size_rw);
67748 + mod->module_init_rw = ptr;
67749 +
67750 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67751 + kmemleak_not_leak(ptr);
67752 + if (!ptr) {
67753 + module_free(mod, mod->module_init_rw);
67754 + module_free(mod, mod->module_core_rw);
67755 + return -ENOMEM;
67756 + }
67757 +
67758 + pax_open_kernel();
67759 + memset(ptr, 0, mod->core_size_rx);
67760 + pax_close_kernel();
67761 + mod->module_core_rx = ptr;
67762 +
67763 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67764 + kmemleak_not_leak(ptr);
67765 + if (!ptr && mod->init_size_rx) {
67766 + module_free_exec(mod, mod->module_core_rx);
67767 + module_free(mod, mod->module_init_rw);
67768 + module_free(mod, mod->module_core_rw);
67769 + return -ENOMEM;
67770 + }
67771 +
67772 + pax_open_kernel();
67773 + memset(ptr, 0, mod->init_size_rx);
67774 + pax_close_kernel();
67775 + mod->module_init_rx = ptr;
67776
67777 /* Transfer each section which specifies SHF_ALLOC */
67778 pr_debug("final section addresses:\n");
67779 @@ -2667,16 +2737,45 @@ static int move_module(struct module *mod, struct load_info *info)
67780 if (!(shdr->sh_flags & SHF_ALLOC))
67781 continue;
67782
67783 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
67784 - dest = mod->module_init
67785 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67786 - else
67787 - dest = mod->module_core + shdr->sh_entsize;
67788 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67789 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67790 + dest = mod->module_init_rw
67791 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67792 + else
67793 + dest = mod->module_init_rx
67794 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67795 + } else {
67796 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67797 + dest = mod->module_core_rw + shdr->sh_entsize;
67798 + else
67799 + dest = mod->module_core_rx + shdr->sh_entsize;
67800 + }
67801 +
67802 + if (shdr->sh_type != SHT_NOBITS) {
67803 +
67804 +#ifdef CONFIG_PAX_KERNEXEC
67805 +#ifdef CONFIG_X86_64
67806 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67807 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67808 +#endif
67809 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67810 + pax_open_kernel();
67811 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67812 + pax_close_kernel();
67813 + } else
67814 +#endif
67815
67816 - if (shdr->sh_type != SHT_NOBITS)
67817 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67818 + }
67819 /* Update sh_addr to point to copy in image. */
67820 - shdr->sh_addr = (unsigned long)dest;
67821 +
67822 +#ifdef CONFIG_PAX_KERNEXEC
67823 + if (shdr->sh_flags & SHF_EXECINSTR)
67824 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
67825 + else
67826 +#endif
67827 +
67828 + shdr->sh_addr = (unsigned long)dest;
67829 pr_debug("\t0x%lx %s\n",
67830 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
67831 }
67832 @@ -2727,12 +2826,12 @@ static void flush_module_icache(const struct module *mod)
67833 * Do it before processing of module parameters, so the module
67834 * can provide parameter accessor functions of its own.
67835 */
67836 - if (mod->module_init)
67837 - flush_icache_range((unsigned long)mod->module_init,
67838 - (unsigned long)mod->module_init
67839 - + mod->init_size);
67840 - flush_icache_range((unsigned long)mod->module_core,
67841 - (unsigned long)mod->module_core + mod->core_size);
67842 + if (mod->module_init_rx)
67843 + flush_icache_range((unsigned long)mod->module_init_rx,
67844 + (unsigned long)mod->module_init_rx
67845 + + mod->init_size_rx);
67846 + flush_icache_range((unsigned long)mod->module_core_rx,
67847 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
67848
67849 set_fs(old_fs);
67850 }
67851 @@ -2802,8 +2901,10 @@ out:
67852 static void module_deallocate(struct module *mod, struct load_info *info)
67853 {
67854 percpu_modfree(mod);
67855 - module_free(mod, mod->module_init);
67856 - module_free(mod, mod->module_core);
67857 + module_free_exec(mod, mod->module_init_rx);
67858 + module_free_exec(mod, mod->module_core_rx);
67859 + module_free(mod, mod->module_init_rw);
67860 + module_free(mod, mod->module_core_rw);
67861 }
67862
67863 int __weak module_finalize(const Elf_Ehdr *hdr,
67864 @@ -2867,9 +2968,38 @@ static struct module *load_module(void __user *umod,
67865 if (err)
67866 goto free_unload;
67867
67868 + /* Now copy in args */
67869 + mod->args = strndup_user(uargs, ~0UL >> 1);
67870 + if (IS_ERR(mod->args)) {
67871 + err = PTR_ERR(mod->args);
67872 + goto free_unload;
67873 + }
67874 +
67875 /* Set up MODINFO_ATTR fields */
67876 setup_modinfo(mod, &info);
67877
67878 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67879 + {
67880 + char *p, *p2;
67881 +
67882 + if (strstr(mod->args, "grsec_modharden_netdev")) {
67883 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67884 + err = -EPERM;
67885 + goto free_modinfo;
67886 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67887 + p += strlen("grsec_modharden_normal");
67888 + p2 = strstr(p, "_");
67889 + if (p2) {
67890 + *p2 = '\0';
67891 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67892 + *p2 = '_';
67893 + }
67894 + err = -EPERM;
67895 + goto free_modinfo;
67896 + }
67897 + }
67898 +#endif
67899 +
67900 /* Fix up syms, so that st_value is a pointer to location. */
67901 err = simplify_symbols(mod, &info);
67902 if (err < 0)
67903 @@ -2885,13 +3015,6 @@ static struct module *load_module(void __user *umod,
67904
67905 flush_module_icache(mod);
67906
67907 - /* Now copy in args */
67908 - mod->args = strndup_user(uargs, ~0UL >> 1);
67909 - if (IS_ERR(mod->args)) {
67910 - err = PTR_ERR(mod->args);
67911 - goto free_arch_cleanup;
67912 - }
67913 -
67914 /* Mark state as coming so strong_try_module_get() ignores us. */
67915 mod->state = MODULE_STATE_COMING;
67916
67917 @@ -2948,11 +3071,10 @@ static struct module *load_module(void __user *umod,
67918 unlock:
67919 mutex_unlock(&module_mutex);
67920 synchronize_sched();
67921 - kfree(mod->args);
67922 - free_arch_cleanup:
67923 module_arch_cleanup(mod);
67924 free_modinfo:
67925 free_modinfo(mod);
67926 + kfree(mod->args);
67927 free_unload:
67928 module_unload_free(mod);
67929 free_module:
67930 @@ -2993,16 +3115,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67931 MODULE_STATE_COMING, mod);
67932
67933 /* Set RO and NX regions for core */
67934 - set_section_ro_nx(mod->module_core,
67935 - mod->core_text_size,
67936 - mod->core_ro_size,
67937 - mod->core_size);
67938 + set_section_ro_nx(mod->module_core_rx,
67939 + mod->core_size_rx,
67940 + mod->core_size_rx,
67941 + mod->core_size_rx);
67942
67943 /* Set RO and NX regions for init */
67944 - set_section_ro_nx(mod->module_init,
67945 - mod->init_text_size,
67946 - mod->init_ro_size,
67947 - mod->init_size);
67948 + set_section_ro_nx(mod->module_init_rx,
67949 + mod->init_size_rx,
67950 + mod->init_size_rx,
67951 + mod->init_size_rx);
67952
67953 do_mod_ctors(mod);
67954 /* Start the module */
67955 @@ -3048,11 +3170,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67956 mod->strtab = mod->core_strtab;
67957 #endif
67958 unset_module_init_ro_nx(mod);
67959 - module_free(mod, mod->module_init);
67960 - mod->module_init = NULL;
67961 - mod->init_size = 0;
67962 - mod->init_ro_size = 0;
67963 - mod->init_text_size = 0;
67964 + module_free(mod, mod->module_init_rw);
67965 + module_free_exec(mod, mod->module_init_rx);
67966 + mod->module_init_rw = NULL;
67967 + mod->module_init_rx = NULL;
67968 + mod->init_size_rw = 0;
67969 + mod->init_size_rx = 0;
67970 mutex_unlock(&module_mutex);
67971
67972 return 0;
67973 @@ -3083,10 +3206,16 @@ static const char *get_ksymbol(struct module *mod,
67974 unsigned long nextval;
67975
67976 /* At worse, next value is at end of module */
67977 - if (within_module_init(addr, mod))
67978 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
67979 + if (within_module_init_rx(addr, mod))
67980 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67981 + else if (within_module_init_rw(addr, mod))
67982 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67983 + else if (within_module_core_rx(addr, mod))
67984 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67985 + else if (within_module_core_rw(addr, mod))
67986 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67987 else
67988 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
67989 + return NULL;
67990
67991 /* Scan for closest preceding symbol, and next symbol. (ELF
67992 starts real symbols at 1). */
67993 @@ -3321,7 +3450,7 @@ static int m_show(struct seq_file *m, void *p)
67994 char buf[8];
67995
67996 seq_printf(m, "%s %u",
67997 - mod->name, mod->init_size + mod->core_size);
67998 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67999 print_unload_info(m, mod);
68000
68001 /* Informative for users. */
68002 @@ -3330,7 +3459,7 @@ static int m_show(struct seq_file *m, void *p)
68003 mod->state == MODULE_STATE_COMING ? "Loading":
68004 "Live");
68005 /* Used by oprofile and other similar tools. */
68006 - seq_printf(m, " 0x%pK", mod->module_core);
68007 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
68008
68009 /* Taints info */
68010 if (mod->taints)
68011 @@ -3366,7 +3495,17 @@ static const struct file_operations proc_modules_operations = {
68012
68013 static int __init proc_modules_init(void)
68014 {
68015 +#ifndef CONFIG_GRKERNSEC_HIDESYM
68016 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68017 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68018 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68019 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
68020 +#else
68021 proc_create("modules", 0, NULL, &proc_modules_operations);
68022 +#endif
68023 +#else
68024 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
68025 +#endif
68026 return 0;
68027 }
68028 module_init(proc_modules_init);
68029 @@ -3425,12 +3564,12 @@ struct module *__module_address(unsigned long addr)
68030 {
68031 struct module *mod;
68032
68033 - if (addr < module_addr_min || addr > module_addr_max)
68034 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
68035 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
68036 return NULL;
68037
68038 list_for_each_entry_rcu(mod, &modules, list)
68039 - if (within_module_core(addr, mod)
68040 - || within_module_init(addr, mod))
68041 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
68042 return mod;
68043 return NULL;
68044 }
68045 @@ -3464,11 +3603,20 @@ bool is_module_text_address(unsigned long addr)
68046 */
68047 struct module *__module_text_address(unsigned long addr)
68048 {
68049 - struct module *mod = __module_address(addr);
68050 + struct module *mod;
68051 +
68052 +#ifdef CONFIG_X86_32
68053 + addr = ktla_ktva(addr);
68054 +#endif
68055 +
68056 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
68057 + return NULL;
68058 +
68059 + mod = __module_address(addr);
68060 +
68061 if (mod) {
68062 /* Make sure it's within the text section. */
68063 - if (!within(addr, mod->module_init, mod->init_text_size)
68064 - && !within(addr, mod->module_core, mod->core_text_size))
68065 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
68066 mod = NULL;
68067 }
68068 return mod;
68069 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
68070 index 7e3443f..b2a1e6b 100644
68071 --- a/kernel/mutex-debug.c
68072 +++ b/kernel/mutex-debug.c
68073 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
68074 }
68075
68076 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68077 - struct thread_info *ti)
68078 + struct task_struct *task)
68079 {
68080 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
68081
68082 /* Mark the current thread as blocked on the lock: */
68083 - ti->task->blocked_on = waiter;
68084 + task->blocked_on = waiter;
68085 }
68086
68087 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68088 - struct thread_info *ti)
68089 + struct task_struct *task)
68090 {
68091 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
68092 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
68093 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
68094 - ti->task->blocked_on = NULL;
68095 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
68096 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
68097 + task->blocked_on = NULL;
68098
68099 list_del_init(&waiter->list);
68100 waiter->task = NULL;
68101 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
68102 index 0799fd3..d06ae3b 100644
68103 --- a/kernel/mutex-debug.h
68104 +++ b/kernel/mutex-debug.h
68105 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
68106 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
68107 extern void debug_mutex_add_waiter(struct mutex *lock,
68108 struct mutex_waiter *waiter,
68109 - struct thread_info *ti);
68110 + struct task_struct *task);
68111 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
68112 - struct thread_info *ti);
68113 + struct task_struct *task);
68114 extern void debug_mutex_unlock(struct mutex *lock);
68115 extern void debug_mutex_init(struct mutex *lock, const char *name,
68116 struct lock_class_key *key);
68117 diff --git a/kernel/mutex.c b/kernel/mutex.c
68118 index 89096dd..f91ebc5 100644
68119 --- a/kernel/mutex.c
68120 +++ b/kernel/mutex.c
68121 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68122 spin_lock_mutex(&lock->wait_lock, flags);
68123
68124 debug_mutex_lock_common(lock, &waiter);
68125 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
68126 + debug_mutex_add_waiter(lock, &waiter, task);
68127
68128 /* add waiting tasks to the end of the waitqueue (FIFO): */
68129 list_add_tail(&waiter.list, &lock->wait_list);
68130 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68131 * TASK_UNINTERRUPTIBLE case.)
68132 */
68133 if (unlikely(signal_pending_state(state, task))) {
68134 - mutex_remove_waiter(lock, &waiter,
68135 - task_thread_info(task));
68136 + mutex_remove_waiter(lock, &waiter, task);
68137 mutex_release(&lock->dep_map, 1, ip);
68138 spin_unlock_mutex(&lock->wait_lock, flags);
68139
68140 @@ -249,7 +248,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
68141 done:
68142 lock_acquired(&lock->dep_map, ip);
68143 /* got the lock - rejoice! */
68144 - mutex_remove_waiter(lock, &waiter, current_thread_info());
68145 + mutex_remove_waiter(lock, &waiter, task);
68146 mutex_set_owner(lock);
68147
68148 /* set it to 0 if there are no waiters left: */
68149 diff --git a/kernel/padata.c b/kernel/padata.c
68150 index b452599..5d68f4e 100644
68151 --- a/kernel/padata.c
68152 +++ b/kernel/padata.c
68153 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_instance *pinst,
68154 padata->pd = pd;
68155 padata->cb_cpu = cb_cpu;
68156
68157 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
68158 - atomic_set(&pd->seq_nr, -1);
68159 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
68160 + atomic_set_unchecked(&pd->seq_nr, -1);
68161
68162 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
68163 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
68164
68165 target_cpu = padata_cpu_hash(padata);
68166 queue = per_cpu_ptr(pd->pqueue, target_cpu);
68167 @@ -444,7 +444,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
68168 padata_init_pqueues(pd);
68169 padata_init_squeues(pd);
68170 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
68171 - atomic_set(&pd->seq_nr, -1);
68172 + atomic_set_unchecked(&pd->seq_nr, -1);
68173 atomic_set(&pd->reorder_objects, 0);
68174 atomic_set(&pd->refcnt, 0);
68175 pd->pinst = pinst;
68176 diff --git a/kernel/panic.c b/kernel/panic.c
68177 index 8ed89a1..e83856a 100644
68178 --- a/kernel/panic.c
68179 +++ b/kernel/panic.c
68180 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
68181 const char *board;
68182
68183 printk(KERN_WARNING "------------[ cut here ]------------\n");
68184 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
68185 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
68186 board = dmi_get_system_info(DMI_PRODUCT_NAME);
68187 if (board)
68188 printk(KERN_WARNING "Hardware name: %s\n", board);
68189 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
68190 */
68191 void __stack_chk_fail(void)
68192 {
68193 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
68194 + dump_stack();
68195 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
68196 __builtin_return_address(0));
68197 }
68198 EXPORT_SYMBOL(__stack_chk_fail);
68199 diff --git a/kernel/pid.c b/kernel/pid.c
68200 index 9f08dfa..6765c40 100644
68201 --- a/kernel/pid.c
68202 +++ b/kernel/pid.c
68203 @@ -33,6 +33,7 @@
68204 #include <linux/rculist.h>
68205 #include <linux/bootmem.h>
68206 #include <linux/hash.h>
68207 +#include <linux/security.h>
68208 #include <linux/pid_namespace.h>
68209 #include <linux/init_task.h>
68210 #include <linux/syscalls.h>
68211 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
68212
68213 int pid_max = PID_MAX_DEFAULT;
68214
68215 -#define RESERVED_PIDS 300
68216 +#define RESERVED_PIDS 500
68217
68218 int pid_max_min = RESERVED_PIDS + 1;
68219 int pid_max_max = PID_MAX_LIMIT;
68220 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
68221 */
68222 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
68223 {
68224 + struct task_struct *task;
68225 +
68226 rcu_lockdep_assert(rcu_read_lock_held(),
68227 "find_task_by_pid_ns() needs rcu_read_lock()"
68228 " protection");
68229 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68230 +
68231 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
68232 +
68233 + if (gr_pid_is_chrooted(task))
68234 + return NULL;
68235 +
68236 + return task;
68237 }
68238
68239 struct task_struct *find_task_by_vpid(pid_t vnr)
68240 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
68241 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
68242 }
68243
68244 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
68245 +{
68246 + rcu_lockdep_assert(rcu_read_lock_held(),
68247 + "find_task_by_pid_ns() needs rcu_read_lock()"
68248 + " protection");
68249 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
68250 +}
68251 +
68252 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
68253 {
68254 struct pid *pid;
68255 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
68256 index 125cb67..a4d1c30 100644
68257 --- a/kernel/posix-cpu-timers.c
68258 +++ b/kernel/posix-cpu-timers.c
68259 @@ -6,6 +6,7 @@
68260 #include <linux/posix-timers.h>
68261 #include <linux/errno.h>
68262 #include <linux/math64.h>
68263 +#include <linux/security.h>
68264 #include <asm/uaccess.h>
68265 #include <linux/kernel_stat.h>
68266 #include <trace/events/timer.h>
68267 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
68268
68269 static __init int init_posix_cpu_timers(void)
68270 {
68271 - struct k_clock process = {
68272 + static struct k_clock process = {
68273 .clock_getres = process_cpu_clock_getres,
68274 .clock_get = process_cpu_clock_get,
68275 .timer_create = process_cpu_timer_create,
68276 .nsleep = process_cpu_nsleep,
68277 .nsleep_restart = process_cpu_nsleep_restart,
68278 };
68279 - struct k_clock thread = {
68280 + static struct k_clock thread = {
68281 .clock_getres = thread_cpu_clock_getres,
68282 .clock_get = thread_cpu_clock_get,
68283 .timer_create = thread_cpu_timer_create,
68284 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
68285 index 69185ae..cc2847a 100644
68286 --- a/kernel/posix-timers.c
68287 +++ b/kernel/posix-timers.c
68288 @@ -43,6 +43,7 @@
68289 #include <linux/idr.h>
68290 #include <linux/posix-clock.h>
68291 #include <linux/posix-timers.h>
68292 +#include <linux/grsecurity.h>
68293 #include <linux/syscalls.h>
68294 #include <linux/wait.h>
68295 #include <linux/workqueue.h>
68296 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
68297 * which we beg off on and pass to do_sys_settimeofday().
68298 */
68299
68300 -static struct k_clock posix_clocks[MAX_CLOCKS];
68301 +static struct k_clock *posix_clocks[MAX_CLOCKS];
68302
68303 /*
68304 * These ones are defined below.
68305 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
68306 */
68307 static __init int init_posix_timers(void)
68308 {
68309 - struct k_clock clock_realtime = {
68310 + static struct k_clock clock_realtime = {
68311 .clock_getres = hrtimer_get_res,
68312 .clock_get = posix_clock_realtime_get,
68313 .clock_set = posix_clock_realtime_set,
68314 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
68315 .timer_get = common_timer_get,
68316 .timer_del = common_timer_del,
68317 };
68318 - struct k_clock clock_monotonic = {
68319 + static struct k_clock clock_monotonic = {
68320 .clock_getres = hrtimer_get_res,
68321 .clock_get = posix_ktime_get_ts,
68322 .nsleep = common_nsleep,
68323 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
68324 .timer_get = common_timer_get,
68325 .timer_del = common_timer_del,
68326 };
68327 - struct k_clock clock_monotonic_raw = {
68328 + static struct k_clock clock_monotonic_raw = {
68329 .clock_getres = hrtimer_get_res,
68330 .clock_get = posix_get_monotonic_raw,
68331 };
68332 - struct k_clock clock_realtime_coarse = {
68333 + static struct k_clock clock_realtime_coarse = {
68334 .clock_getres = posix_get_coarse_res,
68335 .clock_get = posix_get_realtime_coarse,
68336 };
68337 - struct k_clock clock_monotonic_coarse = {
68338 + static struct k_clock clock_monotonic_coarse = {
68339 .clock_getres = posix_get_coarse_res,
68340 .clock_get = posix_get_monotonic_coarse,
68341 };
68342 - struct k_clock clock_boottime = {
68343 + static struct k_clock clock_boottime = {
68344 .clock_getres = hrtimer_get_res,
68345 .clock_get = posix_get_boottime,
68346 .nsleep = common_nsleep,
68347 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
68348 return;
68349 }
68350
68351 - posix_clocks[clock_id] = *new_clock;
68352 + posix_clocks[clock_id] = new_clock;
68353 }
68354 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
68355
68356 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
68357 return (id & CLOCKFD_MASK) == CLOCKFD ?
68358 &clock_posix_dynamic : &clock_posix_cpu;
68359
68360 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
68361 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
68362 return NULL;
68363 - return &posix_clocks[id];
68364 + return posix_clocks[id];
68365 }
68366
68367 static int common_timer_create(struct k_itimer *new_timer)
68368 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
68369 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
68370 return -EFAULT;
68371
68372 + /* only the CLOCK_REALTIME clock can be set, all other clocks
68373 + have their clock_set fptr set to a nosettime dummy function
68374 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
68375 + call common_clock_set, which calls do_sys_settimeofday, which
68376 + we hook
68377 + */
68378 +
68379 return kc->clock_set(which_clock, &new_tp);
68380 }
68381
68382 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
68383 index d523593..68197a4 100644
68384 --- a/kernel/power/poweroff.c
68385 +++ b/kernel/power/poweroff.c
68386 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
68387 .enable_mask = SYSRQ_ENABLE_BOOT,
68388 };
68389
68390 -static int pm_sysrq_init(void)
68391 +static int __init pm_sysrq_init(void)
68392 {
68393 register_sysrq_key('o', &sysrq_poweroff_op);
68394 return 0;
68395 diff --git a/kernel/power/process.c b/kernel/power/process.c
68396 index 7aac07a..2d3c6dc 100644
68397 --- a/kernel/power/process.c
68398 +++ b/kernel/power/process.c
68399 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
68400 u64 elapsed_csecs64;
68401 unsigned int elapsed_csecs;
68402 bool wakeup = false;
68403 + bool timedout = false;
68404
68405 do_gettimeofday(&start);
68406
68407 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
68408
68409 while (true) {
68410 todo = 0;
68411 + if (time_after(jiffies, end_time))
68412 + timedout = true;
68413 read_lock(&tasklist_lock);
68414 do_each_thread(g, p) {
68415 if (p == current || !freeze_task(p))
68416 @@ -60,9 +63,13 @@ static int try_to_freeze_tasks(bool user_only)
68417 * try_to_stop() after schedule() in ptrace/signal
68418 * stop sees TIF_FREEZE.
68419 */
68420 - if (!task_is_stopped_or_traced(p) &&
68421 - !freezer_should_skip(p))
68422 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
68423 todo++;
68424 + if (timedout) {
68425 + printk(KERN_ERR "Task refusing to freeze:\n");
68426 + sched_show_task(p);
68427 + }
68428 + }
68429 } while_each_thread(g, p);
68430 read_unlock(&tasklist_lock);
68431
68432 @@ -71,7 +78,7 @@ static int try_to_freeze_tasks(bool user_only)
68433 todo += wq_busy;
68434 }
68435
68436 - if (!todo || time_after(jiffies, end_time))
68437 + if (!todo || timedout)
68438 break;
68439
68440 if (pm_wakeup_pending()) {
68441 diff --git a/kernel/printk.c b/kernel/printk.c
68442 index 32690a0..cd7c798 100644
68443 --- a/kernel/printk.c
68444 +++ b/kernel/printk.c
68445 @@ -313,6 +313,11 @@ static int check_syslog_permissions(int type, bool from_file)
68446 if (from_file && type != SYSLOG_ACTION_OPEN)
68447 return 0;
68448
68449 +#ifdef CONFIG_GRKERNSEC_DMESG
68450 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
68451 + return -EPERM;
68452 +#endif
68453 +
68454 if (syslog_action_restricted(type)) {
68455 if (capable(CAP_SYSLOG))
68456 return 0;
68457 diff --git a/kernel/profile.c b/kernel/profile.c
68458 index 76b8e77..a2930e8 100644
68459 --- a/kernel/profile.c
68460 +++ b/kernel/profile.c
68461 @@ -39,7 +39,7 @@ struct profile_hit {
68462 /* Oprofile timer tick hook */
68463 static int (*timer_hook)(struct pt_regs *) __read_mostly;
68464
68465 -static atomic_t *prof_buffer;
68466 +static atomic_unchecked_t *prof_buffer;
68467 static unsigned long prof_len, prof_shift;
68468
68469 int prof_on __read_mostly;
68470 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
68471 hits[i].pc = 0;
68472 continue;
68473 }
68474 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68475 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68476 hits[i].hits = hits[i].pc = 0;
68477 }
68478 }
68479 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68480 * Add the current hit(s) and flush the write-queue out
68481 * to the global buffer:
68482 */
68483 - atomic_add(nr_hits, &prof_buffer[pc]);
68484 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
68485 for (i = 0; i < NR_PROFILE_HIT; ++i) {
68486 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
68487 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
68488 hits[i].pc = hits[i].hits = 0;
68489 }
68490 out:
68491 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
68492 {
68493 unsigned long pc;
68494 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
68495 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68496 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68497 }
68498 #endif /* !CONFIG_SMP */
68499
68500 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
68501 return -EFAULT;
68502 buf++; p++; count--; read++;
68503 }
68504 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
68505 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
68506 if (copy_to_user(buf, (void *)pnt, count))
68507 return -EFAULT;
68508 read += count;
68509 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
68510 }
68511 #endif
68512 profile_discard_flip_buffers();
68513 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
68514 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
68515 return count;
68516 }
68517
68518 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
68519 index 00ab2ca..d237f61 100644
68520 --- a/kernel/ptrace.c
68521 +++ b/kernel/ptrace.c
68522 @@ -285,7 +285,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68523 task->ptrace = PT_PTRACED;
68524 if (seize)
68525 task->ptrace |= PT_SEIZED;
68526 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
68527 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
68528 task->ptrace |= PT_PTRACE_CAP;
68529
68530 __ptrace_link(task, current);
68531 @@ -491,7 +491,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
68532 break;
68533 return -EIO;
68534 }
68535 - if (copy_to_user(dst, buf, retval))
68536 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68537 return -EFAULT;
68538 copied += retval;
68539 src += retval;
68540 @@ -688,7 +688,7 @@ int ptrace_request(struct task_struct *child, long request,
68541 bool seized = child->ptrace & PT_SEIZED;
68542 int ret = -EIO;
68543 siginfo_t siginfo, *si;
68544 - void __user *datavp = (void __user *) data;
68545 + void __user *datavp = (__force void __user *) data;
68546 unsigned long __user *datalp = datavp;
68547 unsigned long flags;
68548
68549 @@ -890,14 +890,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
68550 goto out;
68551 }
68552
68553 + if (gr_handle_ptrace(child, request)) {
68554 + ret = -EPERM;
68555 + goto out_put_task_struct;
68556 + }
68557 +
68558 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68559 ret = ptrace_attach(child, request, data);
68560 /*
68561 * Some architectures need to do book-keeping after
68562 * a ptrace attach.
68563 */
68564 - if (!ret)
68565 + if (!ret) {
68566 arch_ptrace_attach(child);
68567 + gr_audit_ptrace(child);
68568 + }
68569 goto out_put_task_struct;
68570 }
68571
68572 @@ -923,7 +930,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
68573 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68574 if (copied != sizeof(tmp))
68575 return -EIO;
68576 - return put_user(tmp, (unsigned long __user *)data);
68577 + return put_user(tmp, (__force unsigned long __user *)data);
68578 }
68579
68580 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
68581 @@ -1033,14 +1040,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68582 goto out;
68583 }
68584
68585 + if (gr_handle_ptrace(child, request)) {
68586 + ret = -EPERM;
68587 + goto out_put_task_struct;
68588 + }
68589 +
68590 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
68591 ret = ptrace_attach(child, request, data);
68592 /*
68593 * Some architectures need to do book-keeping after
68594 * a ptrace attach.
68595 */
68596 - if (!ret)
68597 + if (!ret) {
68598 arch_ptrace_attach(child);
68599 + gr_audit_ptrace(child);
68600 + }
68601 goto out_put_task_struct;
68602 }
68603
68604 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
68605 index 977296d..c4744dc 100644
68606 --- a/kernel/rcutiny.c
68607 +++ b/kernel/rcutiny.c
68608 @@ -46,7 +46,7 @@
68609 struct rcu_ctrlblk;
68610 static void invoke_rcu_callbacks(void);
68611 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
68612 -static void rcu_process_callbacks(struct softirq_action *unused);
68613 +static void rcu_process_callbacks(void);
68614 static void __call_rcu(struct rcu_head *head,
68615 void (*func)(struct rcu_head *rcu),
68616 struct rcu_ctrlblk *rcp);
68617 @@ -297,7 +297,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
68618 rcu_is_callbacks_kthread()));
68619 }
68620
68621 -static void rcu_process_callbacks(struct softirq_action *unused)
68622 +static void rcu_process_callbacks(void)
68623 {
68624 __rcu_process_callbacks(&rcu_sched_ctrlblk);
68625 __rcu_process_callbacks(&rcu_bh_ctrlblk);
68626 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
68627 index 9cb1ae4..aac7d3e 100644
68628 --- a/kernel/rcutiny_plugin.h
68629 +++ b/kernel/rcutiny_plugin.h
68630 @@ -920,7 +920,7 @@ static int rcu_kthread(void *arg)
68631 have_rcu_kthread_work = morework;
68632 local_irq_restore(flags);
68633 if (work)
68634 - rcu_process_callbacks(NULL);
68635 + rcu_process_callbacks();
68636 schedule_timeout_interruptible(1); /* Leave CPU for others. */
68637 }
68638
68639 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
68640 index a58ac28..196a3d8 100644
68641 --- a/kernel/rcutorture.c
68642 +++ b/kernel/rcutorture.c
68643 @@ -148,12 +148,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
68644 { 0 };
68645 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68646 { 0 };
68647 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68648 -static atomic_t n_rcu_torture_alloc;
68649 -static atomic_t n_rcu_torture_alloc_fail;
68650 -static atomic_t n_rcu_torture_free;
68651 -static atomic_t n_rcu_torture_mberror;
68652 -static atomic_t n_rcu_torture_error;
68653 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68654 +static atomic_unchecked_t n_rcu_torture_alloc;
68655 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
68656 +static atomic_unchecked_t n_rcu_torture_free;
68657 +static atomic_unchecked_t n_rcu_torture_mberror;
68658 +static atomic_unchecked_t n_rcu_torture_error;
68659 static long n_rcu_torture_boost_ktrerror;
68660 static long n_rcu_torture_boost_rterror;
68661 static long n_rcu_torture_boost_failure;
68662 @@ -243,11 +243,11 @@ rcu_torture_alloc(void)
68663
68664 spin_lock_bh(&rcu_torture_lock);
68665 if (list_empty(&rcu_torture_freelist)) {
68666 - atomic_inc(&n_rcu_torture_alloc_fail);
68667 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68668 spin_unlock_bh(&rcu_torture_lock);
68669 return NULL;
68670 }
68671 - atomic_inc(&n_rcu_torture_alloc);
68672 + atomic_inc_unchecked(&n_rcu_torture_alloc);
68673 p = rcu_torture_freelist.next;
68674 list_del_init(p);
68675 spin_unlock_bh(&rcu_torture_lock);
68676 @@ -260,7 +260,7 @@ rcu_torture_alloc(void)
68677 static void
68678 rcu_torture_free(struct rcu_torture *p)
68679 {
68680 - atomic_inc(&n_rcu_torture_free);
68681 + atomic_inc_unchecked(&n_rcu_torture_free);
68682 spin_lock_bh(&rcu_torture_lock);
68683 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68684 spin_unlock_bh(&rcu_torture_lock);
68685 @@ -380,7 +380,7 @@ rcu_torture_cb(struct rcu_head *p)
68686 i = rp->rtort_pipe_count;
68687 if (i > RCU_TORTURE_PIPE_LEN)
68688 i = RCU_TORTURE_PIPE_LEN;
68689 - atomic_inc(&rcu_torture_wcount[i]);
68690 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68691 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68692 rp->rtort_mbtest = 0;
68693 rcu_torture_free(rp);
68694 @@ -427,7 +427,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
68695 i = rp->rtort_pipe_count;
68696 if (i > RCU_TORTURE_PIPE_LEN)
68697 i = RCU_TORTURE_PIPE_LEN;
68698 - atomic_inc(&rcu_torture_wcount[i]);
68699 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68700 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68701 rp->rtort_mbtest = 0;
68702 list_del(&rp->rtort_free);
68703 @@ -916,7 +916,7 @@ rcu_torture_writer(void *arg)
68704 i = old_rp->rtort_pipe_count;
68705 if (i > RCU_TORTURE_PIPE_LEN)
68706 i = RCU_TORTURE_PIPE_LEN;
68707 - atomic_inc(&rcu_torture_wcount[i]);
68708 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
68709 old_rp->rtort_pipe_count++;
68710 cur_ops->deferred_free(old_rp);
68711 }
68712 @@ -997,7 +997,7 @@ static void rcu_torture_timer(unsigned long unused)
68713 return;
68714 }
68715 if (p->rtort_mbtest == 0)
68716 - atomic_inc(&n_rcu_torture_mberror);
68717 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68718 spin_lock(&rand_lock);
68719 cur_ops->read_delay(&rand);
68720 n_rcu_torture_timers++;
68721 @@ -1061,7 +1061,7 @@ rcu_torture_reader(void *arg)
68722 continue;
68723 }
68724 if (p->rtort_mbtest == 0)
68725 - atomic_inc(&n_rcu_torture_mberror);
68726 + atomic_inc_unchecked(&n_rcu_torture_mberror);
68727 cur_ops->read_delay(&rand);
68728 preempt_disable();
68729 pipe_count = p->rtort_pipe_count;
68730 @@ -1123,10 +1123,10 @@ rcu_torture_printk(char *page)
68731 rcu_torture_current,
68732 rcu_torture_current_version,
68733 list_empty(&rcu_torture_freelist),
68734 - atomic_read(&n_rcu_torture_alloc),
68735 - atomic_read(&n_rcu_torture_alloc_fail),
68736 - atomic_read(&n_rcu_torture_free),
68737 - atomic_read(&n_rcu_torture_mberror),
68738 + atomic_read_unchecked(&n_rcu_torture_alloc),
68739 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68740 + atomic_read_unchecked(&n_rcu_torture_free),
68741 + atomic_read_unchecked(&n_rcu_torture_mberror),
68742 n_rcu_torture_boost_ktrerror,
68743 n_rcu_torture_boost_rterror,
68744 n_rcu_torture_boost_failure,
68745 @@ -1136,7 +1136,7 @@ rcu_torture_printk(char *page)
68746 n_online_attempts,
68747 n_offline_successes,
68748 n_offline_attempts);
68749 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68750 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68751 n_rcu_torture_boost_ktrerror != 0 ||
68752 n_rcu_torture_boost_rterror != 0 ||
68753 n_rcu_torture_boost_failure != 0)
68754 @@ -1144,7 +1144,7 @@ rcu_torture_printk(char *page)
68755 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68756 if (i > 1) {
68757 cnt += sprintf(&page[cnt], "!!! ");
68758 - atomic_inc(&n_rcu_torture_error);
68759 + atomic_inc_unchecked(&n_rcu_torture_error);
68760 WARN_ON_ONCE(1);
68761 }
68762 cnt += sprintf(&page[cnt], "Reader Pipe: ");
68763 @@ -1158,7 +1158,7 @@ rcu_torture_printk(char *page)
68764 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68765 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68766 cnt += sprintf(&page[cnt], " %d",
68767 - atomic_read(&rcu_torture_wcount[i]));
68768 + atomic_read_unchecked(&rcu_torture_wcount[i]));
68769 }
68770 cnt += sprintf(&page[cnt], "\n");
68771 if (cur_ops->stats)
68772 @@ -1600,7 +1600,7 @@ rcu_torture_cleanup(void)
68773
68774 if (cur_ops->cleanup)
68775 cur_ops->cleanup();
68776 - if (atomic_read(&n_rcu_torture_error))
68777 + if (atomic_read_unchecked(&n_rcu_torture_error))
68778 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68779 else
68780 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
68781 @@ -1664,17 +1664,17 @@ rcu_torture_init(void)
68782
68783 rcu_torture_current = NULL;
68784 rcu_torture_current_version = 0;
68785 - atomic_set(&n_rcu_torture_alloc, 0);
68786 - atomic_set(&n_rcu_torture_alloc_fail, 0);
68787 - atomic_set(&n_rcu_torture_free, 0);
68788 - atomic_set(&n_rcu_torture_mberror, 0);
68789 - atomic_set(&n_rcu_torture_error, 0);
68790 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68791 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68792 + atomic_set_unchecked(&n_rcu_torture_free, 0);
68793 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68794 + atomic_set_unchecked(&n_rcu_torture_error, 0);
68795 n_rcu_torture_boost_ktrerror = 0;
68796 n_rcu_torture_boost_rterror = 0;
68797 n_rcu_torture_boost_failure = 0;
68798 n_rcu_torture_boosts = 0;
68799 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68800 - atomic_set(&rcu_torture_wcount[i], 0);
68801 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68802 for_each_possible_cpu(cpu) {
68803 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68804 per_cpu(rcu_torture_count, cpu)[i] = 0;
68805 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68806 index 6c4a672..70f3202 100644
68807 --- a/kernel/rcutree.c
68808 +++ b/kernel/rcutree.c
68809 @@ -363,9 +363,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
68810 rcu_prepare_for_idle(smp_processor_id());
68811 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68812 smp_mb__before_atomic_inc(); /* See above. */
68813 - atomic_inc(&rdtp->dynticks);
68814 + atomic_inc_unchecked(&rdtp->dynticks);
68815 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68816 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68817 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68818 }
68819
68820 /**
68821 @@ -438,10 +438,10 @@ void rcu_irq_exit(void)
68822 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68823 {
68824 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68825 - atomic_inc(&rdtp->dynticks);
68826 + atomic_inc_unchecked(&rdtp->dynticks);
68827 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68828 smp_mb__after_atomic_inc(); /* See above. */
68829 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68830 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68831 rcu_cleanup_after_idle(smp_processor_id());
68832 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68833 if (!is_idle_task(current)) {
68834 @@ -531,14 +531,14 @@ void rcu_nmi_enter(void)
68835 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68836
68837 if (rdtp->dynticks_nmi_nesting == 0 &&
68838 - (atomic_read(&rdtp->dynticks) & 0x1))
68839 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68840 return;
68841 rdtp->dynticks_nmi_nesting++;
68842 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68843 - atomic_inc(&rdtp->dynticks);
68844 + atomic_inc_unchecked(&rdtp->dynticks);
68845 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68846 smp_mb__after_atomic_inc(); /* See above. */
68847 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68848 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68849 }
68850
68851 /**
68852 @@ -557,9 +557,9 @@ void rcu_nmi_exit(void)
68853 return;
68854 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68855 smp_mb__before_atomic_inc(); /* See above. */
68856 - atomic_inc(&rdtp->dynticks);
68857 + atomic_inc_unchecked(&rdtp->dynticks);
68858 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68859 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68860 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68861 }
68862
68863 #ifdef CONFIG_PROVE_RCU
68864 @@ -575,7 +575,7 @@ int rcu_is_cpu_idle(void)
68865 int ret;
68866
68867 preempt_disable();
68868 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68869 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68870 preempt_enable();
68871 return ret;
68872 }
68873 @@ -604,7 +604,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
68874 */
68875 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68876 {
68877 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68878 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68879 return (rdp->dynticks_snap & 0x1) == 0;
68880 }
68881
68882 @@ -619,7 +619,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68883 unsigned int curr;
68884 unsigned int snap;
68885
68886 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68887 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68888 snap = (unsigned int)rdp->dynticks_snap;
68889
68890 /*
68891 @@ -1667,7 +1667,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68892 /*
68893 * Do RCU core processing for the current CPU.
68894 */
68895 -static void rcu_process_callbacks(struct softirq_action *unused)
68896 +static void rcu_process_callbacks(void)
68897 {
68898 trace_rcu_utilization("Start RCU core");
68899 __rcu_process_callbacks(&rcu_sched_state,
68900 @@ -2030,7 +2030,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68901 rdp->qlen = 0;
68902 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68903 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_NESTING);
68904 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68905 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68906 rdp->cpu = cpu;
68907 rdp->rsp = rsp;
68908 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68909 @@ -2058,8 +2058,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68910 rdp->n_force_qs_snap = rsp->n_force_qs;
68911 rdp->blimit = blimit;
68912 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_NESTING;
68913 - atomic_set(&rdp->dynticks->dynticks,
68914 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68915 + atomic_set_unchecked(&rdp->dynticks->dynticks,
68916 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68917 rcu_prepare_for_idle_init(cpu);
68918 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68919
68920 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68921 index fddff92..2c08359 100644
68922 --- a/kernel/rcutree.h
68923 +++ b/kernel/rcutree.h
68924 @@ -87,7 +87,7 @@ struct rcu_dynticks {
68925 long long dynticks_nesting; /* Track irq/process nesting level. */
68926 /* Process level is worth LLONG_MAX/2. */
68927 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68928 - atomic_t dynticks; /* Even value for idle, else odd. */
68929 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68930 };
68931
68932 /* RCU's kthread states for tracing. */
68933 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68934 index 8bb35d7..6ea0a463 100644
68935 --- a/kernel/rcutree_plugin.h
68936 +++ b/kernel/rcutree_plugin.h
68937 @@ -850,7 +850,7 @@ void synchronize_rcu_expedited(void)
68938
68939 /* Clean up and exit. */
68940 smp_mb(); /* ensure expedited GP seen before counter increment. */
68941 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68942 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68943 unlock_mb_ret:
68944 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68945 mb_ret:
68946 @@ -1833,8 +1833,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
68947
68948 #else /* #ifndef CONFIG_SMP */
68949
68950 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68951 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68952 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68953 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68954
68955 static int synchronize_sched_expedited_cpu_stop(void *data)
68956 {
68957 @@ -1889,7 +1889,7 @@ void synchronize_sched_expedited(void)
68958 int firstsnap, s, snap, trycount = 0;
68959
68960 /* Note that atomic_inc_return() implies full memory barrier. */
68961 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68962 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68963 get_online_cpus();
68964
68965 /*
68966 @@ -1910,7 +1910,7 @@ void synchronize_sched_expedited(void)
68967 }
68968
68969 /* Check to see if someone else did our work for us. */
68970 - s = atomic_read(&sync_sched_expedited_done);
68971 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68972 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68973 smp_mb(); /* ensure test happens before caller kfree */
68974 return;
68975 @@ -1925,7 +1925,7 @@ void synchronize_sched_expedited(void)
68976 * grace period works for us.
68977 */
68978 get_online_cpus();
68979 - snap = atomic_read(&sync_sched_expedited_started);
68980 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
68981 smp_mb(); /* ensure read is before try_stop_cpus(). */
68982 }
68983
68984 @@ -1936,12 +1936,12 @@ void synchronize_sched_expedited(void)
68985 * than we did beat us to the punch.
68986 */
68987 do {
68988 - s = atomic_read(&sync_sched_expedited_done);
68989 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68990 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68991 smp_mb(); /* ensure test happens before caller kfree */
68992 break;
68993 }
68994 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68995 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68996
68997 put_online_cpus();
68998 }
68999 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
69000 index 654cfe6..c0b28e2 100644
69001 --- a/kernel/rcutree_trace.c
69002 +++ b/kernel/rcutree_trace.c
69003 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
69004 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69005 rdp->qs_pending);
69006 seq_printf(m, " dt=%d/%llx/%d df=%lu",
69007 - atomic_read(&rdp->dynticks->dynticks),
69008 + atomic_read_unchecked(&rdp->dynticks->dynticks),
69009 rdp->dynticks->dynticks_nesting,
69010 rdp->dynticks->dynticks_nmi_nesting,
69011 rdp->dynticks_fqs);
69012 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
69013 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
69014 rdp->qs_pending);
69015 seq_printf(m, ",%d,%llx,%d,%lu",
69016 - atomic_read(&rdp->dynticks->dynticks),
69017 + atomic_read_unchecked(&rdp->dynticks->dynticks),
69018 rdp->dynticks->dynticks_nesting,
69019 rdp->dynticks->dynticks_nmi_nesting,
69020 rdp->dynticks_fqs);
69021 diff --git a/kernel/resource.c b/kernel/resource.c
69022 index 7640b3a..5879283 100644
69023 --- a/kernel/resource.c
69024 +++ b/kernel/resource.c
69025 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
69026
69027 static int __init ioresources_init(void)
69028 {
69029 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69030 +#ifdef CONFIG_GRKERNSEC_PROC_USER
69031 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
69032 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
69033 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69034 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
69035 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
69036 +#endif
69037 +#else
69038 proc_create("ioports", 0, NULL, &proc_ioports_operations);
69039 proc_create("iomem", 0, NULL, &proc_iomem_operations);
69040 +#endif
69041 return 0;
69042 }
69043 __initcall(ioresources_init);
69044 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
69045 index 98ec494..4241d6d 100644
69046 --- a/kernel/rtmutex-tester.c
69047 +++ b/kernel/rtmutex-tester.c
69048 @@ -20,7 +20,7 @@
69049 #define MAX_RT_TEST_MUTEXES 8
69050
69051 static spinlock_t rttest_lock;
69052 -static atomic_t rttest_event;
69053 +static atomic_unchecked_t rttest_event;
69054
69055 struct test_thread_data {
69056 int opcode;
69057 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69058
69059 case RTTEST_LOCKCONT:
69060 td->mutexes[td->opdata] = 1;
69061 - td->event = atomic_add_return(1, &rttest_event);
69062 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69063 return 0;
69064
69065 case RTTEST_RESET:
69066 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69067 return 0;
69068
69069 case RTTEST_RESETEVENT:
69070 - atomic_set(&rttest_event, 0);
69071 + atomic_set_unchecked(&rttest_event, 0);
69072 return 0;
69073
69074 default:
69075 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69076 return ret;
69077
69078 td->mutexes[id] = 1;
69079 - td->event = atomic_add_return(1, &rttest_event);
69080 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69081 rt_mutex_lock(&mutexes[id]);
69082 - td->event = atomic_add_return(1, &rttest_event);
69083 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69084 td->mutexes[id] = 4;
69085 return 0;
69086
69087 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69088 return ret;
69089
69090 td->mutexes[id] = 1;
69091 - td->event = atomic_add_return(1, &rttest_event);
69092 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69093 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
69094 - td->event = atomic_add_return(1, &rttest_event);
69095 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69096 td->mutexes[id] = ret ? 0 : 4;
69097 return ret ? -EINTR : 0;
69098
69099 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
69100 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
69101 return ret;
69102
69103 - td->event = atomic_add_return(1, &rttest_event);
69104 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69105 rt_mutex_unlock(&mutexes[id]);
69106 - td->event = atomic_add_return(1, &rttest_event);
69107 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69108 td->mutexes[id] = 0;
69109 return 0;
69110
69111 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69112 break;
69113
69114 td->mutexes[dat] = 2;
69115 - td->event = atomic_add_return(1, &rttest_event);
69116 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69117 break;
69118
69119 default:
69120 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69121 return;
69122
69123 td->mutexes[dat] = 3;
69124 - td->event = atomic_add_return(1, &rttest_event);
69125 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69126 break;
69127
69128 case RTTEST_LOCKNOWAIT:
69129 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
69130 return;
69131
69132 td->mutexes[dat] = 1;
69133 - td->event = atomic_add_return(1, &rttest_event);
69134 + td->event = atomic_add_return_unchecked(1, &rttest_event);
69135 return;
69136
69137 default:
69138 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
69139 index e8a1f83..363d17d 100644
69140 --- a/kernel/sched/auto_group.c
69141 +++ b/kernel/sched/auto_group.c
69142 @@ -11,7 +11,7 @@
69143
69144 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
69145 static struct autogroup autogroup_default;
69146 -static atomic_t autogroup_seq_nr;
69147 +static atomic_unchecked_t autogroup_seq_nr;
69148
69149 void __init autogroup_init(struct task_struct *init_task)
69150 {
69151 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
69152
69153 kref_init(&ag->kref);
69154 init_rwsem(&ag->lock);
69155 - ag->id = atomic_inc_return(&autogroup_seq_nr);
69156 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
69157 ag->tg = tg;
69158 #ifdef CONFIG_RT_GROUP_SCHED
69159 /*
69160 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
69161 index 478a04c..e16339a 100644
69162 --- a/kernel/sched/core.c
69163 +++ b/kernel/sched/core.c
69164 @@ -3851,6 +3851,8 @@ int can_nice(const struct task_struct *p, const int nice)
69165 /* convert nice value [19,-20] to rlimit style value [1,40] */
69166 int nice_rlim = 20 - nice;
69167
69168 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
69169 +
69170 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
69171 capable(CAP_SYS_NICE));
69172 }
69173 @@ -3884,7 +3886,8 @@ SYSCALL_DEFINE1(nice, int, increment)
69174 if (nice > 19)
69175 nice = 19;
69176
69177 - if (increment < 0 && !can_nice(current, nice))
69178 + if (increment < 0 && (!can_nice(current, nice) ||
69179 + gr_handle_chroot_nice()))
69180 return -EPERM;
69181
69182 retval = security_task_setnice(current, nice);
69183 @@ -4041,6 +4044,7 @@ recheck:
69184 unsigned long rlim_rtprio =
69185 task_rlimit(p, RLIMIT_RTPRIO);
69186
69187 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
69188 /* can't set/change the rt policy */
69189 if (policy != p->policy && !rlim_rtprio)
69190 return -EPERM;
69191 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
69192 index aca16b8..8e3acc4 100644
69193 --- a/kernel/sched/fair.c
69194 +++ b/kernel/sched/fair.c
69195 @@ -5147,7 +5147,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
69196 * run_rebalance_domains is triggered when needed from the scheduler tick.
69197 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
69198 */
69199 -static void run_rebalance_domains(struct softirq_action *h)
69200 +static void run_rebalance_domains(void)
69201 {
69202 int this_cpu = smp_processor_id();
69203 struct rq *this_rq = cpu_rq(this_cpu);
69204 diff --git a/kernel/signal.c b/kernel/signal.c
69205 index b09cf3b..b291c66 100644
69206 --- a/kernel/signal.c
69207 +++ b/kernel/signal.c
69208 @@ -46,12 +46,12 @@ static struct kmem_cache *sigqueue_cachep;
69209
69210 int print_fatal_signals __read_mostly;
69211
69212 -static void __user *sig_handler(struct task_struct *t, int sig)
69213 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
69214 {
69215 return t->sighand->action[sig - 1].sa.sa_handler;
69216 }
69217
69218 -static int sig_handler_ignored(void __user *handler, int sig)
69219 +static int sig_handler_ignored(__sighandler_t handler, int sig)
69220 {
69221 /* Is it explicitly or implicitly ignored? */
69222 return handler == SIG_IGN ||
69223 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
69224 static int sig_task_ignored(struct task_struct *t, int sig,
69225 int from_ancestor_ns)
69226 {
69227 - void __user *handler;
69228 + __sighandler_t handler;
69229
69230 handler = sig_handler(t, sig);
69231
69232 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
69233 atomic_inc(&user->sigpending);
69234 rcu_read_unlock();
69235
69236 + if (!override_rlimit)
69237 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
69238 +
69239 if (override_rlimit ||
69240 atomic_read(&user->sigpending) <=
69241 task_rlimit(t, RLIMIT_SIGPENDING)) {
69242 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
69243
69244 int unhandled_signal(struct task_struct *tsk, int sig)
69245 {
69246 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
69247 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
69248 if (is_global_init(tsk))
69249 return 1;
69250 if (handler != SIG_IGN && handler != SIG_DFL)
69251 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
69252 }
69253 }
69254
69255 + /* allow glibc communication via tgkill to other threads in our
69256 + thread group */
69257 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
69258 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
69259 + && gr_handle_signal(t, sig))
69260 + return -EPERM;
69261 +
69262 return security_task_kill(t, info, sig, 0);
69263 }
69264
69265 @@ -1197,7 +1207,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69266 return send_signal(sig, info, p, 1);
69267 }
69268
69269 -static int
69270 +int
69271 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69272 {
69273 return send_signal(sig, info, t, 0);
69274 @@ -1234,6 +1244,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69275 unsigned long int flags;
69276 int ret, blocked, ignored;
69277 struct k_sigaction *action;
69278 + int is_unhandled = 0;
69279
69280 spin_lock_irqsave(&t->sighand->siglock, flags);
69281 action = &t->sighand->action[sig-1];
69282 @@ -1248,9 +1259,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
69283 }
69284 if (action->sa.sa_handler == SIG_DFL)
69285 t->signal->flags &= ~SIGNAL_UNKILLABLE;
69286 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
69287 + is_unhandled = 1;
69288 ret = specific_send_sig_info(sig, info, t);
69289 spin_unlock_irqrestore(&t->sighand->siglock, flags);
69290
69291 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
69292 + normal operation */
69293 + if (is_unhandled) {
69294 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
69295 + gr_handle_crash(t, sig);
69296 + }
69297 +
69298 return ret;
69299 }
69300
69301 @@ -1317,8 +1337,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
69302 ret = check_kill_permission(sig, info, p);
69303 rcu_read_unlock();
69304
69305 - if (!ret && sig)
69306 + if (!ret && sig) {
69307 ret = do_send_sig_info(sig, info, p, true);
69308 + if (!ret)
69309 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
69310 + }
69311
69312 return ret;
69313 }
69314 @@ -2829,7 +2852,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
69315 int error = -ESRCH;
69316
69317 rcu_read_lock();
69318 - p = find_task_by_vpid(pid);
69319 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69320 + /* allow glibc communication via tgkill to other threads in our
69321 + thread group */
69322 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
69323 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
69324 + p = find_task_by_vpid_unrestricted(pid);
69325 + else
69326 +#endif
69327 + p = find_task_by_vpid(pid);
69328 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
69329 error = check_kill_permission(sig, info, p);
69330 /*
69331 diff --git a/kernel/smp.c b/kernel/smp.c
69332 index db197d6..17aef0b 100644
69333 --- a/kernel/smp.c
69334 +++ b/kernel/smp.c
69335 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
69336 }
69337 EXPORT_SYMBOL(smp_call_function);
69338
69339 -void ipi_call_lock(void)
69340 +void ipi_call_lock(void) __acquires(call_function.lock)
69341 {
69342 raw_spin_lock(&call_function.lock);
69343 }
69344
69345 -void ipi_call_unlock(void)
69346 +void ipi_call_unlock(void) __releases(call_function.lock)
69347 {
69348 raw_spin_unlock(&call_function.lock);
69349 }
69350
69351 -void ipi_call_lock_irq(void)
69352 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
69353 {
69354 raw_spin_lock_irq(&call_function.lock);
69355 }
69356
69357 -void ipi_call_unlock_irq(void)
69358 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
69359 {
69360 raw_spin_unlock_irq(&call_function.lock);
69361 }
69362 diff --git a/kernel/softirq.c b/kernel/softirq.c
69363 index 4eb3a0f..6f1fa81 100644
69364 --- a/kernel/softirq.c
69365 +++ b/kernel/softirq.c
69366 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
69367
69368 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
69369
69370 -char *softirq_to_name[NR_SOFTIRQS] = {
69371 +const char * const softirq_to_name[NR_SOFTIRQS] = {
69372 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
69373 "TASKLET", "SCHED", "HRTIMER", "RCU"
69374 };
69375 @@ -235,7 +235,7 @@ restart:
69376 kstat_incr_softirqs_this_cpu(vec_nr);
69377
69378 trace_softirq_entry(vec_nr);
69379 - h->action(h);
69380 + h->action();
69381 trace_softirq_exit(vec_nr);
69382 if (unlikely(prev_count != preempt_count())) {
69383 printk(KERN_ERR "huh, entered softirq %u %s %p"
69384 @@ -385,9 +385,11 @@ void raise_softirq(unsigned int nr)
69385 local_irq_restore(flags);
69386 }
69387
69388 -void open_softirq(int nr, void (*action)(struct softirq_action *))
69389 +void open_softirq(int nr, void (*action)(void))
69390 {
69391 - softirq_vec[nr].action = action;
69392 + pax_open_kernel();
69393 + *(void **)&softirq_vec[nr].action = action;
69394 + pax_close_kernel();
69395 }
69396
69397 /*
69398 @@ -441,7 +443,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
69399
69400 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
69401
69402 -static void tasklet_action(struct softirq_action *a)
69403 +static void tasklet_action(void)
69404 {
69405 struct tasklet_struct *list;
69406
69407 @@ -476,7 +478,7 @@ static void tasklet_action(struct softirq_action *a)
69408 }
69409 }
69410
69411 -static void tasklet_hi_action(struct softirq_action *a)
69412 +static void tasklet_hi_action(void)
69413 {
69414 struct tasklet_struct *list;
69415
69416 diff --git a/kernel/sys.c b/kernel/sys.c
69417 index 888d227..f04b318 100644
69418 --- a/kernel/sys.c
69419 +++ b/kernel/sys.c
69420 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
69421 error = -EACCES;
69422 goto out;
69423 }
69424 +
69425 + if (gr_handle_chroot_setpriority(p, niceval)) {
69426 + error = -EACCES;
69427 + goto out;
69428 + }
69429 +
69430 no_nice = security_task_setnice(p, niceval);
69431 if (no_nice) {
69432 error = no_nice;
69433 @@ -572,6 +578,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
69434 goto error;
69435 }
69436
69437 + if (gr_check_group_change(new->gid, new->egid, -1))
69438 + goto error;
69439 +
69440 if (rgid != (gid_t) -1 ||
69441 (egid != (gid_t) -1 && egid != old->gid))
69442 new->sgid = new->egid;
69443 @@ -601,6 +610,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
69444 old = current_cred();
69445
69446 retval = -EPERM;
69447 +
69448 + if (gr_check_group_change(gid, gid, gid))
69449 + goto error;
69450 +
69451 if (nsown_capable(CAP_SETGID))
69452 new->gid = new->egid = new->sgid = new->fsgid = gid;
69453 else if (gid == old->gid || gid == old->sgid)
69454 @@ -618,7 +631,7 @@ error:
69455 /*
69456 * change the user struct in a credentials set to match the new UID
69457 */
69458 -static int set_user(struct cred *new)
69459 +int set_user(struct cred *new)
69460 {
69461 struct user_struct *new_user;
69462
69463 @@ -688,6 +701,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
69464 goto error;
69465 }
69466
69467 + if (gr_check_user_change(new->uid, new->euid, -1))
69468 + goto error;
69469 +
69470 if (new->uid != old->uid) {
69471 retval = set_user(new);
69472 if (retval < 0)
69473 @@ -732,6 +748,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
69474 old = current_cred();
69475
69476 retval = -EPERM;
69477 +
69478 + if (gr_check_crash_uid(uid))
69479 + goto error;
69480 + if (gr_check_user_change(uid, uid, uid))
69481 + goto error;
69482 +
69483 if (nsown_capable(CAP_SETUID)) {
69484 new->suid = new->uid = uid;
69485 if (uid != old->uid) {
69486 @@ -786,6 +808,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
69487 goto error;
69488 }
69489
69490 + if (gr_check_user_change(ruid, euid, -1))
69491 + goto error;
69492 +
69493 if (ruid != (uid_t) -1) {
69494 new->uid = ruid;
69495 if (ruid != old->uid) {
69496 @@ -850,6 +875,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
69497 goto error;
69498 }
69499
69500 + if (gr_check_group_change(rgid, egid, -1))
69501 + goto error;
69502 +
69503 if (rgid != (gid_t) -1)
69504 new->gid = rgid;
69505 if (egid != (gid_t) -1)
69506 @@ -896,6 +924,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69507 old = current_cred();
69508 old_fsuid = old->fsuid;
69509
69510 + if (gr_check_user_change(-1, -1, uid))
69511 + goto error;
69512 +
69513 if (uid == old->uid || uid == old->euid ||
69514 uid == old->suid || uid == old->fsuid ||
69515 nsown_capable(CAP_SETUID)) {
69516 @@ -906,6 +937,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
69517 }
69518 }
69519
69520 +error:
69521 abort_creds(new);
69522 return old_fsuid;
69523
69524 @@ -932,12 +964,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
69525 if (gid == old->gid || gid == old->egid ||
69526 gid == old->sgid || gid == old->fsgid ||
69527 nsown_capable(CAP_SETGID)) {
69528 + if (gr_check_group_change(-1, -1, gid))
69529 + goto error;
69530 +
69531 if (gid != old_fsgid) {
69532 new->fsgid = gid;
69533 goto change_okay;
69534 }
69535 }
69536
69537 +error:
69538 abort_creds(new);
69539 return old_fsgid;
69540
69541 @@ -1189,7 +1225,10 @@ static int override_release(char __user *release, int len)
69542 }
69543 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69544 snprintf(buf, len, "2.6.%u%s", v, rest);
69545 - ret = copy_to_user(release, buf, len);
69546 + if (len > sizeof(buf))
69547 + ret = -EFAULT;
69548 + else
69549 + ret = copy_to_user(release, buf, len);
69550 }
69551 return ret;
69552 }
69553 @@ -1243,19 +1282,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
69554 return -EFAULT;
69555
69556 down_read(&uts_sem);
69557 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
69558 + error = __copy_to_user(name->sysname, &utsname()->sysname,
69559 __OLD_UTS_LEN);
69560 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69561 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69562 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
69563 __OLD_UTS_LEN);
69564 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69565 - error |= __copy_to_user(&name->release, &utsname()->release,
69566 + error |= __copy_to_user(name->release, &utsname()->release,
69567 __OLD_UTS_LEN);
69568 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69569 - error |= __copy_to_user(&name->version, &utsname()->version,
69570 + error |= __copy_to_user(name->version, &utsname()->version,
69571 __OLD_UTS_LEN);
69572 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69573 - error |= __copy_to_user(&name->machine, &utsname()->machine,
69574 + error |= __copy_to_user(name->machine, &utsname()->machine,
69575 __OLD_UTS_LEN);
69576 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69577 up_read(&uts_sem);
69578 @@ -1838,7 +1877,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
69579 error = get_dumpable(me->mm);
69580 break;
69581 case PR_SET_DUMPABLE:
69582 - if (arg2 < 0 || arg2 > 1) {
69583 + if (arg2 > 1) {
69584 error = -EINVAL;
69585 break;
69586 }
69587 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
69588 index f03a6ef..5fcc8af 100644
69589 --- a/kernel/sysctl.c
69590 +++ b/kernel/sysctl.c
69591 @@ -86,6 +86,13 @@
69592
69593
69594 #if defined(CONFIG_SYSCTL)
69595 +#include <linux/grsecurity.h>
69596 +#include <linux/grinternal.h>
69597 +
69598 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
69599 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
69600 + const int op);
69601 +extern int gr_handle_chroot_sysctl(const int op);
69602
69603 /* External variables not in a header file. */
69604 extern int sysctl_overcommit_memory;
69605 @@ -191,6 +198,7 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
69606 }
69607
69608 #endif
69609 +extern struct ctl_table grsecurity_table[];
69610
69611 static struct ctl_table root_table[];
69612 static struct ctl_table_root sysctl_table_root;
69613 @@ -220,6 +228,20 @@ extern struct ctl_table epoll_table[];
69614 int sysctl_legacy_va_layout;
69615 #endif
69616
69617 +#ifdef CONFIG_PAX_SOFTMODE
69618 +static ctl_table pax_table[] = {
69619 + {
69620 + .procname = "softmode",
69621 + .data = &pax_softmode,
69622 + .maxlen = sizeof(unsigned int),
69623 + .mode = 0600,
69624 + .proc_handler = &proc_dointvec,
69625 + },
69626 +
69627 + { }
69628 +};
69629 +#endif
69630 +
69631 /* The default sysctl tables: */
69632
69633 static struct ctl_table root_table[] = {
69634 @@ -266,6 +288,22 @@ static int max_extfrag_threshold = 1000;
69635 #endif
69636
69637 static struct ctl_table kern_table[] = {
69638 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
69639 + {
69640 + .procname = "grsecurity",
69641 + .mode = 0500,
69642 + .child = grsecurity_table,
69643 + },
69644 +#endif
69645 +
69646 +#ifdef CONFIG_PAX_SOFTMODE
69647 + {
69648 + .procname = "pax",
69649 + .mode = 0500,
69650 + .child = pax_table,
69651 + },
69652 +#endif
69653 +
69654 {
69655 .procname = "sched_child_runs_first",
69656 .data = &sysctl_sched_child_runs_first,
69657 @@ -550,7 +588,7 @@ static struct ctl_table kern_table[] = {
69658 .data = &modprobe_path,
69659 .maxlen = KMOD_PATH_LEN,
69660 .mode = 0644,
69661 - .proc_handler = proc_dostring,
69662 + .proc_handler = proc_dostring_modpriv,
69663 },
69664 {
69665 .procname = "modules_disabled",
69666 @@ -717,16 +755,20 @@ static struct ctl_table kern_table[] = {
69667 .extra1 = &zero,
69668 .extra2 = &one,
69669 },
69670 +#endif
69671 {
69672 .procname = "kptr_restrict",
69673 .data = &kptr_restrict,
69674 .maxlen = sizeof(int),
69675 .mode = 0644,
69676 .proc_handler = proc_dointvec_minmax_sysadmin,
69677 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69678 + .extra1 = &two,
69679 +#else
69680 .extra1 = &zero,
69681 +#endif
69682 .extra2 = &two,
69683 },
69684 -#endif
69685 {
69686 .procname = "ngroups_max",
69687 .data = &ngroups_max,
69688 @@ -1225,6 +1267,13 @@ static struct ctl_table vm_table[] = {
69689 .proc_handler = proc_dointvec_minmax,
69690 .extra1 = &zero,
69691 },
69692 + {
69693 + .procname = "heap_stack_gap",
69694 + .data = &sysctl_heap_stack_gap,
69695 + .maxlen = sizeof(sysctl_heap_stack_gap),
69696 + .mode = 0644,
69697 + .proc_handler = proc_doulongvec_minmax,
69698 + },
69699 #else
69700 {
69701 .procname = "nr_trim_pages",
69702 @@ -1729,6 +1778,17 @@ static int test_perm(int mode, int op)
69703 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
69704 {
69705 int mode;
69706 + int error;
69707 +
69708 + if (table->parent != NULL && table->parent->procname != NULL &&
69709 + table->procname != NULL &&
69710 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
69711 + return -EACCES;
69712 + if (gr_handle_chroot_sysctl(op))
69713 + return -EACCES;
69714 + error = gr_handle_sysctl(table, op);
69715 + if (error)
69716 + return error;
69717
69718 if (root->permissions)
69719 mode = root->permissions(root, current->nsproxy, table);
69720 @@ -2133,6 +2193,16 @@ int proc_dostring(struct ctl_table *table, int write,
69721 buffer, lenp, ppos);
69722 }
69723
69724 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69725 + void __user *buffer, size_t *lenp, loff_t *ppos)
69726 +{
69727 + if (write && !capable(CAP_SYS_MODULE))
69728 + return -EPERM;
69729 +
69730 + return _proc_do_string(table->data, table->maxlen, write,
69731 + buffer, lenp, ppos);
69732 +}
69733 +
69734 static size_t proc_skip_spaces(char **buf)
69735 {
69736 size_t ret;
69737 @@ -2238,6 +2308,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
69738 len = strlen(tmp);
69739 if (len > *size)
69740 len = *size;
69741 + if (len > sizeof(tmp))
69742 + len = sizeof(tmp);
69743 if (copy_to_user(*buf, tmp, len))
69744 return -EFAULT;
69745 *size -= len;
69746 @@ -2554,8 +2626,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
69747 *i = val;
69748 } else {
69749 val = convdiv * (*i) / convmul;
69750 - if (!first)
69751 + if (!first) {
69752 err = proc_put_char(&buffer, &left, '\t');
69753 + if (err)
69754 + break;
69755 + }
69756 err = proc_put_long(&buffer, &left, val, false);
69757 if (err)
69758 break;
69759 @@ -2950,6 +3025,12 @@ int proc_dostring(struct ctl_table *table, int write,
69760 return -ENOSYS;
69761 }
69762
69763 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69764 + void __user *buffer, size_t *lenp, loff_t *ppos)
69765 +{
69766 + return -ENOSYS;
69767 +}
69768 +
69769 int proc_dointvec(struct ctl_table *table, int write,
69770 void __user *buffer, size_t *lenp, loff_t *ppos)
69771 {
69772 @@ -3006,6 +3087,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69773 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69774 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69775 EXPORT_SYMBOL(proc_dostring);
69776 +EXPORT_SYMBOL(proc_dostring_modpriv);
69777 EXPORT_SYMBOL(proc_doulongvec_minmax);
69778 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69779 EXPORT_SYMBOL(register_sysctl_table);
69780 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69781 index a650694..aaeeb20 100644
69782 --- a/kernel/sysctl_binary.c
69783 +++ b/kernel/sysctl_binary.c
69784 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69785 int i;
69786
69787 set_fs(KERNEL_DS);
69788 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69789 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69790 set_fs(old_fs);
69791 if (result < 0)
69792 goto out_kfree;
69793 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69794 }
69795
69796 set_fs(KERNEL_DS);
69797 - result = vfs_write(file, buffer, str - buffer, &pos);
69798 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69799 set_fs(old_fs);
69800 if (result < 0)
69801 goto out_kfree;
69802 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69803 int i;
69804
69805 set_fs(KERNEL_DS);
69806 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69807 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69808 set_fs(old_fs);
69809 if (result < 0)
69810 goto out_kfree;
69811 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69812 }
69813
69814 set_fs(KERNEL_DS);
69815 - result = vfs_write(file, buffer, str - buffer, &pos);
69816 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69817 set_fs(old_fs);
69818 if (result < 0)
69819 goto out_kfree;
69820 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69821 int i;
69822
69823 set_fs(KERNEL_DS);
69824 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69825 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69826 set_fs(old_fs);
69827 if (result < 0)
69828 goto out;
69829 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69830 __le16 dnaddr;
69831
69832 set_fs(KERNEL_DS);
69833 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69834 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69835 set_fs(old_fs);
69836 if (result < 0)
69837 goto out;
69838 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69839 le16_to_cpu(dnaddr) & 0x3ff);
69840
69841 set_fs(KERNEL_DS);
69842 - result = vfs_write(file, buf, len, &pos);
69843 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69844 set_fs(old_fs);
69845 if (result < 0)
69846 goto out;
69847 diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
69848 index 362da65..ab8ef8c 100644
69849 --- a/kernel/sysctl_check.c
69850 +++ b/kernel/sysctl_check.c
69851 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
69852 set_fail(&fail, table, "Directory with extra2");
69853 } else {
69854 if ((table->proc_handler == proc_dostring) ||
69855 + (table->proc_handler == proc_dostring_modpriv) ||
69856 (table->proc_handler == proc_dointvec) ||
69857 (table->proc_handler == proc_dointvec_minmax) ||
69858 (table->proc_handler == proc_dointvec_jiffies) ||
69859 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69860 index e660464..c8b9e67 100644
69861 --- a/kernel/taskstats.c
69862 +++ b/kernel/taskstats.c
69863 @@ -27,9 +27,12 @@
69864 #include <linux/cgroup.h>
69865 #include <linux/fs.h>
69866 #include <linux/file.h>
69867 +#include <linux/grsecurity.h>
69868 #include <net/genetlink.h>
69869 #include <linux/atomic.h>
69870
69871 +extern int gr_is_taskstats_denied(int pid);
69872 +
69873 /*
69874 * Maximum length of a cpumask that can be specified in
69875 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69876 @@ -556,6 +559,9 @@ err:
69877
69878 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69879 {
69880 + if (gr_is_taskstats_denied(current->pid))
69881 + return -EACCES;
69882 +
69883 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69884 return cmd_attr_register_cpumask(info);
69885 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69886 diff --git a/kernel/time.c b/kernel/time.c
69887 index 73e416d..cfc6f69 100644
69888 --- a/kernel/time.c
69889 +++ b/kernel/time.c
69890 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69891 return error;
69892
69893 if (tz) {
69894 + /* we log in do_settimeofday called below, so don't log twice
69895 + */
69896 + if (!tv)
69897 + gr_log_timechange();
69898 +
69899 /* SMP safe, global irq locking makes it work. */
69900 sys_tz = *tz;
69901 update_vsyscall_tz();
69902 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69903 index 8a46f5d..bbe6f9c 100644
69904 --- a/kernel/time/alarmtimer.c
69905 +++ b/kernel/time/alarmtimer.c
69906 @@ -773,7 +773,7 @@ static int __init alarmtimer_init(void)
69907 struct platform_device *pdev;
69908 int error = 0;
69909 int i;
69910 - struct k_clock alarm_clock = {
69911 + static struct k_clock alarm_clock = {
69912 .clock_getres = alarm_clock_getres,
69913 .clock_get = alarm_clock_get,
69914 .timer_create = alarm_timer_create,
69915 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69916 index fd4a7b1..fae5c2a 100644
69917 --- a/kernel/time/tick-broadcast.c
69918 +++ b/kernel/time/tick-broadcast.c
69919 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69920 * then clear the broadcast bit.
69921 */
69922 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69923 - int cpu = smp_processor_id();
69924 + cpu = smp_processor_id();
69925
69926 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69927 tick_broadcast_clear_oneshot(cpu);
69928 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69929 index 0c63581..e25dcb6 100644
69930 --- a/kernel/time/timekeeping.c
69931 +++ b/kernel/time/timekeeping.c
69932 @@ -14,6 +14,7 @@
69933 #include <linux/init.h>
69934 #include <linux/mm.h>
69935 #include <linux/sched.h>
69936 +#include <linux/grsecurity.h>
69937 #include <linux/syscore_ops.h>
69938 #include <linux/clocksource.h>
69939 #include <linux/jiffies.h>
69940 @@ -365,6 +366,8 @@ int do_settimeofday(const struct timespec *tv)
69941 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69942 return -EINVAL;
69943
69944 + gr_log_timechange();
69945 +
69946 write_seqlock_irqsave(&xtime_lock, flags);
69947
69948 timekeeping_forward_now();
69949 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69950 index 3258455..f35227d 100644
69951 --- a/kernel/time/timer_list.c
69952 +++ b/kernel/time/timer_list.c
69953 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69954
69955 static void print_name_offset(struct seq_file *m, void *sym)
69956 {
69957 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69958 + SEQ_printf(m, "<%p>", NULL);
69959 +#else
69960 char symname[KSYM_NAME_LEN];
69961
69962 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69963 SEQ_printf(m, "<%pK>", sym);
69964 else
69965 SEQ_printf(m, "%s", symname);
69966 +#endif
69967 }
69968
69969 static void
69970 @@ -112,7 +116,11 @@ next_one:
69971 static void
69972 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69973 {
69974 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69975 + SEQ_printf(m, " .base: %p\n", NULL);
69976 +#else
69977 SEQ_printf(m, " .base: %pK\n", base);
69978 +#endif
69979 SEQ_printf(m, " .index: %d\n",
69980 base->index);
69981 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69982 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69983 {
69984 struct proc_dir_entry *pe;
69985
69986 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69987 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69988 +#else
69989 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69990 +#endif
69991 if (!pe)
69992 return -ENOMEM;
69993 return 0;
69994 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69995 index 0b537f2..9e71eca 100644
69996 --- a/kernel/time/timer_stats.c
69997 +++ b/kernel/time/timer_stats.c
69998 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69999 static unsigned long nr_entries;
70000 static struct entry entries[MAX_ENTRIES];
70001
70002 -static atomic_t overflow_count;
70003 +static atomic_unchecked_t overflow_count;
70004
70005 /*
70006 * The entries are in a hash-table, for fast lookup:
70007 @@ -140,7 +140,7 @@ static void reset_entries(void)
70008 nr_entries = 0;
70009 memset(entries, 0, sizeof(entries));
70010 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
70011 - atomic_set(&overflow_count, 0);
70012 + atomic_set_unchecked(&overflow_count, 0);
70013 }
70014
70015 static struct entry *alloc_entry(void)
70016 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70017 if (likely(entry))
70018 entry->count++;
70019 else
70020 - atomic_inc(&overflow_count);
70021 + atomic_inc_unchecked(&overflow_count);
70022
70023 out_unlock:
70024 raw_spin_unlock_irqrestore(lock, flags);
70025 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
70026
70027 static void print_name_offset(struct seq_file *m, unsigned long addr)
70028 {
70029 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70030 + seq_printf(m, "<%p>", NULL);
70031 +#else
70032 char symname[KSYM_NAME_LEN];
70033
70034 if (lookup_symbol_name(addr, symname) < 0)
70035 seq_printf(m, "<%p>", (void *)addr);
70036 else
70037 seq_printf(m, "%s", symname);
70038 +#endif
70039 }
70040
70041 static int tstats_show(struct seq_file *m, void *v)
70042 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
70043
70044 seq_puts(m, "Timer Stats Version: v0.2\n");
70045 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
70046 - if (atomic_read(&overflow_count))
70047 + if (atomic_read_unchecked(&overflow_count))
70048 seq_printf(m, "Overflow: %d entries\n",
70049 - atomic_read(&overflow_count));
70050 + atomic_read_unchecked(&overflow_count));
70051
70052 for (i = 0; i < nr_entries; i++) {
70053 entry = entries + i;
70054 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
70055 {
70056 struct proc_dir_entry *pe;
70057
70058 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
70059 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
70060 +#else
70061 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
70062 +#endif
70063 if (!pe)
70064 return -ENOMEM;
70065 return 0;
70066 diff --git a/kernel/timer.c b/kernel/timer.c
70067 index a297ffc..5e16b0b 100644
70068 --- a/kernel/timer.c
70069 +++ b/kernel/timer.c
70070 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
70071 /*
70072 * This function runs timers and the timer-tq in bottom half context.
70073 */
70074 -static void run_timer_softirq(struct softirq_action *h)
70075 +static void run_timer_softirq(void)
70076 {
70077 struct tvec_base *base = __this_cpu_read(tvec_bases);
70078
70079 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
70080 index cdea7b5..9b820d4 100644
70081 --- a/kernel/trace/blktrace.c
70082 +++ b/kernel/trace/blktrace.c
70083 @@ -324,7 +324,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
70084 struct blk_trace *bt = filp->private_data;
70085 char buf[16];
70086
70087 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
70088 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
70089
70090 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
70091 }
70092 @@ -389,7 +389,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
70093 return 1;
70094
70095 bt = buf->chan->private_data;
70096 - atomic_inc(&bt->dropped);
70097 + atomic_inc_unchecked(&bt->dropped);
70098 return 0;
70099 }
70100
70101 @@ -490,7 +490,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
70102
70103 bt->dir = dir;
70104 bt->dev = dev;
70105 - atomic_set(&bt->dropped, 0);
70106 + atomic_set_unchecked(&bt->dropped, 0);
70107
70108 ret = -EIO;
70109 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
70110 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
70111 index 683d559..d70d914 100644
70112 --- a/kernel/trace/ftrace.c
70113 +++ b/kernel/trace/ftrace.c
70114 @@ -1726,12 +1726,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
70115 if (unlikely(ftrace_disabled))
70116 return 0;
70117
70118 + ret = ftrace_arch_code_modify_prepare();
70119 + FTRACE_WARN_ON(ret);
70120 + if (ret)
70121 + return 0;
70122 +
70123 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
70124 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
70125 if (ret) {
70126 ftrace_bug(ret, ip);
70127 - return 0;
70128 }
70129 - return 1;
70130 + return ret ? 0 : 1;
70131 }
70132
70133 /*
70134 @@ -2843,7 +2848,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
70135
70136 int
70137 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
70138 - void *data)
70139 + void *data)
70140 {
70141 struct ftrace_func_probe *entry;
70142 struct ftrace_page *pg;
70143 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
70144 index c4579f1..6a439da 100644
70145 --- a/kernel/trace/trace.c
70146 +++ b/kernel/trace/trace.c
70147 @@ -4258,10 +4258,9 @@ static const struct file_operations tracing_dyn_info_fops = {
70148 };
70149 #endif
70150
70151 -static struct dentry *d_tracer;
70152 -
70153 struct dentry *tracing_init_dentry(void)
70154 {
70155 + static struct dentry *d_tracer;
70156 static int once;
70157
70158 if (d_tracer)
70159 @@ -4281,10 +4280,9 @@ struct dentry *tracing_init_dentry(void)
70160 return d_tracer;
70161 }
70162
70163 -static struct dentry *d_percpu;
70164 -
70165 struct dentry *tracing_dentry_percpu(void)
70166 {
70167 + static struct dentry *d_percpu;
70168 static int once;
70169 struct dentry *d_tracer;
70170
70171 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
70172 index c212a7f..7b02394 100644
70173 --- a/kernel/trace/trace_events.c
70174 +++ b/kernel/trace/trace_events.c
70175 @@ -1299,10 +1299,6 @@ static LIST_HEAD(ftrace_module_file_list);
70176 struct ftrace_module_file_ops {
70177 struct list_head list;
70178 struct module *mod;
70179 - struct file_operations id;
70180 - struct file_operations enable;
70181 - struct file_operations format;
70182 - struct file_operations filter;
70183 };
70184
70185 static struct ftrace_module_file_ops *
70186 @@ -1323,17 +1319,12 @@ trace_create_file_ops(struct module *mod)
70187
70188 file_ops->mod = mod;
70189
70190 - file_ops->id = ftrace_event_id_fops;
70191 - file_ops->id.owner = mod;
70192 -
70193 - file_ops->enable = ftrace_enable_fops;
70194 - file_ops->enable.owner = mod;
70195 -
70196 - file_ops->filter = ftrace_event_filter_fops;
70197 - file_ops->filter.owner = mod;
70198 -
70199 - file_ops->format = ftrace_event_format_fops;
70200 - file_ops->format.owner = mod;
70201 + pax_open_kernel();
70202 + *(void **)&mod->trace_id.owner = mod;
70203 + *(void **)&mod->trace_enable.owner = mod;
70204 + *(void **)&mod->trace_filter.owner = mod;
70205 + *(void **)&mod->trace_format.owner = mod;
70206 + pax_close_kernel();
70207
70208 list_add(&file_ops->list, &ftrace_module_file_list);
70209
70210 @@ -1357,8 +1348,8 @@ static void trace_module_add_events(struct module *mod)
70211
70212 for_each_event(call, start, end) {
70213 __trace_add_event_call(*call, mod,
70214 - &file_ops->id, &file_ops->enable,
70215 - &file_ops->filter, &file_ops->format);
70216 + &mod->trace_id, &mod->trace_enable,
70217 + &mod->trace_filter, &mod->trace_format);
70218 }
70219 }
70220
70221 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
70222 index 00d527c..7c5b1a3 100644
70223 --- a/kernel/trace/trace_kprobe.c
70224 +++ b/kernel/trace/trace_kprobe.c
70225 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70226 long ret;
70227 int maxlen = get_rloc_len(*(u32 *)dest);
70228 u8 *dst = get_rloc_data(dest);
70229 - u8 *src = addr;
70230 + const u8 __user *src = (const u8 __force_user *)addr;
70231 mm_segment_t old_fs = get_fs();
70232 if (!maxlen)
70233 return;
70234 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70235 pagefault_disable();
70236 do
70237 ret = __copy_from_user_inatomic(dst++, src++, 1);
70238 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
70239 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
70240 dst[-1] = '\0';
70241 pagefault_enable();
70242 set_fs(old_fs);
70243 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
70244 ((u8 *)get_rloc_data(dest))[0] = '\0';
70245 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
70246 } else
70247 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
70248 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
70249 get_rloc_offs(*(u32 *)dest));
70250 }
70251 /* Return the length of string -- including null terminal byte */
70252 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
70253 set_fs(KERNEL_DS);
70254 pagefault_disable();
70255 do {
70256 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
70257 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
70258 len++;
70259 } while (c && ret == 0 && len < MAX_STRING_SIZE);
70260 pagefault_enable();
70261 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
70262 index fd3c8aa..5f324a6 100644
70263 --- a/kernel/trace/trace_mmiotrace.c
70264 +++ b/kernel/trace/trace_mmiotrace.c
70265 @@ -24,7 +24,7 @@ struct header_iter {
70266 static struct trace_array *mmio_trace_array;
70267 static bool overrun_detected;
70268 static unsigned long prev_overruns;
70269 -static atomic_t dropped_count;
70270 +static atomic_unchecked_t dropped_count;
70271
70272 static void mmio_reset_data(struct trace_array *tr)
70273 {
70274 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
70275
70276 static unsigned long count_overruns(struct trace_iterator *iter)
70277 {
70278 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
70279 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
70280 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
70281
70282 if (over > prev_overruns)
70283 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
70284 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
70285 sizeof(*entry), 0, pc);
70286 if (!event) {
70287 - atomic_inc(&dropped_count);
70288 + atomic_inc_unchecked(&dropped_count);
70289 return;
70290 }
70291 entry = ring_buffer_event_data(event);
70292 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
70293 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
70294 sizeof(*entry), 0, pc);
70295 if (!event) {
70296 - atomic_inc(&dropped_count);
70297 + atomic_inc_unchecked(&dropped_count);
70298 return;
70299 }
70300 entry = ring_buffer_event_data(event);
70301 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
70302 index d9c07f0..c1eeceb 100644
70303 --- a/kernel/trace/trace_output.c
70304 +++ b/kernel/trace/trace_output.c
70305 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
70306
70307 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
70308 if (!IS_ERR(p)) {
70309 - p = mangle_path(s->buffer + s->len, p, "\n");
70310 + p = mangle_path(s->buffer + s->len, p, "\n\\");
70311 if (p) {
70312 s->len = p - s->buffer;
70313 return 1;
70314 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
70315 index d4545f4..a9010a1 100644
70316 --- a/kernel/trace/trace_stack.c
70317 +++ b/kernel/trace/trace_stack.c
70318 @@ -53,7 +53,7 @@ static inline void check_stack(void)
70319 return;
70320
70321 /* we do not handle interrupt stacks yet */
70322 - if (!object_is_on_stack(&this_size))
70323 + if (!object_starts_on_stack(&this_size))
70324 return;
70325
70326 local_irq_save(flags);
70327 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
70328 index 209b379..7f76423 100644
70329 --- a/kernel/trace/trace_workqueue.c
70330 +++ b/kernel/trace/trace_workqueue.c
70331 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
70332 int cpu;
70333 pid_t pid;
70334 /* Can be inserted from interrupt or user context, need to be atomic */
70335 - atomic_t inserted;
70336 + atomic_unchecked_t inserted;
70337 /*
70338 * Don't need to be atomic, works are serialized in a single workqueue thread
70339 * on a single CPU.
70340 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
70341 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
70342 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
70343 if (node->pid == wq_thread->pid) {
70344 - atomic_inc(&node->inserted);
70345 + atomic_inc_unchecked(&node->inserted);
70346 goto found;
70347 }
70348 }
70349 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
70350 tsk = get_pid_task(pid, PIDTYPE_PID);
70351 if (tsk) {
70352 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
70353 - atomic_read(&cws->inserted), cws->executed,
70354 + atomic_read_unchecked(&cws->inserted), cws->executed,
70355 tsk->comm);
70356 put_task_struct(tsk);
70357 }
70358 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
70359 index 8745ac7..d144e37 100644
70360 --- a/lib/Kconfig.debug
70361 +++ b/lib/Kconfig.debug
70362 @@ -1103,6 +1103,7 @@ config LATENCYTOP
70363 depends on DEBUG_KERNEL
70364 depends on STACKTRACE_SUPPORT
70365 depends on PROC_FS
70366 + depends on !GRKERNSEC_HIDESYM
70367 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
70368 select KALLSYMS
70369 select KALLSYMS_ALL
70370 diff --git a/lib/bitmap.c b/lib/bitmap.c
70371 index 0d4a127..33a06c7 100644
70372 --- a/lib/bitmap.c
70373 +++ b/lib/bitmap.c
70374 @@ -419,7 +419,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
70375 {
70376 int c, old_c, totaldigits, ndigits, nchunks, nbits;
70377 u32 chunk;
70378 - const char __user __force *ubuf = (const char __user __force *)buf;
70379 + const char __user *ubuf = (const char __force_user *)buf;
70380
70381 bitmap_zero(maskp, nmaskbits);
70382
70383 @@ -504,7 +504,7 @@ int bitmap_parse_user(const char __user *ubuf,
70384 {
70385 if (!access_ok(VERIFY_READ, ubuf, ulen))
70386 return -EFAULT;
70387 - return __bitmap_parse((const char __force *)ubuf,
70388 + return __bitmap_parse((const char __force_kernel *)ubuf,
70389 ulen, 1, maskp, nmaskbits);
70390
70391 }
70392 @@ -596,7 +596,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
70393 {
70394 unsigned a, b;
70395 int c, old_c, totaldigits;
70396 - const char __user __force *ubuf = (const char __user __force *)buf;
70397 + const char __user *ubuf = (const char __force_user *)buf;
70398 int exp_digit, in_range;
70399
70400 totaldigits = c = 0;
70401 @@ -696,7 +696,7 @@ int bitmap_parselist_user(const char __user *ubuf,
70402 {
70403 if (!access_ok(VERIFY_READ, ubuf, ulen))
70404 return -EFAULT;
70405 - return __bitmap_parselist((const char __force *)ubuf,
70406 + return __bitmap_parselist((const char __force_kernel *)ubuf,
70407 ulen, 1, maskp, nmaskbits);
70408 }
70409 EXPORT_SYMBOL(bitmap_parselist_user);
70410 diff --git a/lib/bug.c b/lib/bug.c
70411 index a28c141..2bd3d95 100644
70412 --- a/lib/bug.c
70413 +++ b/lib/bug.c
70414 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
70415 return BUG_TRAP_TYPE_NONE;
70416
70417 bug = find_bug(bugaddr);
70418 + if (!bug)
70419 + return BUG_TRAP_TYPE_NONE;
70420
70421 file = NULL;
70422 line = 0;
70423 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
70424 index 0ab9ae8..f01ceca 100644
70425 --- a/lib/debugobjects.c
70426 +++ b/lib/debugobjects.c
70427 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
70428 if (limit > 4)
70429 return;
70430
70431 - is_on_stack = object_is_on_stack(addr);
70432 + is_on_stack = object_starts_on_stack(addr);
70433 if (is_on_stack == onstack)
70434 return;
70435
70436 diff --git a/lib/devres.c b/lib/devres.c
70437 index 9676617..5149e15 100644
70438 --- a/lib/devres.c
70439 +++ b/lib/devres.c
70440 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
70441 void devm_iounmap(struct device *dev, void __iomem *addr)
70442 {
70443 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
70444 - (void *)addr));
70445 + (void __force *)addr));
70446 iounmap(addr);
70447 }
70448 EXPORT_SYMBOL(devm_iounmap);
70449 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
70450 {
70451 ioport_unmap(addr);
70452 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
70453 - devm_ioport_map_match, (void *)addr));
70454 + devm_ioport_map_match, (void __force *)addr));
70455 }
70456 EXPORT_SYMBOL(devm_ioport_unmap);
70457
70458 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
70459 index fea790a..ebb0e82 100644
70460 --- a/lib/dma-debug.c
70461 +++ b/lib/dma-debug.c
70462 @@ -925,7 +925,7 @@ out:
70463
70464 static void check_for_stack(struct device *dev, void *addr)
70465 {
70466 - if (object_is_on_stack(addr))
70467 + if (object_starts_on_stack(addr))
70468 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
70469 "stack [addr=%p]\n", addr);
70470 }
70471 diff --git a/lib/extable.c b/lib/extable.c
70472 index 4cac81e..63e9b8f 100644
70473 --- a/lib/extable.c
70474 +++ b/lib/extable.c
70475 @@ -13,6 +13,7 @@
70476 #include <linux/init.h>
70477 #include <linux/sort.h>
70478 #include <asm/uaccess.h>
70479 +#include <asm/pgtable.h>
70480
70481 #ifndef ARCH_HAS_SORT_EXTABLE
70482 /*
70483 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
70484 void sort_extable(struct exception_table_entry *start,
70485 struct exception_table_entry *finish)
70486 {
70487 + pax_open_kernel();
70488 sort(start, finish - start, sizeof(struct exception_table_entry),
70489 cmp_ex, NULL);
70490 + pax_close_kernel();
70491 }
70492
70493 #ifdef CONFIG_MODULES
70494 diff --git a/lib/inflate.c b/lib/inflate.c
70495 index 013a761..c28f3fc 100644
70496 --- a/lib/inflate.c
70497 +++ b/lib/inflate.c
70498 @@ -269,7 +269,7 @@ static void free(void *where)
70499 malloc_ptr = free_mem_ptr;
70500 }
70501 #else
70502 -#define malloc(a) kmalloc(a, GFP_KERNEL)
70503 +#define malloc(a) kmalloc((a), GFP_KERNEL)
70504 #define free(a) kfree(a)
70505 #endif
70506
70507 diff --git a/lib/ioremap.c b/lib/ioremap.c
70508 index da4e2ad..6373b5f 100644
70509 --- a/lib/ioremap.c
70510 +++ b/lib/ioremap.c
70511 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
70512 unsigned long next;
70513
70514 phys_addr -= addr;
70515 - pmd = pmd_alloc(&init_mm, pud, addr);
70516 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
70517 if (!pmd)
70518 return -ENOMEM;
70519 do {
70520 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
70521 unsigned long next;
70522
70523 phys_addr -= addr;
70524 - pud = pud_alloc(&init_mm, pgd, addr);
70525 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
70526 if (!pud)
70527 return -ENOMEM;
70528 do {
70529 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70530 index bd2bea9..6b3c95e 100644
70531 --- a/lib/is_single_threaded.c
70532 +++ b/lib/is_single_threaded.c
70533 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70534 struct task_struct *p, *t;
70535 bool ret;
70536
70537 + if (!mm)
70538 + return true;
70539 +
70540 if (atomic_read(&task->signal->live) != 1)
70541 return false;
70542
70543 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
70544 index dc63d08..95ae14a 100644
70545 --- a/lib/radix-tree.c
70546 +++ b/lib/radix-tree.c
70547 @@ -78,7 +78,7 @@ struct radix_tree_preload {
70548 int nr;
70549 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70550 };
70551 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70552 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70553
70554 static inline void *ptr_to_indirect(void *ptr)
70555 {
70556 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
70557 index 38e612e..4fb99a8 100644
70558 --- a/lib/vsprintf.c
70559 +++ b/lib/vsprintf.c
70560 @@ -16,6 +16,9 @@
70561 * - scnprintf and vscnprintf
70562 */
70563
70564 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70565 +#define __INCLUDED_BY_HIDESYM 1
70566 +#endif
70567 #include <stdarg.h>
70568 #include <linux/module.h>
70569 #include <linux/types.h>
70570 @@ -413,7 +416,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
70571 char sym[KSYM_SYMBOL_LEN];
70572 if (ext == 'B')
70573 sprint_backtrace(sym, value);
70574 - else if (ext != 'f' && ext != 's')
70575 + else if (ext != 'f' && ext != 's' && ext != 'a')
70576 sprint_symbol(sym, value);
70577 else
70578 kallsyms_lookup(value, NULL, NULL, NULL, sym);
70579 @@ -789,7 +792,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
70580 return number(buf, end, *(const netdev_features_t *)addr, spec);
70581 }
70582
70583 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70584 +int kptr_restrict __read_mostly = 2;
70585 +#else
70586 int kptr_restrict __read_mostly;
70587 +#endif
70588
70589 /*
70590 * Show a '%p' thing. A kernel extension is that the '%p' is followed
70591 @@ -803,6 +810,8 @@ int kptr_restrict __read_mostly;
70592 * - 'S' For symbolic direct pointers with offset
70593 * - 's' For symbolic direct pointers without offset
70594 * - 'B' For backtraced symbolic direct pointers with offset
70595 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70596 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70597 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70598 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70599 * - 'M' For a 6-byte MAC address, it prints the address in the
70600 @@ -848,12 +857,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70601 {
70602 if (!ptr && *fmt != 'K') {
70603 /*
70604 - * Print (null) with the same width as a pointer so it makes
70605 + * Print (nil) with the same width as a pointer so it makes
70606 * tabular output look nice.
70607 */
70608 if (spec.field_width == -1)
70609 spec.field_width = 2 * sizeof(void *);
70610 - return string(buf, end, "(null)", spec);
70611 + return string(buf, end, "(nil)", spec);
70612 }
70613
70614 switch (*fmt) {
70615 @@ -863,6 +872,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70616 /* Fallthrough */
70617 case 'S':
70618 case 's':
70619 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70620 + break;
70621 +#else
70622 + return symbol_string(buf, end, ptr, spec, *fmt);
70623 +#endif
70624 + case 'A':
70625 + case 'a':
70626 case 'B':
70627 return symbol_string(buf, end, ptr, spec, *fmt);
70628 case 'R':
70629 @@ -1633,11 +1649,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70630 typeof(type) value; \
70631 if (sizeof(type) == 8) { \
70632 args = PTR_ALIGN(args, sizeof(u32)); \
70633 - *(u32 *)&value = *(u32 *)args; \
70634 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70635 + *(u32 *)&value = *(const u32 *)args; \
70636 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70637 } else { \
70638 args = PTR_ALIGN(args, sizeof(type)); \
70639 - value = *(typeof(type) *)args; \
70640 + value = *(const typeof(type) *)args; \
70641 } \
70642 args += sizeof(type); \
70643 value; \
70644 @@ -1700,7 +1716,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
70645 case FORMAT_TYPE_STR: {
70646 const char *str_arg = args;
70647 args += strlen(str_arg) + 1;
70648 - str = string(str, end, (char *)str_arg, spec);
70649 + str = string(str, end, str_arg, spec);
70650 break;
70651 }
70652
70653 diff --git a/localversion-grsec b/localversion-grsec
70654 new file mode 100644
70655 index 0000000..7cd6065
70656 --- /dev/null
70657 +++ b/localversion-grsec
70658 @@ -0,0 +1 @@
70659 +-grsec
70660 diff --git a/mm/Kconfig b/mm/Kconfig
70661 index e338407..49b5b7a 100644
70662 --- a/mm/Kconfig
70663 +++ b/mm/Kconfig
70664 @@ -247,10 +247,10 @@ config KSM
70665 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
70666
70667 config DEFAULT_MMAP_MIN_ADDR
70668 - int "Low address space to protect from user allocation"
70669 + int "Low address space to protect from user allocation"
70670 depends on MMU
70671 - default 4096
70672 - help
70673 + default 65536
70674 + help
70675 This is the portion of low virtual memory which should be protected
70676 from userspace allocation. Keeping a user from writing to low pages
70677 can help reduce the impact of kernel NULL pointer bugs.
70678 diff --git a/mm/filemap.c b/mm/filemap.c
70679 index b662757..3081ddd 100644
70680 --- a/mm/filemap.c
70681 +++ b/mm/filemap.c
70682 @@ -1770,7 +1770,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
70683 struct address_space *mapping = file->f_mapping;
70684
70685 if (!mapping->a_ops->readpage)
70686 - return -ENOEXEC;
70687 + return -ENODEV;
70688 file_accessed(file);
70689 vma->vm_ops = &generic_file_vm_ops;
70690 vma->vm_flags |= VM_CAN_NONLINEAR;
70691 @@ -2176,6 +2176,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
70692 *pos = i_size_read(inode);
70693
70694 if (limit != RLIM_INFINITY) {
70695 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70696 if (*pos >= limit) {
70697 send_sig(SIGXFSZ, current, 0);
70698 return -EFBIG;
70699 diff --git a/mm/fremap.c b/mm/fremap.c
70700 index 9ed4fd4..c42648d 100644
70701 --- a/mm/fremap.c
70702 +++ b/mm/fremap.c
70703 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
70704 retry:
70705 vma = find_vma(mm, start);
70706
70707 +#ifdef CONFIG_PAX_SEGMEXEC
70708 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70709 + goto out;
70710 +#endif
70711 +
70712 /*
70713 * Make sure the vma is shared, that it supports prefaulting,
70714 * and that the remapped range is valid and fully within
70715 diff --git a/mm/highmem.c b/mm/highmem.c
70716 index 57d82c6..e9e0552 100644
70717 --- a/mm/highmem.c
70718 +++ b/mm/highmem.c
70719 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
70720 * So no dangers, even with speculative execution.
70721 */
70722 page = pte_page(pkmap_page_table[i]);
70723 + pax_open_kernel();
70724 pte_clear(&init_mm, (unsigned long)page_address(page),
70725 &pkmap_page_table[i]);
70726 -
70727 + pax_close_kernel();
70728 set_page_address(page, NULL);
70729 need_flush = 1;
70730 }
70731 @@ -186,9 +187,11 @@ start:
70732 }
70733 }
70734 vaddr = PKMAP_ADDR(last_pkmap_nr);
70735 +
70736 + pax_open_kernel();
70737 set_pte_at(&init_mm, vaddr,
70738 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70739 -
70740 + pax_close_kernel();
70741 pkmap_count[last_pkmap_nr] = 1;
70742 set_page_address(page, (void *)vaddr);
70743
70744 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70745 index 8f7fc39..69bf1e9 100644
70746 --- a/mm/huge_memory.c
70747 +++ b/mm/huge_memory.c
70748 @@ -733,7 +733,7 @@ out:
70749 * run pte_offset_map on the pmd, if an huge pmd could
70750 * materialize from under us from a different thread.
70751 */
70752 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70753 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70754 return VM_FAULT_OOM;
70755 /* if an huge pmd materialized from under us just retry later */
70756 if (unlikely(pmd_trans_huge(*pmd)))
70757 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70758 index 24b1787..e0fbc01 100644
70759 --- a/mm/hugetlb.c
70760 +++ b/mm/hugetlb.c
70761 @@ -2425,6 +2425,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70762 return 1;
70763 }
70764
70765 +#ifdef CONFIG_PAX_SEGMEXEC
70766 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70767 +{
70768 + struct mm_struct *mm = vma->vm_mm;
70769 + struct vm_area_struct *vma_m;
70770 + unsigned long address_m;
70771 + pte_t *ptep_m;
70772 +
70773 + vma_m = pax_find_mirror_vma(vma);
70774 + if (!vma_m)
70775 + return;
70776 +
70777 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70778 + address_m = address + SEGMEXEC_TASK_SIZE;
70779 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70780 + get_page(page_m);
70781 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
70782 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70783 +}
70784 +#endif
70785 +
70786 /*
70787 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70788 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
70789 @@ -2538,6 +2559,11 @@ retry_avoidcopy:
70790 make_huge_pte(vma, new_page, 1));
70791 page_remove_rmap(old_page);
70792 hugepage_add_new_anon_rmap(new_page, vma, address);
70793 +
70794 +#ifdef CONFIG_PAX_SEGMEXEC
70795 + pax_mirror_huge_pte(vma, address, new_page);
70796 +#endif
70797 +
70798 /* Make the old page be freed below */
70799 new_page = old_page;
70800 mmu_notifier_invalidate_range_end(mm,
70801 @@ -2692,6 +2718,10 @@ retry:
70802 && (vma->vm_flags & VM_SHARED)));
70803 set_huge_pte_at(mm, address, ptep, new_pte);
70804
70805 +#ifdef CONFIG_PAX_SEGMEXEC
70806 + pax_mirror_huge_pte(vma, address, page);
70807 +#endif
70808 +
70809 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70810 /* Optimization, do the COW without a second fault */
70811 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70812 @@ -2721,6 +2751,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70813 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70814 struct hstate *h = hstate_vma(vma);
70815
70816 +#ifdef CONFIG_PAX_SEGMEXEC
70817 + struct vm_area_struct *vma_m;
70818 +#endif
70819 +
70820 address &= huge_page_mask(h);
70821
70822 ptep = huge_pte_offset(mm, address);
70823 @@ -2734,6 +2768,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70824 VM_FAULT_SET_HINDEX(h - hstates);
70825 }
70826
70827 +#ifdef CONFIG_PAX_SEGMEXEC
70828 + vma_m = pax_find_mirror_vma(vma);
70829 + if (vma_m) {
70830 + unsigned long address_m;
70831 +
70832 + if (vma->vm_start > vma_m->vm_start) {
70833 + address_m = address;
70834 + address -= SEGMEXEC_TASK_SIZE;
70835 + vma = vma_m;
70836 + h = hstate_vma(vma);
70837 + } else
70838 + address_m = address + SEGMEXEC_TASK_SIZE;
70839 +
70840 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70841 + return VM_FAULT_OOM;
70842 + address_m &= HPAGE_MASK;
70843 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70844 + }
70845 +#endif
70846 +
70847 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70848 if (!ptep)
70849 return VM_FAULT_OOM;
70850 diff --git a/mm/internal.h b/mm/internal.h
70851 index 2189af4..f2ca332 100644
70852 --- a/mm/internal.h
70853 +++ b/mm/internal.h
70854 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70855 * in mm/page_alloc.c
70856 */
70857 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70858 +extern void free_compound_page(struct page *page);
70859 extern void prep_compound_page(struct page *page, unsigned long order);
70860 #ifdef CONFIG_MEMORY_FAILURE
70861 extern bool is_free_buddy_page(struct page *page);
70862 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70863 index 45eb621..6ccd8ea 100644
70864 --- a/mm/kmemleak.c
70865 +++ b/mm/kmemleak.c
70866 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
70867
70868 for (i = 0; i < object->trace_len; i++) {
70869 void *ptr = (void *)object->trace[i];
70870 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70871 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70872 }
70873 }
70874
70875 diff --git a/mm/maccess.c b/mm/maccess.c
70876 index d53adf9..03a24bf 100644
70877 --- a/mm/maccess.c
70878 +++ b/mm/maccess.c
70879 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70880 set_fs(KERNEL_DS);
70881 pagefault_disable();
70882 ret = __copy_from_user_inatomic(dst,
70883 - (__force const void __user *)src, size);
70884 + (const void __force_user *)src, size);
70885 pagefault_enable();
70886 set_fs(old_fs);
70887
70888 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70889
70890 set_fs(KERNEL_DS);
70891 pagefault_disable();
70892 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70893 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70894 pagefault_enable();
70895 set_fs(old_fs);
70896
70897 diff --git a/mm/madvise.c b/mm/madvise.c
70898 index 74bf193..feb6fd3 100644
70899 --- a/mm/madvise.c
70900 +++ b/mm/madvise.c
70901 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70902 pgoff_t pgoff;
70903 unsigned long new_flags = vma->vm_flags;
70904
70905 +#ifdef CONFIG_PAX_SEGMEXEC
70906 + struct vm_area_struct *vma_m;
70907 +#endif
70908 +
70909 switch (behavior) {
70910 case MADV_NORMAL:
70911 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70912 @@ -110,6 +114,13 @@ success:
70913 /*
70914 * vm_flags is protected by the mmap_sem held in write mode.
70915 */
70916 +
70917 +#ifdef CONFIG_PAX_SEGMEXEC
70918 + vma_m = pax_find_mirror_vma(vma);
70919 + if (vma_m)
70920 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70921 +#endif
70922 +
70923 vma->vm_flags = new_flags;
70924
70925 out:
70926 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70927 struct vm_area_struct ** prev,
70928 unsigned long start, unsigned long end)
70929 {
70930 +
70931 +#ifdef CONFIG_PAX_SEGMEXEC
70932 + struct vm_area_struct *vma_m;
70933 +#endif
70934 +
70935 *prev = vma;
70936 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70937 return -EINVAL;
70938 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70939 zap_page_range(vma, start, end - start, &details);
70940 } else
70941 zap_page_range(vma, start, end - start, NULL);
70942 +
70943 +#ifdef CONFIG_PAX_SEGMEXEC
70944 + vma_m = pax_find_mirror_vma(vma);
70945 + if (vma_m) {
70946 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70947 + struct zap_details details = {
70948 + .nonlinear_vma = vma_m,
70949 + .last_index = ULONG_MAX,
70950 + };
70951 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70952 + } else
70953 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70954 + }
70955 +#endif
70956 +
70957 return 0;
70958 }
70959
70960 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70961 if (end < start)
70962 goto out;
70963
70964 +#ifdef CONFIG_PAX_SEGMEXEC
70965 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70966 + if (end > SEGMEXEC_TASK_SIZE)
70967 + goto out;
70968 + } else
70969 +#endif
70970 +
70971 + if (end > TASK_SIZE)
70972 + goto out;
70973 +
70974 error = 0;
70975 if (end == start)
70976 goto out;
70977 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70978 index 56080ea..115071e 100644
70979 --- a/mm/memory-failure.c
70980 +++ b/mm/memory-failure.c
70981 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70982
70983 int sysctl_memory_failure_recovery __read_mostly = 1;
70984
70985 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70986 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70987
70988 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70989
70990 @@ -202,7 +202,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
70991 si.si_signo = SIGBUS;
70992 si.si_errno = 0;
70993 si.si_code = BUS_MCEERR_AO;
70994 - si.si_addr = (void *)addr;
70995 + si.si_addr = (void __user *)addr;
70996 #ifdef __ARCH_SI_TRAPNO
70997 si.si_trapno = trapno;
70998 #endif
70999 @@ -1010,7 +1010,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71000 }
71001
71002 nr_pages = 1 << compound_trans_order(hpage);
71003 - atomic_long_add(nr_pages, &mce_bad_pages);
71004 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
71005
71006 /*
71007 * We need/can do nothing about count=0 pages.
71008 @@ -1040,7 +1040,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71009 if (!PageHWPoison(hpage)
71010 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
71011 || (p != hpage && TestSetPageHWPoison(hpage))) {
71012 - atomic_long_sub(nr_pages, &mce_bad_pages);
71013 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71014 return 0;
71015 }
71016 set_page_hwpoison_huge_page(hpage);
71017 @@ -1098,7 +1098,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
71018 }
71019 if (hwpoison_filter(p)) {
71020 if (TestClearPageHWPoison(p))
71021 - atomic_long_sub(nr_pages, &mce_bad_pages);
71022 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71023 unlock_page(hpage);
71024 put_page(hpage);
71025 return 0;
71026 @@ -1315,7 +1315,7 @@ int unpoison_memory(unsigned long pfn)
71027 return 0;
71028 }
71029 if (TestClearPageHWPoison(p))
71030 - atomic_long_sub(nr_pages, &mce_bad_pages);
71031 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71032 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
71033 return 0;
71034 }
71035 @@ -1329,7 +1329,7 @@ int unpoison_memory(unsigned long pfn)
71036 */
71037 if (TestClearPageHWPoison(page)) {
71038 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
71039 - atomic_long_sub(nr_pages, &mce_bad_pages);
71040 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
71041 freeit = 1;
71042 if (PageHuge(page))
71043 clear_page_hwpoison_huge_page(page);
71044 @@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
71045 }
71046 done:
71047 if (!PageHWPoison(hpage))
71048 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
71049 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
71050 set_page_hwpoison_huge_page(hpage);
71051 dequeue_hwpoisoned_huge_page(hpage);
71052 /* keep elevated page count for bad page */
71053 @@ -1573,7 +1573,7 @@ int soft_offline_page(struct page *page, int flags)
71054 return ret;
71055
71056 done:
71057 - atomic_long_add(1, &mce_bad_pages);
71058 + atomic_long_add_unchecked(1, &mce_bad_pages);
71059 SetPageHWPoison(page);
71060 /* keep elevated page count for bad page */
71061 return ret;
71062 diff --git a/mm/memory.c b/mm/memory.c
71063 index 10b4dda..06857f3 100644
71064 --- a/mm/memory.c
71065 +++ b/mm/memory.c
71066 @@ -457,8 +457,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
71067 return;
71068
71069 pmd = pmd_offset(pud, start);
71070 +
71071 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
71072 pud_clear(pud);
71073 pmd_free_tlb(tlb, pmd, start);
71074 +#endif
71075 +
71076 }
71077
71078 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71079 @@ -489,9 +493,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
71080 if (end - 1 > ceiling - 1)
71081 return;
71082
71083 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
71084 pud = pud_offset(pgd, start);
71085 pgd_clear(pgd);
71086 pud_free_tlb(tlb, pud, start);
71087 +#endif
71088 +
71089 }
71090
71091 /*
71092 @@ -1593,12 +1600,6 @@ no_page_table:
71093 return page;
71094 }
71095
71096 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
71097 -{
71098 - return stack_guard_page_start(vma, addr) ||
71099 - stack_guard_page_end(vma, addr+PAGE_SIZE);
71100 -}
71101 -
71102 /**
71103 * __get_user_pages() - pin user pages in memory
71104 * @tsk: task_struct of target task
71105 @@ -1671,10 +1672,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71106 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
71107 i = 0;
71108
71109 - do {
71110 + while (nr_pages) {
71111 struct vm_area_struct *vma;
71112
71113 - vma = find_extend_vma(mm, start);
71114 + vma = find_vma(mm, start);
71115 if (!vma && in_gate_area(mm, start)) {
71116 unsigned long pg = start & PAGE_MASK;
71117 pgd_t *pgd;
71118 @@ -1722,7 +1723,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71119 goto next_page;
71120 }
71121
71122 - if (!vma ||
71123 + if (!vma || start < vma->vm_start ||
71124 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
71125 !(vm_flags & vma->vm_flags))
71126 return i ? : -EFAULT;
71127 @@ -1749,11 +1750,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71128 int ret;
71129 unsigned int fault_flags = 0;
71130
71131 - /* For mlock, just skip the stack guard page. */
71132 - if (foll_flags & FOLL_MLOCK) {
71133 - if (stack_guard_page(vma, start))
71134 - goto next_page;
71135 - }
71136 if (foll_flags & FOLL_WRITE)
71137 fault_flags |= FAULT_FLAG_WRITE;
71138 if (nonblocking)
71139 @@ -1827,7 +1823,7 @@ next_page:
71140 start += PAGE_SIZE;
71141 nr_pages--;
71142 } while (nr_pages && start < vma->vm_end);
71143 - } while (nr_pages);
71144 + }
71145 return i;
71146 }
71147 EXPORT_SYMBOL(__get_user_pages);
71148 @@ -2034,6 +2030,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
71149 page_add_file_rmap(page);
71150 set_pte_at(mm, addr, pte, mk_pte(page, prot));
71151
71152 +#ifdef CONFIG_PAX_SEGMEXEC
71153 + pax_mirror_file_pte(vma, addr, page, ptl);
71154 +#endif
71155 +
71156 retval = 0;
71157 pte_unmap_unlock(pte, ptl);
71158 return retval;
71159 @@ -2068,10 +2068,22 @@ out:
71160 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
71161 struct page *page)
71162 {
71163 +
71164 +#ifdef CONFIG_PAX_SEGMEXEC
71165 + struct vm_area_struct *vma_m;
71166 +#endif
71167 +
71168 if (addr < vma->vm_start || addr >= vma->vm_end)
71169 return -EFAULT;
71170 if (!page_count(page))
71171 return -EINVAL;
71172 +
71173 +#ifdef CONFIG_PAX_SEGMEXEC
71174 + vma_m = pax_find_mirror_vma(vma);
71175 + if (vma_m)
71176 + vma_m->vm_flags |= VM_INSERTPAGE;
71177 +#endif
71178 +
71179 vma->vm_flags |= VM_INSERTPAGE;
71180 return insert_page(vma, addr, page, vma->vm_page_prot);
71181 }
71182 @@ -2157,6 +2169,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
71183 unsigned long pfn)
71184 {
71185 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
71186 + BUG_ON(vma->vm_mirror);
71187
71188 if (addr < vma->vm_start || addr >= vma->vm_end)
71189 return -EFAULT;
71190 @@ -2364,7 +2377,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
71191
71192 BUG_ON(pud_huge(*pud));
71193
71194 - pmd = pmd_alloc(mm, pud, addr);
71195 + pmd = (mm == &init_mm) ?
71196 + pmd_alloc_kernel(mm, pud, addr) :
71197 + pmd_alloc(mm, pud, addr);
71198 if (!pmd)
71199 return -ENOMEM;
71200 do {
71201 @@ -2384,7 +2399,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
71202 unsigned long next;
71203 int err;
71204
71205 - pud = pud_alloc(mm, pgd, addr);
71206 + pud = (mm == &init_mm) ?
71207 + pud_alloc_kernel(mm, pgd, addr) :
71208 + pud_alloc(mm, pgd, addr);
71209 if (!pud)
71210 return -ENOMEM;
71211 do {
71212 @@ -2472,6 +2489,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
71213 copy_user_highpage(dst, src, va, vma);
71214 }
71215
71216 +#ifdef CONFIG_PAX_SEGMEXEC
71217 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
71218 +{
71219 + struct mm_struct *mm = vma->vm_mm;
71220 + spinlock_t *ptl;
71221 + pte_t *pte, entry;
71222 +
71223 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
71224 + entry = *pte;
71225 + if (!pte_present(entry)) {
71226 + if (!pte_none(entry)) {
71227 + BUG_ON(pte_file(entry));
71228 + free_swap_and_cache(pte_to_swp_entry(entry));
71229 + pte_clear_not_present_full(mm, address, pte, 0);
71230 + }
71231 + } else {
71232 + struct page *page;
71233 +
71234 + flush_cache_page(vma, address, pte_pfn(entry));
71235 + entry = ptep_clear_flush(vma, address, pte);
71236 + BUG_ON(pte_dirty(entry));
71237 + page = vm_normal_page(vma, address, entry);
71238 + if (page) {
71239 + update_hiwater_rss(mm);
71240 + if (PageAnon(page))
71241 + dec_mm_counter_fast(mm, MM_ANONPAGES);
71242 + else
71243 + dec_mm_counter_fast(mm, MM_FILEPAGES);
71244 + page_remove_rmap(page);
71245 + page_cache_release(page);
71246 + }
71247 + }
71248 + pte_unmap_unlock(pte, ptl);
71249 +}
71250 +
71251 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
71252 + *
71253 + * the ptl of the lower mapped page is held on entry and is not released on exit
71254 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
71255 + */
71256 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71257 +{
71258 + struct mm_struct *mm = vma->vm_mm;
71259 + unsigned long address_m;
71260 + spinlock_t *ptl_m;
71261 + struct vm_area_struct *vma_m;
71262 + pmd_t *pmd_m;
71263 + pte_t *pte_m, entry_m;
71264 +
71265 + BUG_ON(!page_m || !PageAnon(page_m));
71266 +
71267 + vma_m = pax_find_mirror_vma(vma);
71268 + if (!vma_m)
71269 + return;
71270 +
71271 + BUG_ON(!PageLocked(page_m));
71272 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71273 + address_m = address + SEGMEXEC_TASK_SIZE;
71274 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71275 + pte_m = pte_offset_map(pmd_m, address_m);
71276 + ptl_m = pte_lockptr(mm, pmd_m);
71277 + if (ptl != ptl_m) {
71278 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71279 + if (!pte_none(*pte_m))
71280 + goto out;
71281 + }
71282 +
71283 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71284 + page_cache_get(page_m);
71285 + page_add_anon_rmap(page_m, vma_m, address_m);
71286 + inc_mm_counter_fast(mm, MM_ANONPAGES);
71287 + set_pte_at(mm, address_m, pte_m, entry_m);
71288 + update_mmu_cache(vma_m, address_m, entry_m);
71289 +out:
71290 + if (ptl != ptl_m)
71291 + spin_unlock(ptl_m);
71292 + pte_unmap(pte_m);
71293 + unlock_page(page_m);
71294 +}
71295 +
71296 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
71297 +{
71298 + struct mm_struct *mm = vma->vm_mm;
71299 + unsigned long address_m;
71300 + spinlock_t *ptl_m;
71301 + struct vm_area_struct *vma_m;
71302 + pmd_t *pmd_m;
71303 + pte_t *pte_m, entry_m;
71304 +
71305 + BUG_ON(!page_m || PageAnon(page_m));
71306 +
71307 + vma_m = pax_find_mirror_vma(vma);
71308 + if (!vma_m)
71309 + return;
71310 +
71311 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71312 + address_m = address + SEGMEXEC_TASK_SIZE;
71313 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71314 + pte_m = pte_offset_map(pmd_m, address_m);
71315 + ptl_m = pte_lockptr(mm, pmd_m);
71316 + if (ptl != ptl_m) {
71317 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71318 + if (!pte_none(*pte_m))
71319 + goto out;
71320 + }
71321 +
71322 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
71323 + page_cache_get(page_m);
71324 + page_add_file_rmap(page_m);
71325 + inc_mm_counter_fast(mm, MM_FILEPAGES);
71326 + set_pte_at(mm, address_m, pte_m, entry_m);
71327 + update_mmu_cache(vma_m, address_m, entry_m);
71328 +out:
71329 + if (ptl != ptl_m)
71330 + spin_unlock(ptl_m);
71331 + pte_unmap(pte_m);
71332 +}
71333 +
71334 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
71335 +{
71336 + struct mm_struct *mm = vma->vm_mm;
71337 + unsigned long address_m;
71338 + spinlock_t *ptl_m;
71339 + struct vm_area_struct *vma_m;
71340 + pmd_t *pmd_m;
71341 + pte_t *pte_m, entry_m;
71342 +
71343 + vma_m = pax_find_mirror_vma(vma);
71344 + if (!vma_m)
71345 + return;
71346 +
71347 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
71348 + address_m = address + SEGMEXEC_TASK_SIZE;
71349 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
71350 + pte_m = pte_offset_map(pmd_m, address_m);
71351 + ptl_m = pte_lockptr(mm, pmd_m);
71352 + if (ptl != ptl_m) {
71353 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
71354 + if (!pte_none(*pte_m))
71355 + goto out;
71356 + }
71357 +
71358 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
71359 + set_pte_at(mm, address_m, pte_m, entry_m);
71360 +out:
71361 + if (ptl != ptl_m)
71362 + spin_unlock(ptl_m);
71363 + pte_unmap(pte_m);
71364 +}
71365 +
71366 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
71367 +{
71368 + struct page *page_m;
71369 + pte_t entry;
71370 +
71371 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
71372 + goto out;
71373 +
71374 + entry = *pte;
71375 + page_m = vm_normal_page(vma, address, entry);
71376 + if (!page_m)
71377 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
71378 + else if (PageAnon(page_m)) {
71379 + if (pax_find_mirror_vma(vma)) {
71380 + pte_unmap_unlock(pte, ptl);
71381 + lock_page(page_m);
71382 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
71383 + if (pte_same(entry, *pte))
71384 + pax_mirror_anon_pte(vma, address, page_m, ptl);
71385 + else
71386 + unlock_page(page_m);
71387 + }
71388 + } else
71389 + pax_mirror_file_pte(vma, address, page_m, ptl);
71390 +
71391 +out:
71392 + pte_unmap_unlock(pte, ptl);
71393 +}
71394 +#endif
71395 +
71396 /*
71397 * This routine handles present pages, when users try to write
71398 * to a shared page. It is done by copying the page to a new address
71399 @@ -2683,6 +2880,12 @@ gotten:
71400 */
71401 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71402 if (likely(pte_same(*page_table, orig_pte))) {
71403 +
71404 +#ifdef CONFIG_PAX_SEGMEXEC
71405 + if (pax_find_mirror_vma(vma))
71406 + BUG_ON(!trylock_page(new_page));
71407 +#endif
71408 +
71409 if (old_page) {
71410 if (!PageAnon(old_page)) {
71411 dec_mm_counter_fast(mm, MM_FILEPAGES);
71412 @@ -2734,6 +2937,10 @@ gotten:
71413 page_remove_rmap(old_page);
71414 }
71415
71416 +#ifdef CONFIG_PAX_SEGMEXEC
71417 + pax_mirror_anon_pte(vma, address, new_page, ptl);
71418 +#endif
71419 +
71420 /* Free the old page.. */
71421 new_page = old_page;
71422 ret |= VM_FAULT_WRITE;
71423 @@ -3013,6 +3220,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
71424 swap_free(entry);
71425 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
71426 try_to_free_swap(page);
71427 +
71428 +#ifdef CONFIG_PAX_SEGMEXEC
71429 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
71430 +#endif
71431 +
71432 unlock_page(page);
71433 if (swapcache) {
71434 /*
71435 @@ -3036,6 +3248,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
71436
71437 /* No need to invalidate - it was non-present before */
71438 update_mmu_cache(vma, address, page_table);
71439 +
71440 +#ifdef CONFIG_PAX_SEGMEXEC
71441 + pax_mirror_anon_pte(vma, address, page, ptl);
71442 +#endif
71443 +
71444 unlock:
71445 pte_unmap_unlock(page_table, ptl);
71446 out:
71447 @@ -3055,40 +3272,6 @@ out_release:
71448 }
71449
71450 /*
71451 - * This is like a special single-page "expand_{down|up}wards()",
71452 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
71453 - * doesn't hit another vma.
71454 - */
71455 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
71456 -{
71457 - address &= PAGE_MASK;
71458 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
71459 - struct vm_area_struct *prev = vma->vm_prev;
71460 -
71461 - /*
71462 - * Is there a mapping abutting this one below?
71463 - *
71464 - * That's only ok if it's the same stack mapping
71465 - * that has gotten split..
71466 - */
71467 - if (prev && prev->vm_end == address)
71468 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
71469 -
71470 - expand_downwards(vma, address - PAGE_SIZE);
71471 - }
71472 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
71473 - struct vm_area_struct *next = vma->vm_next;
71474 -
71475 - /* As VM_GROWSDOWN but s/below/above/ */
71476 - if (next && next->vm_start == address + PAGE_SIZE)
71477 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
71478 -
71479 - expand_upwards(vma, address + PAGE_SIZE);
71480 - }
71481 - return 0;
71482 -}
71483 -
71484 -/*
71485 * We enter with non-exclusive mmap_sem (to exclude vma changes,
71486 * but allow concurrent faults), and pte mapped but not yet locked.
71487 * We return with mmap_sem still held, but pte unmapped and unlocked.
71488 @@ -3097,27 +3280,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71489 unsigned long address, pte_t *page_table, pmd_t *pmd,
71490 unsigned int flags)
71491 {
71492 - struct page *page;
71493 + struct page *page = NULL;
71494 spinlock_t *ptl;
71495 pte_t entry;
71496
71497 - pte_unmap(page_table);
71498 -
71499 - /* Check if we need to add a guard page to the stack */
71500 - if (check_stack_guard_page(vma, address) < 0)
71501 - return VM_FAULT_SIGBUS;
71502 -
71503 - /* Use the zero-page for reads */
71504 if (!(flags & FAULT_FLAG_WRITE)) {
71505 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71506 vma->vm_page_prot));
71507 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71508 + ptl = pte_lockptr(mm, pmd);
71509 + spin_lock(ptl);
71510 if (!pte_none(*page_table))
71511 goto unlock;
71512 goto setpte;
71513 }
71514
71515 /* Allocate our own private page. */
71516 + pte_unmap(page_table);
71517 +
71518 if (unlikely(anon_vma_prepare(vma)))
71519 goto oom;
71520 page = alloc_zeroed_user_highpage_movable(vma, address);
71521 @@ -3136,6 +3315,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71522 if (!pte_none(*page_table))
71523 goto release;
71524
71525 +#ifdef CONFIG_PAX_SEGMEXEC
71526 + if (pax_find_mirror_vma(vma))
71527 + BUG_ON(!trylock_page(page));
71528 +#endif
71529 +
71530 inc_mm_counter_fast(mm, MM_ANONPAGES);
71531 page_add_new_anon_rmap(page, vma, address);
71532 setpte:
71533 @@ -3143,6 +3327,12 @@ setpte:
71534
71535 /* No need to invalidate - it was non-present before */
71536 update_mmu_cache(vma, address, page_table);
71537 +
71538 +#ifdef CONFIG_PAX_SEGMEXEC
71539 + if (page)
71540 + pax_mirror_anon_pte(vma, address, page, ptl);
71541 +#endif
71542 +
71543 unlock:
71544 pte_unmap_unlock(page_table, ptl);
71545 return 0;
71546 @@ -3286,6 +3476,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71547 */
71548 /* Only go through if we didn't race with anybody else... */
71549 if (likely(pte_same(*page_table, orig_pte))) {
71550 +
71551 +#ifdef CONFIG_PAX_SEGMEXEC
71552 + if (anon && pax_find_mirror_vma(vma))
71553 + BUG_ON(!trylock_page(page));
71554 +#endif
71555 +
71556 flush_icache_page(vma, page);
71557 entry = mk_pte(page, vma->vm_page_prot);
71558 if (flags & FAULT_FLAG_WRITE)
71559 @@ -3305,6 +3501,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71560
71561 /* no need to invalidate: a not-present page won't be cached */
71562 update_mmu_cache(vma, address, page_table);
71563 +
71564 +#ifdef CONFIG_PAX_SEGMEXEC
71565 + if (anon)
71566 + pax_mirror_anon_pte(vma, address, page, ptl);
71567 + else
71568 + pax_mirror_file_pte(vma, address, page, ptl);
71569 +#endif
71570 +
71571 } else {
71572 if (cow_page)
71573 mem_cgroup_uncharge_page(cow_page);
71574 @@ -3458,6 +3662,12 @@ int handle_pte_fault(struct mm_struct *mm,
71575 if (flags & FAULT_FLAG_WRITE)
71576 flush_tlb_fix_spurious_fault(vma, address);
71577 }
71578 +
71579 +#ifdef CONFIG_PAX_SEGMEXEC
71580 + pax_mirror_pte(vma, address, pte, pmd, ptl);
71581 + return 0;
71582 +#endif
71583 +
71584 unlock:
71585 pte_unmap_unlock(pte, ptl);
71586 return 0;
71587 @@ -3474,6 +3684,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71588 pmd_t *pmd;
71589 pte_t *pte;
71590
71591 +#ifdef CONFIG_PAX_SEGMEXEC
71592 + struct vm_area_struct *vma_m;
71593 +#endif
71594 +
71595 __set_current_state(TASK_RUNNING);
71596
71597 count_vm_event(PGFAULT);
71598 @@ -3485,6 +3699,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71599 if (unlikely(is_vm_hugetlb_page(vma)))
71600 return hugetlb_fault(mm, vma, address, flags);
71601
71602 +#ifdef CONFIG_PAX_SEGMEXEC
71603 + vma_m = pax_find_mirror_vma(vma);
71604 + if (vma_m) {
71605 + unsigned long address_m;
71606 + pgd_t *pgd_m;
71607 + pud_t *pud_m;
71608 + pmd_t *pmd_m;
71609 +
71610 + if (vma->vm_start > vma_m->vm_start) {
71611 + address_m = address;
71612 + address -= SEGMEXEC_TASK_SIZE;
71613 + vma = vma_m;
71614 + } else
71615 + address_m = address + SEGMEXEC_TASK_SIZE;
71616 +
71617 + pgd_m = pgd_offset(mm, address_m);
71618 + pud_m = pud_alloc(mm, pgd_m, address_m);
71619 + if (!pud_m)
71620 + return VM_FAULT_OOM;
71621 + pmd_m = pmd_alloc(mm, pud_m, address_m);
71622 + if (!pmd_m)
71623 + return VM_FAULT_OOM;
71624 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71625 + return VM_FAULT_OOM;
71626 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71627 + }
71628 +#endif
71629 +
71630 pgd = pgd_offset(mm, address);
71631 pud = pud_alloc(mm, pgd, address);
71632 if (!pud)
71633 @@ -3514,7 +3756,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71634 * run pte_offset_map on the pmd, if an huge pmd could
71635 * materialize from under us from a different thread.
71636 */
71637 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71638 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71639 return VM_FAULT_OOM;
71640 /* if an huge pmd materialized from under us just retry later */
71641 if (unlikely(pmd_trans_huge(*pmd)))
71642 @@ -3551,6 +3793,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71643 spin_unlock(&mm->page_table_lock);
71644 return 0;
71645 }
71646 +
71647 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71648 +{
71649 + pud_t *new = pud_alloc_one(mm, address);
71650 + if (!new)
71651 + return -ENOMEM;
71652 +
71653 + smp_wmb(); /* See comment in __pte_alloc */
71654 +
71655 + spin_lock(&mm->page_table_lock);
71656 + if (pgd_present(*pgd)) /* Another has populated it */
71657 + pud_free(mm, new);
71658 + else
71659 + pgd_populate_kernel(mm, pgd, new);
71660 + spin_unlock(&mm->page_table_lock);
71661 + return 0;
71662 +}
71663 #endif /* __PAGETABLE_PUD_FOLDED */
71664
71665 #ifndef __PAGETABLE_PMD_FOLDED
71666 @@ -3581,6 +3840,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
71667 spin_unlock(&mm->page_table_lock);
71668 return 0;
71669 }
71670 +
71671 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
71672 +{
71673 + pmd_t *new = pmd_alloc_one(mm, address);
71674 + if (!new)
71675 + return -ENOMEM;
71676 +
71677 + smp_wmb(); /* See comment in __pte_alloc */
71678 +
71679 + spin_lock(&mm->page_table_lock);
71680 +#ifndef __ARCH_HAS_4LEVEL_HACK
71681 + if (pud_present(*pud)) /* Another has populated it */
71682 + pmd_free(mm, new);
71683 + else
71684 + pud_populate_kernel(mm, pud, new);
71685 +#else
71686 + if (pgd_present(*pud)) /* Another has populated it */
71687 + pmd_free(mm, new);
71688 + else
71689 + pgd_populate_kernel(mm, pud, new);
71690 +#endif /* __ARCH_HAS_4LEVEL_HACK */
71691 + spin_unlock(&mm->page_table_lock);
71692 + return 0;
71693 +}
71694 #endif /* __PAGETABLE_PMD_FOLDED */
71695
71696 int make_pages_present(unsigned long addr, unsigned long end)
71697 @@ -3618,7 +3901,7 @@ static int __init gate_vma_init(void)
71698 gate_vma.vm_start = FIXADDR_USER_START;
71699 gate_vma.vm_end = FIXADDR_USER_END;
71700 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71701 - gate_vma.vm_page_prot = __P101;
71702 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71703 /*
71704 * Make sure the vDSO gets into every core dump.
71705 * Dumping its contents makes post-mortem fully interpretable later
71706 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
71707 index 0a37570..2048346 100644
71708 --- a/mm/mempolicy.c
71709 +++ b/mm/mempolicy.c
71710 @@ -640,6 +640,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71711 unsigned long vmstart;
71712 unsigned long vmend;
71713
71714 +#ifdef CONFIG_PAX_SEGMEXEC
71715 + struct vm_area_struct *vma_m;
71716 +#endif
71717 +
71718 vma = find_vma(mm, start);
71719 if (!vma || vma->vm_start > start)
71720 return -EFAULT;
71721 @@ -679,6 +683,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71722 err = policy_vma(vma, new_pol);
71723 if (err)
71724 goto out;
71725 +
71726 +#ifdef CONFIG_PAX_SEGMEXEC
71727 + vma_m = pax_find_mirror_vma(vma);
71728 + if (vma_m) {
71729 + err = policy_vma(vma_m, new_pol);
71730 + if (err)
71731 + goto out;
71732 + }
71733 +#endif
71734 +
71735 }
71736
71737 out:
71738 @@ -1112,6 +1126,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71739
71740 if (end < start)
71741 return -EINVAL;
71742 +
71743 +#ifdef CONFIG_PAX_SEGMEXEC
71744 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71745 + if (end > SEGMEXEC_TASK_SIZE)
71746 + return -EINVAL;
71747 + } else
71748 +#endif
71749 +
71750 + if (end > TASK_SIZE)
71751 + return -EINVAL;
71752 +
71753 if (end == start)
71754 return 0;
71755
71756 @@ -1330,6 +1355,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71757 if (!mm)
71758 goto out;
71759
71760 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71761 + if (mm != current->mm &&
71762 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71763 + err = -EPERM;
71764 + goto out;
71765 + }
71766 +#endif
71767 +
71768 /*
71769 * Check if this process has the right to modify the specified
71770 * process. The right exists if the process has administrative
71771 @@ -1339,8 +1372,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71772 rcu_read_lock();
71773 tcred = __task_cred(task);
71774 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71775 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71776 - !capable(CAP_SYS_NICE)) {
71777 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71778 rcu_read_unlock();
71779 err = -EPERM;
71780 goto out;
71781 diff --git a/mm/migrate.c b/mm/migrate.c
71782 index 1503b6b..156c672 100644
71783 --- a/mm/migrate.c
71784 +++ b/mm/migrate.c
71785 @@ -1370,6 +1370,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71786 if (!mm)
71787 return -EINVAL;
71788
71789 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71790 + if (mm != current->mm &&
71791 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71792 + err = -EPERM;
71793 + goto out;
71794 + }
71795 +#endif
71796 +
71797 /*
71798 * Check if this process has the right to modify the specified
71799 * process. The right exists if the process has administrative
71800 @@ -1379,8 +1387,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
71801 rcu_read_lock();
71802 tcred = __task_cred(task);
71803 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71804 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71805 - !capable(CAP_SYS_NICE)) {
71806 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71807 rcu_read_unlock();
71808 err = -EPERM;
71809 goto out;
71810 diff --git a/mm/mlock.c b/mm/mlock.c
71811 index ef726e8..13e0901 100644
71812 --- a/mm/mlock.c
71813 +++ b/mm/mlock.c
71814 @@ -13,6 +13,7 @@
71815 #include <linux/pagemap.h>
71816 #include <linux/mempolicy.h>
71817 #include <linux/syscalls.h>
71818 +#include <linux/security.h>
71819 #include <linux/sched.h>
71820 #include <linux/export.h>
71821 #include <linux/rmap.h>
71822 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71823 return -EINVAL;
71824 if (end == start)
71825 return 0;
71826 + if (end > TASK_SIZE)
71827 + return -EINVAL;
71828 +
71829 vma = find_vma(current->mm, start);
71830 if (!vma || vma->vm_start > start)
71831 return -ENOMEM;
71832 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71833 for (nstart = start ; ; ) {
71834 vm_flags_t newflags;
71835
71836 +#ifdef CONFIG_PAX_SEGMEXEC
71837 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71838 + break;
71839 +#endif
71840 +
71841 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71842
71843 newflags = vma->vm_flags | VM_LOCKED;
71844 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71845 lock_limit >>= PAGE_SHIFT;
71846
71847 /* check against resource limits */
71848 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71849 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71850 error = do_mlock(start, len, 1);
71851 up_write(&current->mm->mmap_sem);
71852 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71853 static int do_mlockall(int flags)
71854 {
71855 struct vm_area_struct * vma, * prev = NULL;
71856 - unsigned int def_flags = 0;
71857
71858 if (flags & MCL_FUTURE)
71859 - def_flags = VM_LOCKED;
71860 - current->mm->def_flags = def_flags;
71861 + current->mm->def_flags |= VM_LOCKED;
71862 + else
71863 + current->mm->def_flags &= ~VM_LOCKED;
71864 if (flags == MCL_FUTURE)
71865 goto out;
71866
71867 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71868 vm_flags_t newflags;
71869
71870 +#ifdef CONFIG_PAX_SEGMEXEC
71871 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71872 + break;
71873 +#endif
71874 +
71875 + BUG_ON(vma->vm_end > TASK_SIZE);
71876 newflags = vma->vm_flags | VM_LOCKED;
71877 if (!(flags & MCL_CURRENT))
71878 newflags &= ~VM_LOCKED;
71879 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71880 lock_limit >>= PAGE_SHIFT;
71881
71882 ret = -ENOMEM;
71883 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71884 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71885 capable(CAP_IPC_LOCK))
71886 ret = do_mlockall(flags);
71887 diff --git a/mm/mmap.c b/mm/mmap.c
71888 index da15a79..314aef3 100644
71889 --- a/mm/mmap.c
71890 +++ b/mm/mmap.c
71891 @@ -46,6 +46,16 @@
71892 #define arch_rebalance_pgtables(addr, len) (addr)
71893 #endif
71894
71895 +static inline void verify_mm_writelocked(struct mm_struct *mm)
71896 +{
71897 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71898 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71899 + up_read(&mm->mmap_sem);
71900 + BUG();
71901 + }
71902 +#endif
71903 +}
71904 +
71905 static void unmap_region(struct mm_struct *mm,
71906 struct vm_area_struct *vma, struct vm_area_struct *prev,
71907 unsigned long start, unsigned long end);
71908 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71909 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71910 *
71911 */
71912 -pgprot_t protection_map[16] = {
71913 +pgprot_t protection_map[16] __read_only = {
71914 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71915 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71916 };
71917
71918 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
71919 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71920 {
71921 - return __pgprot(pgprot_val(protection_map[vm_flags &
71922 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71923 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71924 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71925 +
71926 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71927 + if (!(__supported_pte_mask & _PAGE_NX) &&
71928 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71929 + (vm_flags & (VM_READ | VM_WRITE)))
71930 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71931 +#endif
71932 +
71933 + return prot;
71934 }
71935 EXPORT_SYMBOL(vm_get_page_prot);
71936
71937 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71938 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71939 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71940 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71941 /*
71942 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71943 * other variables. It can be updated by several CPUs frequently.
71944 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71945 struct vm_area_struct *next = vma->vm_next;
71946
71947 might_sleep();
71948 + BUG_ON(vma->vm_mirror);
71949 if (vma->vm_ops && vma->vm_ops->close)
71950 vma->vm_ops->close(vma);
71951 if (vma->vm_file) {
71952 @@ -272,6 +293,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71953 * not page aligned -Ram Gupta
71954 */
71955 rlim = rlimit(RLIMIT_DATA);
71956 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71957 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71958 (mm->end_data - mm->start_data) > rlim)
71959 goto out;
71960 @@ -689,6 +711,12 @@ static int
71961 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71962 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71963 {
71964 +
71965 +#ifdef CONFIG_PAX_SEGMEXEC
71966 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71967 + return 0;
71968 +#endif
71969 +
71970 if (is_mergeable_vma(vma, file, vm_flags) &&
71971 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71972 if (vma->vm_pgoff == vm_pgoff)
71973 @@ -708,6 +736,12 @@ static int
71974 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71975 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71976 {
71977 +
71978 +#ifdef CONFIG_PAX_SEGMEXEC
71979 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71980 + return 0;
71981 +#endif
71982 +
71983 if (is_mergeable_vma(vma, file, vm_flags) &&
71984 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71985 pgoff_t vm_pglen;
71986 @@ -750,13 +784,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71987 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71988 struct vm_area_struct *prev, unsigned long addr,
71989 unsigned long end, unsigned long vm_flags,
71990 - struct anon_vma *anon_vma, struct file *file,
71991 + struct anon_vma *anon_vma, struct file *file,
71992 pgoff_t pgoff, struct mempolicy *policy)
71993 {
71994 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71995 struct vm_area_struct *area, *next;
71996 int err;
71997
71998 +#ifdef CONFIG_PAX_SEGMEXEC
71999 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
72000 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
72001 +
72002 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
72003 +#endif
72004 +
72005 /*
72006 * We later require that vma->vm_flags == vm_flags,
72007 * so this tests vma->vm_flags & VM_SPECIAL, too.
72008 @@ -772,6 +813,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72009 if (next && next->vm_end == end) /* cases 6, 7, 8 */
72010 next = next->vm_next;
72011
72012 +#ifdef CONFIG_PAX_SEGMEXEC
72013 + if (prev)
72014 + prev_m = pax_find_mirror_vma(prev);
72015 + if (area)
72016 + area_m = pax_find_mirror_vma(area);
72017 + if (next)
72018 + next_m = pax_find_mirror_vma(next);
72019 +#endif
72020 +
72021 /*
72022 * Can it merge with the predecessor?
72023 */
72024 @@ -791,9 +841,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72025 /* cases 1, 6 */
72026 err = vma_adjust(prev, prev->vm_start,
72027 next->vm_end, prev->vm_pgoff, NULL);
72028 - } else /* cases 2, 5, 7 */
72029 +
72030 +#ifdef CONFIG_PAX_SEGMEXEC
72031 + if (!err && prev_m)
72032 + err = vma_adjust(prev_m, prev_m->vm_start,
72033 + next_m->vm_end, prev_m->vm_pgoff, NULL);
72034 +#endif
72035 +
72036 + } else { /* cases 2, 5, 7 */
72037 err = vma_adjust(prev, prev->vm_start,
72038 end, prev->vm_pgoff, NULL);
72039 +
72040 +#ifdef CONFIG_PAX_SEGMEXEC
72041 + if (!err && prev_m)
72042 + err = vma_adjust(prev_m, prev_m->vm_start,
72043 + end_m, prev_m->vm_pgoff, NULL);
72044 +#endif
72045 +
72046 + }
72047 if (err)
72048 return NULL;
72049 khugepaged_enter_vma_merge(prev);
72050 @@ -807,12 +872,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
72051 mpol_equal(policy, vma_policy(next)) &&
72052 can_vma_merge_before(next, vm_flags,
72053 anon_vma, file, pgoff+pglen)) {
72054 - if (prev && addr < prev->vm_end) /* case 4 */
72055 + if (prev && addr < prev->vm_end) { /* case 4 */
72056 err = vma_adjust(prev, prev->vm_start,
72057 addr, prev->vm_pgoff, NULL);
72058 - else /* cases 3, 8 */
72059 +
72060 +#ifdef CONFIG_PAX_SEGMEXEC
72061 + if (!err && prev_m)
72062 + err = vma_adjust(prev_m, prev_m->vm_start,
72063 + addr_m, prev_m->vm_pgoff, NULL);
72064 +#endif
72065 +
72066 + } else { /* cases 3, 8 */
72067 err = vma_adjust(area, addr, next->vm_end,
72068 next->vm_pgoff - pglen, NULL);
72069 +
72070 +#ifdef CONFIG_PAX_SEGMEXEC
72071 + if (!err && area_m)
72072 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
72073 + next_m->vm_pgoff - pglen, NULL);
72074 +#endif
72075 +
72076 + }
72077 if (err)
72078 return NULL;
72079 khugepaged_enter_vma_merge(area);
72080 @@ -921,14 +1001,11 @@ none:
72081 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
72082 struct file *file, long pages)
72083 {
72084 - const unsigned long stack_flags
72085 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
72086 -
72087 if (file) {
72088 mm->shared_vm += pages;
72089 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
72090 mm->exec_vm += pages;
72091 - } else if (flags & stack_flags)
72092 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
72093 mm->stack_vm += pages;
72094 if (flags & (VM_RESERVED|VM_IO))
72095 mm->reserved_vm += pages;
72096 @@ -955,7 +1032,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72097 * (the exception is when the underlying filesystem is noexec
72098 * mounted, in which case we dont add PROT_EXEC.)
72099 */
72100 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72101 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72102 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
72103 prot |= PROT_EXEC;
72104
72105 @@ -981,7 +1058,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72106 /* Obtain the address to map to. we verify (or select) it and ensure
72107 * that it represents a valid section of the address space.
72108 */
72109 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
72110 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
72111 if (addr & ~PAGE_MASK)
72112 return addr;
72113
72114 @@ -992,6 +1069,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72115 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
72116 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
72117
72118 +#ifdef CONFIG_PAX_MPROTECT
72119 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72120 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72121 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
72122 + gr_log_rwxmmap(file);
72123 +
72124 +#ifdef CONFIG_PAX_EMUPLT
72125 + vm_flags &= ~VM_EXEC;
72126 +#else
72127 + return -EPERM;
72128 +#endif
72129 +
72130 + }
72131 +
72132 + if (!(vm_flags & VM_EXEC))
72133 + vm_flags &= ~VM_MAYEXEC;
72134 +#else
72135 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72136 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72137 +#endif
72138 + else
72139 + vm_flags &= ~VM_MAYWRITE;
72140 + }
72141 +#endif
72142 +
72143 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72144 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
72145 + vm_flags &= ~VM_PAGEEXEC;
72146 +#endif
72147 +
72148 if (flags & MAP_LOCKED)
72149 if (!can_do_mlock())
72150 return -EPERM;
72151 @@ -1003,6 +1110,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72152 locked += mm->locked_vm;
72153 lock_limit = rlimit(RLIMIT_MEMLOCK);
72154 lock_limit >>= PAGE_SHIFT;
72155 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72156 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
72157 return -EAGAIN;
72158 }
72159 @@ -1073,6 +1181,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
72160 if (error)
72161 return error;
72162
72163 + if (!gr_acl_handle_mmap(file, prot))
72164 + return -EACCES;
72165 +
72166 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
72167 }
72168 EXPORT_SYMBOL(do_mmap_pgoff);
72169 @@ -1153,7 +1264,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
72170 vm_flags_t vm_flags = vma->vm_flags;
72171
72172 /* If it was private or non-writable, the write bit is already clear */
72173 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
72174 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
72175 return 0;
72176
72177 /* The backer wishes to know when pages are first written to? */
72178 @@ -1202,14 +1313,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
72179 unsigned long charged = 0;
72180 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
72181
72182 +#ifdef CONFIG_PAX_SEGMEXEC
72183 + struct vm_area_struct *vma_m = NULL;
72184 +#endif
72185 +
72186 + /*
72187 + * mm->mmap_sem is required to protect against another thread
72188 + * changing the mappings in case we sleep.
72189 + */
72190 + verify_mm_writelocked(mm);
72191 +
72192 /* Clear old maps */
72193 error = -ENOMEM;
72194 -munmap_back:
72195 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72196 if (vma && vma->vm_start < addr + len) {
72197 if (do_munmap(mm, addr, len))
72198 return -ENOMEM;
72199 - goto munmap_back;
72200 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72201 + BUG_ON(vma && vma->vm_start < addr + len);
72202 }
72203
72204 /* Check against address space limit. */
72205 @@ -1258,6 +1379,16 @@ munmap_back:
72206 goto unacct_error;
72207 }
72208
72209 +#ifdef CONFIG_PAX_SEGMEXEC
72210 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
72211 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72212 + if (!vma_m) {
72213 + error = -ENOMEM;
72214 + goto free_vma;
72215 + }
72216 + }
72217 +#endif
72218 +
72219 vma->vm_mm = mm;
72220 vma->vm_start = addr;
72221 vma->vm_end = addr + len;
72222 @@ -1282,6 +1413,19 @@ munmap_back:
72223 error = file->f_op->mmap(file, vma);
72224 if (error)
72225 goto unmap_and_free_vma;
72226 +
72227 +#ifdef CONFIG_PAX_SEGMEXEC
72228 + if (vma_m && (vm_flags & VM_EXECUTABLE))
72229 + added_exe_file_vma(mm);
72230 +#endif
72231 +
72232 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
72233 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
72234 + vma->vm_flags |= VM_PAGEEXEC;
72235 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72236 + }
72237 +#endif
72238 +
72239 if (vm_flags & VM_EXECUTABLE)
72240 added_exe_file_vma(mm);
72241
72242 @@ -1319,6 +1463,11 @@ munmap_back:
72243 vma_link(mm, vma, prev, rb_link, rb_parent);
72244 file = vma->vm_file;
72245
72246 +#ifdef CONFIG_PAX_SEGMEXEC
72247 + if (vma_m)
72248 + BUG_ON(pax_mirror_vma(vma_m, vma));
72249 +#endif
72250 +
72251 /* Once vma denies write, undo our temporary denial count */
72252 if (correct_wcount)
72253 atomic_inc(&inode->i_writecount);
72254 @@ -1327,6 +1476,7 @@ out:
72255
72256 mm->total_vm += len >> PAGE_SHIFT;
72257 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
72258 + track_exec_limit(mm, addr, addr + len, vm_flags);
72259 if (vm_flags & VM_LOCKED) {
72260 if (!mlock_vma_pages_range(vma, addr, addr + len))
72261 mm->locked_vm += (len >> PAGE_SHIFT);
72262 @@ -1344,6 +1494,12 @@ unmap_and_free_vma:
72263 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
72264 charged = 0;
72265 free_vma:
72266 +
72267 +#ifdef CONFIG_PAX_SEGMEXEC
72268 + if (vma_m)
72269 + kmem_cache_free(vm_area_cachep, vma_m);
72270 +#endif
72271 +
72272 kmem_cache_free(vm_area_cachep, vma);
72273 unacct_error:
72274 if (charged)
72275 @@ -1351,6 +1507,44 @@ unacct_error:
72276 return error;
72277 }
72278
72279 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
72280 +{
72281 + if (!vma) {
72282 +#ifdef CONFIG_STACK_GROWSUP
72283 + if (addr > sysctl_heap_stack_gap)
72284 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
72285 + else
72286 + vma = find_vma(current->mm, 0);
72287 + if (vma && (vma->vm_flags & VM_GROWSUP))
72288 + return false;
72289 +#endif
72290 + return true;
72291 + }
72292 +
72293 + if (addr + len > vma->vm_start)
72294 + return false;
72295 +
72296 + if (vma->vm_flags & VM_GROWSDOWN)
72297 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
72298 +#ifdef CONFIG_STACK_GROWSUP
72299 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
72300 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
72301 +#endif
72302 +
72303 + return true;
72304 +}
72305 +
72306 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
72307 +{
72308 + if (vma->vm_start < len)
72309 + return -ENOMEM;
72310 + if (!(vma->vm_flags & VM_GROWSDOWN))
72311 + return vma->vm_start - len;
72312 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
72313 + return vma->vm_start - len - sysctl_heap_stack_gap;
72314 + return -ENOMEM;
72315 +}
72316 +
72317 /* Get an address range which is currently unmapped.
72318 * For shmat() with addr=0.
72319 *
72320 @@ -1377,18 +1571,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
72321 if (flags & MAP_FIXED)
72322 return addr;
72323
72324 +#ifdef CONFIG_PAX_RANDMMAP
72325 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72326 +#endif
72327 +
72328 if (addr) {
72329 addr = PAGE_ALIGN(addr);
72330 - vma = find_vma(mm, addr);
72331 - if (TASK_SIZE - len >= addr &&
72332 - (!vma || addr + len <= vma->vm_start))
72333 - return addr;
72334 + if (TASK_SIZE - len >= addr) {
72335 + vma = find_vma(mm, addr);
72336 + if (check_heap_stack_gap(vma, addr, len))
72337 + return addr;
72338 + }
72339 }
72340 if (len > mm->cached_hole_size) {
72341 - start_addr = addr = mm->free_area_cache;
72342 + start_addr = addr = mm->free_area_cache;
72343 } else {
72344 - start_addr = addr = TASK_UNMAPPED_BASE;
72345 - mm->cached_hole_size = 0;
72346 + start_addr = addr = mm->mmap_base;
72347 + mm->cached_hole_size = 0;
72348 }
72349
72350 full_search:
72351 @@ -1399,34 +1598,40 @@ full_search:
72352 * Start a new search - just in case we missed
72353 * some holes.
72354 */
72355 - if (start_addr != TASK_UNMAPPED_BASE) {
72356 - addr = TASK_UNMAPPED_BASE;
72357 - start_addr = addr;
72358 + if (start_addr != mm->mmap_base) {
72359 + start_addr = addr = mm->mmap_base;
72360 mm->cached_hole_size = 0;
72361 goto full_search;
72362 }
72363 return -ENOMEM;
72364 }
72365 - if (!vma || addr + len <= vma->vm_start) {
72366 - /*
72367 - * Remember the place where we stopped the search:
72368 - */
72369 - mm->free_area_cache = addr + len;
72370 - return addr;
72371 - }
72372 + if (check_heap_stack_gap(vma, addr, len))
72373 + break;
72374 if (addr + mm->cached_hole_size < vma->vm_start)
72375 mm->cached_hole_size = vma->vm_start - addr;
72376 addr = vma->vm_end;
72377 }
72378 +
72379 + /*
72380 + * Remember the place where we stopped the search:
72381 + */
72382 + mm->free_area_cache = addr + len;
72383 + return addr;
72384 }
72385 #endif
72386
72387 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
72388 {
72389 +
72390 +#ifdef CONFIG_PAX_SEGMEXEC
72391 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72392 + return;
72393 +#endif
72394 +
72395 /*
72396 * Is this a new hole at the lowest possible address?
72397 */
72398 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
72399 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
72400 mm->free_area_cache = addr;
72401 mm->cached_hole_size = ~0UL;
72402 }
72403 @@ -1444,7 +1649,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72404 {
72405 struct vm_area_struct *vma;
72406 struct mm_struct *mm = current->mm;
72407 - unsigned long addr = addr0;
72408 + unsigned long base = mm->mmap_base, addr = addr0;
72409
72410 /* requested length too big for entire address space */
72411 if (len > TASK_SIZE)
72412 @@ -1453,13 +1658,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72413 if (flags & MAP_FIXED)
72414 return addr;
72415
72416 +#ifdef CONFIG_PAX_RANDMMAP
72417 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
72418 +#endif
72419 +
72420 /* requesting a specific address */
72421 if (addr) {
72422 addr = PAGE_ALIGN(addr);
72423 - vma = find_vma(mm, addr);
72424 - if (TASK_SIZE - len >= addr &&
72425 - (!vma || addr + len <= vma->vm_start))
72426 - return addr;
72427 + if (TASK_SIZE - len >= addr) {
72428 + vma = find_vma(mm, addr);
72429 + if (check_heap_stack_gap(vma, addr, len))
72430 + return addr;
72431 + }
72432 }
72433
72434 /* check if free_area_cache is useful for us */
72435 @@ -1474,7 +1684,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72436 /* make sure it can fit in the remaining address space */
72437 if (addr > len) {
72438 vma = find_vma(mm, addr-len);
72439 - if (!vma || addr <= vma->vm_start)
72440 + if (check_heap_stack_gap(vma, addr - len, len))
72441 /* remember the address as a hint for next time */
72442 return (mm->free_area_cache = addr-len);
72443 }
72444 @@ -1491,7 +1701,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72445 * return with success:
72446 */
72447 vma = find_vma(mm, addr);
72448 - if (!vma || addr+len <= vma->vm_start)
72449 + if (check_heap_stack_gap(vma, addr, len))
72450 /* remember the address as a hint for next time */
72451 return (mm->free_area_cache = addr);
72452
72453 @@ -1500,8 +1710,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
72454 mm->cached_hole_size = vma->vm_start - addr;
72455
72456 /* try just below the current vma->vm_start */
72457 - addr = vma->vm_start-len;
72458 - } while (len < vma->vm_start);
72459 + addr = skip_heap_stack_gap(vma, len);
72460 + } while (!IS_ERR_VALUE(addr));
72461
72462 bottomup:
72463 /*
72464 @@ -1510,13 +1720,21 @@ bottomup:
72465 * can happen with large stack limits and large mmap()
72466 * allocations.
72467 */
72468 + mm->mmap_base = TASK_UNMAPPED_BASE;
72469 +
72470 +#ifdef CONFIG_PAX_RANDMMAP
72471 + if (mm->pax_flags & MF_PAX_RANDMMAP)
72472 + mm->mmap_base += mm->delta_mmap;
72473 +#endif
72474 +
72475 + mm->free_area_cache = mm->mmap_base;
72476 mm->cached_hole_size = ~0UL;
72477 - mm->free_area_cache = TASK_UNMAPPED_BASE;
72478 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
72479 /*
72480 * Restore the topdown base:
72481 */
72482 - mm->free_area_cache = mm->mmap_base;
72483 + mm->mmap_base = base;
72484 + mm->free_area_cache = base;
72485 mm->cached_hole_size = ~0UL;
72486
72487 return addr;
72488 @@ -1525,6 +1743,12 @@ bottomup:
72489
72490 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72491 {
72492 +
72493 +#ifdef CONFIG_PAX_SEGMEXEC
72494 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72495 + return;
72496 +#endif
72497 +
72498 /*
72499 * Is this a new hole at the highest possible address?
72500 */
72501 @@ -1532,8 +1756,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72502 mm->free_area_cache = addr;
72503
72504 /* dont allow allocations above current base */
72505 - if (mm->free_area_cache > mm->mmap_base)
72506 + if (mm->free_area_cache > mm->mmap_base) {
72507 mm->free_area_cache = mm->mmap_base;
72508 + mm->cached_hole_size = ~0UL;
72509 + }
72510 }
72511
72512 unsigned long
72513 @@ -1629,6 +1855,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
72514 return vma;
72515 }
72516
72517 +#ifdef CONFIG_PAX_SEGMEXEC
72518 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
72519 +{
72520 + struct vm_area_struct *vma_m;
72521 +
72522 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
72523 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
72524 + BUG_ON(vma->vm_mirror);
72525 + return NULL;
72526 + }
72527 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
72528 + vma_m = vma->vm_mirror;
72529 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
72530 + BUG_ON(vma->vm_file != vma_m->vm_file);
72531 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
72532 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
72533 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
72534 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
72535 + return vma_m;
72536 +}
72537 +#endif
72538 +
72539 /*
72540 * Verify that the stack growth is acceptable and
72541 * update accounting. This is shared with both the
72542 @@ -1645,6 +1893,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72543 return -ENOMEM;
72544
72545 /* Stack limit test */
72546 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
72547 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72548 return -ENOMEM;
72549
72550 @@ -1655,6 +1904,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72551 locked = mm->locked_vm + grow;
72552 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72553 limit >>= PAGE_SHIFT;
72554 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72555 if (locked > limit && !capable(CAP_IPC_LOCK))
72556 return -ENOMEM;
72557 }
72558 @@ -1685,37 +1935,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72559 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72560 * vma is the last one with address > vma->vm_end. Have to extend vma.
72561 */
72562 +#ifndef CONFIG_IA64
72563 +static
72564 +#endif
72565 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72566 {
72567 int error;
72568 + bool locknext;
72569
72570 if (!(vma->vm_flags & VM_GROWSUP))
72571 return -EFAULT;
72572
72573 + /* Also guard against wrapping around to address 0. */
72574 + if (address < PAGE_ALIGN(address+1))
72575 + address = PAGE_ALIGN(address+1);
72576 + else
72577 + return -ENOMEM;
72578 +
72579 /*
72580 * We must make sure the anon_vma is allocated
72581 * so that the anon_vma locking is not a noop.
72582 */
72583 if (unlikely(anon_vma_prepare(vma)))
72584 return -ENOMEM;
72585 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
72586 + if (locknext && anon_vma_prepare(vma->vm_next))
72587 + return -ENOMEM;
72588 vma_lock_anon_vma(vma);
72589 + if (locknext)
72590 + vma_lock_anon_vma(vma->vm_next);
72591
72592 /*
72593 * vma->vm_start/vm_end cannot change under us because the caller
72594 * is required to hold the mmap_sem in read mode. We need the
72595 - * anon_vma lock to serialize against concurrent expand_stacks.
72596 - * Also guard against wrapping around to address 0.
72597 + * anon_vma locks to serialize against concurrent expand_stacks
72598 + * and expand_upwards.
72599 */
72600 - if (address < PAGE_ALIGN(address+4))
72601 - address = PAGE_ALIGN(address+4);
72602 - else {
72603 - vma_unlock_anon_vma(vma);
72604 - return -ENOMEM;
72605 - }
72606 error = 0;
72607
72608 /* Somebody else might have raced and expanded it already */
72609 - if (address > vma->vm_end) {
72610 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72611 + error = -ENOMEM;
72612 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72613 unsigned long size, grow;
72614
72615 size = address - vma->vm_start;
72616 @@ -1730,6 +1991,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72617 }
72618 }
72619 }
72620 + if (locknext)
72621 + vma_unlock_anon_vma(vma->vm_next);
72622 vma_unlock_anon_vma(vma);
72623 khugepaged_enter_vma_merge(vma);
72624 return error;
72625 @@ -1743,6 +2006,8 @@ int expand_downwards(struct vm_area_struct *vma,
72626 unsigned long address)
72627 {
72628 int error;
72629 + bool lockprev = false;
72630 + struct vm_area_struct *prev;
72631
72632 /*
72633 * We must make sure the anon_vma is allocated
72634 @@ -1756,6 +2021,15 @@ int expand_downwards(struct vm_area_struct *vma,
72635 if (error)
72636 return error;
72637
72638 + prev = vma->vm_prev;
72639 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72640 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72641 +#endif
72642 + if (lockprev && anon_vma_prepare(prev))
72643 + return -ENOMEM;
72644 + if (lockprev)
72645 + vma_lock_anon_vma(prev);
72646 +
72647 vma_lock_anon_vma(vma);
72648
72649 /*
72650 @@ -1765,9 +2039,17 @@ int expand_downwards(struct vm_area_struct *vma,
72651 */
72652
72653 /* Somebody else might have raced and expanded it already */
72654 - if (address < vma->vm_start) {
72655 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72656 + error = -ENOMEM;
72657 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72658 unsigned long size, grow;
72659
72660 +#ifdef CONFIG_PAX_SEGMEXEC
72661 + struct vm_area_struct *vma_m;
72662 +
72663 + vma_m = pax_find_mirror_vma(vma);
72664 +#endif
72665 +
72666 size = vma->vm_end - address;
72667 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72668
72669 @@ -1777,11 +2059,22 @@ int expand_downwards(struct vm_area_struct *vma,
72670 if (!error) {
72671 vma->vm_start = address;
72672 vma->vm_pgoff -= grow;
72673 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72674 +
72675 +#ifdef CONFIG_PAX_SEGMEXEC
72676 + if (vma_m) {
72677 + vma_m->vm_start -= grow << PAGE_SHIFT;
72678 + vma_m->vm_pgoff -= grow;
72679 + }
72680 +#endif
72681 +
72682 perf_event_mmap(vma);
72683 }
72684 }
72685 }
72686 vma_unlock_anon_vma(vma);
72687 + if (lockprev)
72688 + vma_unlock_anon_vma(prev);
72689 khugepaged_enter_vma_merge(vma);
72690 return error;
72691 }
72692 @@ -1851,6 +2144,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72693 do {
72694 long nrpages = vma_pages(vma);
72695
72696 +#ifdef CONFIG_PAX_SEGMEXEC
72697 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72698 + vma = remove_vma(vma);
72699 + continue;
72700 + }
72701 +#endif
72702 +
72703 mm->total_vm -= nrpages;
72704 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72705 vma = remove_vma(vma);
72706 @@ -1896,6 +2196,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
72707 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
72708 vma->vm_prev = NULL;
72709 do {
72710 +
72711 +#ifdef CONFIG_PAX_SEGMEXEC
72712 + if (vma->vm_mirror) {
72713 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72714 + vma->vm_mirror->vm_mirror = NULL;
72715 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
72716 + vma->vm_mirror = NULL;
72717 + }
72718 +#endif
72719 +
72720 rb_erase(&vma->vm_rb, &mm->mm_rb);
72721 mm->map_count--;
72722 tail_vma = vma;
72723 @@ -1924,14 +2234,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72724 struct vm_area_struct *new;
72725 int err = -ENOMEM;
72726
72727 +#ifdef CONFIG_PAX_SEGMEXEC
72728 + struct vm_area_struct *vma_m, *new_m = NULL;
72729 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
72730 +#endif
72731 +
72732 if (is_vm_hugetlb_page(vma) && (addr &
72733 ~(huge_page_mask(hstate_vma(vma)))))
72734 return -EINVAL;
72735
72736 +#ifdef CONFIG_PAX_SEGMEXEC
72737 + vma_m = pax_find_mirror_vma(vma);
72738 +#endif
72739 +
72740 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72741 if (!new)
72742 goto out_err;
72743
72744 +#ifdef CONFIG_PAX_SEGMEXEC
72745 + if (vma_m) {
72746 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72747 + if (!new_m) {
72748 + kmem_cache_free(vm_area_cachep, new);
72749 + goto out_err;
72750 + }
72751 + }
72752 +#endif
72753 +
72754 /* most fields are the same, copy all, and then fixup */
72755 *new = *vma;
72756
72757 @@ -1944,6 +2273,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72758 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72759 }
72760
72761 +#ifdef CONFIG_PAX_SEGMEXEC
72762 + if (vma_m) {
72763 + *new_m = *vma_m;
72764 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
72765 + new_m->vm_mirror = new;
72766 + new->vm_mirror = new_m;
72767 +
72768 + if (new_below)
72769 + new_m->vm_end = addr_m;
72770 + else {
72771 + new_m->vm_start = addr_m;
72772 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72773 + }
72774 + }
72775 +#endif
72776 +
72777 pol = mpol_dup(vma_policy(vma));
72778 if (IS_ERR(pol)) {
72779 err = PTR_ERR(pol);
72780 @@ -1969,6 +2314,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72781 else
72782 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72783
72784 +#ifdef CONFIG_PAX_SEGMEXEC
72785 + if (!err && vma_m) {
72786 + if (anon_vma_clone(new_m, vma_m))
72787 + goto out_free_mpol;
72788 +
72789 + mpol_get(pol);
72790 + vma_set_policy(new_m, pol);
72791 +
72792 + if (new_m->vm_file) {
72793 + get_file(new_m->vm_file);
72794 + if (vma_m->vm_flags & VM_EXECUTABLE)
72795 + added_exe_file_vma(mm);
72796 + }
72797 +
72798 + if (new_m->vm_ops && new_m->vm_ops->open)
72799 + new_m->vm_ops->open(new_m);
72800 +
72801 + if (new_below)
72802 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72803 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72804 + else
72805 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72806 +
72807 + if (err) {
72808 + if (new_m->vm_ops && new_m->vm_ops->close)
72809 + new_m->vm_ops->close(new_m);
72810 + if (new_m->vm_file) {
72811 + if (vma_m->vm_flags & VM_EXECUTABLE)
72812 + removed_exe_file_vma(mm);
72813 + fput(new_m->vm_file);
72814 + }
72815 + mpol_put(pol);
72816 + }
72817 + }
72818 +#endif
72819 +
72820 /* Success. */
72821 if (!err)
72822 return 0;
72823 @@ -1981,10 +2362,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72824 removed_exe_file_vma(mm);
72825 fput(new->vm_file);
72826 }
72827 - unlink_anon_vmas(new);
72828 out_free_mpol:
72829 mpol_put(pol);
72830 out_free_vma:
72831 +
72832 +#ifdef CONFIG_PAX_SEGMEXEC
72833 + if (new_m) {
72834 + unlink_anon_vmas(new_m);
72835 + kmem_cache_free(vm_area_cachep, new_m);
72836 + }
72837 +#endif
72838 +
72839 + unlink_anon_vmas(new);
72840 kmem_cache_free(vm_area_cachep, new);
72841 out_err:
72842 return err;
72843 @@ -1997,6 +2386,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72844 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72845 unsigned long addr, int new_below)
72846 {
72847 +
72848 +#ifdef CONFIG_PAX_SEGMEXEC
72849 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72850 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72851 + if (mm->map_count >= sysctl_max_map_count-1)
72852 + return -ENOMEM;
72853 + } else
72854 +#endif
72855 +
72856 if (mm->map_count >= sysctl_max_map_count)
72857 return -ENOMEM;
72858
72859 @@ -2008,11 +2406,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72860 * work. This now handles partial unmappings.
72861 * Jeremy Fitzhardinge <jeremy@goop.org>
72862 */
72863 +#ifdef CONFIG_PAX_SEGMEXEC
72864 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72865 {
72866 + int ret = __do_munmap(mm, start, len);
72867 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72868 + return ret;
72869 +
72870 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72871 +}
72872 +
72873 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72874 +#else
72875 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72876 +#endif
72877 +{
72878 unsigned long end;
72879 struct vm_area_struct *vma, *prev, *last;
72880
72881 + /*
72882 + * mm->mmap_sem is required to protect against another thread
72883 + * changing the mappings in case we sleep.
72884 + */
72885 + verify_mm_writelocked(mm);
72886 +
72887 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72888 return -EINVAL;
72889
72890 @@ -2087,6 +2504,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72891 /* Fix up all other VM information */
72892 remove_vma_list(mm, vma);
72893
72894 + track_exec_limit(mm, start, end, 0UL);
72895 +
72896 return 0;
72897 }
72898
72899 @@ -2099,22 +2518,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72900
72901 profile_munmap(addr);
72902
72903 +#ifdef CONFIG_PAX_SEGMEXEC
72904 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72905 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
72906 + return -EINVAL;
72907 +#endif
72908 +
72909 down_write(&mm->mmap_sem);
72910 ret = do_munmap(mm, addr, len);
72911 up_write(&mm->mmap_sem);
72912 return ret;
72913 }
72914
72915 -static inline void verify_mm_writelocked(struct mm_struct *mm)
72916 -{
72917 -#ifdef CONFIG_DEBUG_VM
72918 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72919 - WARN_ON(1);
72920 - up_read(&mm->mmap_sem);
72921 - }
72922 -#endif
72923 -}
72924 -
72925 /*
72926 * this is really a simplified "do_mmap". it only handles
72927 * anonymous maps. eventually we may be able to do some
72928 @@ -2128,6 +2543,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72929 struct rb_node ** rb_link, * rb_parent;
72930 pgoff_t pgoff = addr >> PAGE_SHIFT;
72931 int error;
72932 + unsigned long charged;
72933
72934 len = PAGE_ALIGN(len);
72935 if (!len)
72936 @@ -2139,16 +2555,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72937
72938 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72939
72940 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72941 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72942 + flags &= ~VM_EXEC;
72943 +
72944 +#ifdef CONFIG_PAX_MPROTECT
72945 + if (mm->pax_flags & MF_PAX_MPROTECT)
72946 + flags &= ~VM_MAYEXEC;
72947 +#endif
72948 +
72949 + }
72950 +#endif
72951 +
72952 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72953 if (error & ~PAGE_MASK)
72954 return error;
72955
72956 + charged = len >> PAGE_SHIFT;
72957 +
72958 /*
72959 * mlock MCL_FUTURE?
72960 */
72961 if (mm->def_flags & VM_LOCKED) {
72962 unsigned long locked, lock_limit;
72963 - locked = len >> PAGE_SHIFT;
72964 + locked = charged;
72965 locked += mm->locked_vm;
72966 lock_limit = rlimit(RLIMIT_MEMLOCK);
72967 lock_limit >>= PAGE_SHIFT;
72968 @@ -2165,22 +2595,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72969 /*
72970 * Clear old maps. this also does some error checking for us
72971 */
72972 - munmap_back:
72973 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72974 if (vma && vma->vm_start < addr + len) {
72975 if (do_munmap(mm, addr, len))
72976 return -ENOMEM;
72977 - goto munmap_back;
72978 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72979 + BUG_ON(vma && vma->vm_start < addr + len);
72980 }
72981
72982 /* Check against address space limits *after* clearing old maps... */
72983 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72984 + if (!may_expand_vm(mm, charged))
72985 return -ENOMEM;
72986
72987 if (mm->map_count > sysctl_max_map_count)
72988 return -ENOMEM;
72989
72990 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
72991 + if (security_vm_enough_memory(charged))
72992 return -ENOMEM;
72993
72994 /* Can we just expand an old private anonymous mapping? */
72995 @@ -2194,7 +2624,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
72996 */
72997 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72998 if (!vma) {
72999 - vm_unacct_memory(len >> PAGE_SHIFT);
73000 + vm_unacct_memory(charged);
73001 return -ENOMEM;
73002 }
73003
73004 @@ -2208,11 +2638,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
73005 vma_link(mm, vma, prev, rb_link, rb_parent);
73006 out:
73007 perf_event_mmap(vma);
73008 - mm->total_vm += len >> PAGE_SHIFT;
73009 + mm->total_vm += charged;
73010 if (flags & VM_LOCKED) {
73011 if (!mlock_vma_pages_range(vma, addr, addr + len))
73012 - mm->locked_vm += (len >> PAGE_SHIFT);
73013 + mm->locked_vm += charged;
73014 }
73015 + track_exec_limit(mm, addr, addr + len, flags);
73016 return addr;
73017 }
73018
73019 @@ -2259,8 +2690,10 @@ void exit_mmap(struct mm_struct *mm)
73020 * Walk the list again, actually closing and freeing it,
73021 * with preemption enabled, without holding any MM locks.
73022 */
73023 - while (vma)
73024 + while (vma) {
73025 + vma->vm_mirror = NULL;
73026 vma = remove_vma(vma);
73027 + }
73028
73029 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
73030 }
73031 @@ -2274,6 +2707,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73032 struct vm_area_struct * __vma, * prev;
73033 struct rb_node ** rb_link, * rb_parent;
73034
73035 +#ifdef CONFIG_PAX_SEGMEXEC
73036 + struct vm_area_struct *vma_m = NULL;
73037 +#endif
73038 +
73039 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
73040 + return -EPERM;
73041 +
73042 /*
73043 * The vm_pgoff of a purely anonymous vma should be irrelevant
73044 * until its first write fault, when page's anon_vma and index
73045 @@ -2296,7 +2736,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
73046 if ((vma->vm_flags & VM_ACCOUNT) &&
73047 security_vm_enough_memory_mm(mm, vma_pages(vma)))
73048 return -ENOMEM;
73049 +
73050 +#ifdef CONFIG_PAX_SEGMEXEC
73051 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
73052 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73053 + if (!vma_m)
73054 + return -ENOMEM;
73055 + }
73056 +#endif
73057 +
73058 vma_link(mm, vma, prev, rb_link, rb_parent);
73059 +
73060 +#ifdef CONFIG_PAX_SEGMEXEC
73061 + if (vma_m)
73062 + BUG_ON(pax_mirror_vma(vma_m, vma));
73063 +#endif
73064 +
73065 return 0;
73066 }
73067
73068 @@ -2315,6 +2770,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73069 struct mempolicy *pol;
73070 bool faulted_in_anon_vma = true;
73071
73072 + BUG_ON(vma->vm_mirror);
73073 +
73074 /*
73075 * If anonymous vma has not yet been faulted, update new pgoff
73076 * to match new location, to increase its chance of merging.
73077 @@ -2382,6 +2839,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
73078 return NULL;
73079 }
73080
73081 +#ifdef CONFIG_PAX_SEGMEXEC
73082 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
73083 +{
73084 + struct vm_area_struct *prev_m;
73085 + struct rb_node **rb_link_m, *rb_parent_m;
73086 + struct mempolicy *pol_m;
73087 +
73088 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
73089 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
73090 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
73091 + *vma_m = *vma;
73092 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
73093 + if (anon_vma_clone(vma_m, vma))
73094 + return -ENOMEM;
73095 + pol_m = vma_policy(vma_m);
73096 + mpol_get(pol_m);
73097 + vma_set_policy(vma_m, pol_m);
73098 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
73099 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
73100 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
73101 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
73102 + if (vma_m->vm_file)
73103 + get_file(vma_m->vm_file);
73104 + if (vma_m->vm_ops && vma_m->vm_ops->open)
73105 + vma_m->vm_ops->open(vma_m);
73106 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
73107 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
73108 + vma_m->vm_mirror = vma;
73109 + vma->vm_mirror = vma_m;
73110 + return 0;
73111 +}
73112 +#endif
73113 +
73114 /*
73115 * Return true if the calling process may expand its vm space by the passed
73116 * number of pages
73117 @@ -2393,6 +2883,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
73118
73119 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
73120
73121 +#ifdef CONFIG_PAX_RANDMMAP
73122 + if (mm->pax_flags & MF_PAX_RANDMMAP)
73123 + cur -= mm->brk_gap;
73124 +#endif
73125 +
73126 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
73127 if (cur + npages > lim)
73128 return 0;
73129 return 1;
73130 @@ -2463,6 +2959,22 @@ int install_special_mapping(struct mm_struct *mm,
73131 vma->vm_start = addr;
73132 vma->vm_end = addr + len;
73133
73134 +#ifdef CONFIG_PAX_MPROTECT
73135 + if (mm->pax_flags & MF_PAX_MPROTECT) {
73136 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
73137 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
73138 + return -EPERM;
73139 + if (!(vm_flags & VM_EXEC))
73140 + vm_flags &= ~VM_MAYEXEC;
73141 +#else
73142 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
73143 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
73144 +#endif
73145 + else
73146 + vm_flags &= ~VM_MAYWRITE;
73147 + }
73148 +#endif
73149 +
73150 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
73151 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
73152
73153 diff --git a/mm/mprotect.c b/mm/mprotect.c
73154 index f437d05..e3763f6 100644
73155 --- a/mm/mprotect.c
73156 +++ b/mm/mprotect.c
73157 @@ -23,10 +23,16 @@
73158 #include <linux/mmu_notifier.h>
73159 #include <linux/migrate.h>
73160 #include <linux/perf_event.h>
73161 +
73162 +#ifdef CONFIG_PAX_MPROTECT
73163 +#include <linux/elf.h>
73164 +#endif
73165 +
73166 #include <asm/uaccess.h>
73167 #include <asm/pgtable.h>
73168 #include <asm/cacheflush.h>
73169 #include <asm/tlbflush.h>
73170 +#include <asm/mmu_context.h>
73171
73172 #ifndef pgprot_modify
73173 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
73174 @@ -141,6 +147,48 @@ static void change_protection(struct vm_area_struct *vma,
73175 flush_tlb_range(vma, start, end);
73176 }
73177
73178 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73179 +/* called while holding the mmap semaphor for writing except stack expansion */
73180 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
73181 +{
73182 + unsigned long oldlimit, newlimit = 0UL;
73183 +
73184 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
73185 + return;
73186 +
73187 + spin_lock(&mm->page_table_lock);
73188 + oldlimit = mm->context.user_cs_limit;
73189 + if ((prot & VM_EXEC) && oldlimit < end)
73190 + /* USER_CS limit moved up */
73191 + newlimit = end;
73192 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
73193 + /* USER_CS limit moved down */
73194 + newlimit = start;
73195 +
73196 + if (newlimit) {
73197 + mm->context.user_cs_limit = newlimit;
73198 +
73199 +#ifdef CONFIG_SMP
73200 + wmb();
73201 + cpus_clear(mm->context.cpu_user_cs_mask);
73202 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
73203 +#endif
73204 +
73205 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
73206 + }
73207 + spin_unlock(&mm->page_table_lock);
73208 + if (newlimit == end) {
73209 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
73210 +
73211 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
73212 + if (is_vm_hugetlb_page(vma))
73213 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
73214 + else
73215 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
73216 + }
73217 +}
73218 +#endif
73219 +
73220 int
73221 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73222 unsigned long start, unsigned long end, unsigned long newflags)
73223 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73224 int error;
73225 int dirty_accountable = 0;
73226
73227 +#ifdef CONFIG_PAX_SEGMEXEC
73228 + struct vm_area_struct *vma_m = NULL;
73229 + unsigned long start_m, end_m;
73230 +
73231 + start_m = start + SEGMEXEC_TASK_SIZE;
73232 + end_m = end + SEGMEXEC_TASK_SIZE;
73233 +#endif
73234 +
73235 if (newflags == oldflags) {
73236 *pprev = vma;
73237 return 0;
73238 }
73239
73240 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
73241 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
73242 +
73243 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
73244 + return -ENOMEM;
73245 +
73246 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
73247 + return -ENOMEM;
73248 + }
73249 +
73250 /*
73251 * If we make a private mapping writable we increase our commit;
73252 * but (without finer accounting) cannot reduce our commit if we
73253 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
73254 }
73255 }
73256
73257 +#ifdef CONFIG_PAX_SEGMEXEC
73258 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
73259 + if (start != vma->vm_start) {
73260 + error = split_vma(mm, vma, start, 1);
73261 + if (error)
73262 + goto fail;
73263 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
73264 + *pprev = (*pprev)->vm_next;
73265 + }
73266 +
73267 + if (end != vma->vm_end) {
73268 + error = split_vma(mm, vma, end, 0);
73269 + if (error)
73270 + goto fail;
73271 + }
73272 +
73273 + if (pax_find_mirror_vma(vma)) {
73274 + error = __do_munmap(mm, start_m, end_m - start_m);
73275 + if (error)
73276 + goto fail;
73277 + } else {
73278 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
73279 + if (!vma_m) {
73280 + error = -ENOMEM;
73281 + goto fail;
73282 + }
73283 + vma->vm_flags = newflags;
73284 + error = pax_mirror_vma(vma_m, vma);
73285 + if (error) {
73286 + vma->vm_flags = oldflags;
73287 + goto fail;
73288 + }
73289 + }
73290 + }
73291 +#endif
73292 +
73293 /*
73294 * First try to merge with previous and/or next vma.
73295 */
73296 @@ -204,9 +306,21 @@ success:
73297 * vm_flags and vm_page_prot are protected by the mmap_sem
73298 * held in write mode.
73299 */
73300 +
73301 +#ifdef CONFIG_PAX_SEGMEXEC
73302 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
73303 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
73304 +#endif
73305 +
73306 vma->vm_flags = newflags;
73307 +
73308 +#ifdef CONFIG_PAX_MPROTECT
73309 + if (mm->binfmt && mm->binfmt->handle_mprotect)
73310 + mm->binfmt->handle_mprotect(vma, newflags);
73311 +#endif
73312 +
73313 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
73314 - vm_get_page_prot(newflags));
73315 + vm_get_page_prot(vma->vm_flags));
73316
73317 if (vma_wants_writenotify(vma)) {
73318 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
73319 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73320 end = start + len;
73321 if (end <= start)
73322 return -ENOMEM;
73323 +
73324 +#ifdef CONFIG_PAX_SEGMEXEC
73325 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
73326 + if (end > SEGMEXEC_TASK_SIZE)
73327 + return -EINVAL;
73328 + } else
73329 +#endif
73330 +
73331 + if (end > TASK_SIZE)
73332 + return -EINVAL;
73333 +
73334 if (!arch_validate_prot(prot))
73335 return -EINVAL;
73336
73337 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73338 /*
73339 * Does the application expect PROT_READ to imply PROT_EXEC:
73340 */
73341 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
73342 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
73343 prot |= PROT_EXEC;
73344
73345 vm_flags = calc_vm_prot_bits(prot);
73346 @@ -288,6 +413,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73347 if (start > vma->vm_start)
73348 prev = vma;
73349
73350 +#ifdef CONFIG_PAX_MPROTECT
73351 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
73352 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
73353 +#endif
73354 +
73355 for (nstart = start ; ; ) {
73356 unsigned long newflags;
73357
73358 @@ -297,6 +427,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73359
73360 /* newflags >> 4 shift VM_MAY% in place of VM_% */
73361 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
73362 + if (prot & (PROT_WRITE | PROT_EXEC))
73363 + gr_log_rwxmprotect(vma->vm_file);
73364 +
73365 + error = -EACCES;
73366 + goto out;
73367 + }
73368 +
73369 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
73370 error = -EACCES;
73371 goto out;
73372 }
73373 @@ -311,6 +449,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
73374 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
73375 if (error)
73376 goto out;
73377 +
73378 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
73379 +
73380 nstart = tmp;
73381
73382 if (nstart < prev->vm_end)
73383 diff --git a/mm/mremap.c b/mm/mremap.c
73384 index 87bb839..c3bfadb 100644
73385 --- a/mm/mremap.c
73386 +++ b/mm/mremap.c
73387 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
73388 continue;
73389 pte = ptep_get_and_clear(mm, old_addr, old_pte);
73390 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
73391 +
73392 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
73393 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
73394 + pte = pte_exprotect(pte);
73395 +#endif
73396 +
73397 set_pte_at(mm, new_addr, new_pte, pte);
73398 }
73399
73400 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
73401 if (is_vm_hugetlb_page(vma))
73402 goto Einval;
73403
73404 +#ifdef CONFIG_PAX_SEGMEXEC
73405 + if (pax_find_mirror_vma(vma))
73406 + goto Einval;
73407 +#endif
73408 +
73409 /* We can't remap across vm area boundaries */
73410 if (old_len > vma->vm_end - addr)
73411 goto Efault;
73412 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
73413 unsigned long ret = -EINVAL;
73414 unsigned long charged = 0;
73415 unsigned long map_flags;
73416 + unsigned long pax_task_size = TASK_SIZE;
73417
73418 if (new_addr & ~PAGE_MASK)
73419 goto out;
73420
73421 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
73422 +#ifdef CONFIG_PAX_SEGMEXEC
73423 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
73424 + pax_task_size = SEGMEXEC_TASK_SIZE;
73425 +#endif
73426 +
73427 + pax_task_size -= PAGE_SIZE;
73428 +
73429 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
73430 goto out;
73431
73432 /* Check if the location we're moving into overlaps the
73433 * old location at all, and fail if it does.
73434 */
73435 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
73436 - goto out;
73437 -
73438 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
73439 + if (addr + old_len > new_addr && new_addr + new_len > addr)
73440 goto out;
73441
73442 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73443 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
73444 struct vm_area_struct *vma;
73445 unsigned long ret = -EINVAL;
73446 unsigned long charged = 0;
73447 + unsigned long pax_task_size = TASK_SIZE;
73448
73449 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
73450 goto out;
73451 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
73452 if (!new_len)
73453 goto out;
73454
73455 +#ifdef CONFIG_PAX_SEGMEXEC
73456 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
73457 + pax_task_size = SEGMEXEC_TASK_SIZE;
73458 +#endif
73459 +
73460 + pax_task_size -= PAGE_SIZE;
73461 +
73462 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
73463 + old_len > pax_task_size || addr > pax_task_size-old_len)
73464 + goto out;
73465 +
73466 if (flags & MREMAP_FIXED) {
73467 if (flags & MREMAP_MAYMOVE)
73468 ret = mremap_to(addr, old_len, new_addr, new_len);
73469 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
73470 addr + new_len);
73471 }
73472 ret = addr;
73473 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
73474 goto out;
73475 }
73476 }
73477 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
73478 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73479 if (ret)
73480 goto out;
73481 +
73482 + map_flags = vma->vm_flags;
73483 ret = move_vma(vma, addr, old_len, new_len, new_addr);
73484 + if (!(ret & ~PAGE_MASK)) {
73485 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
73486 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
73487 + }
73488 }
73489 out:
73490 if (ret & ~PAGE_MASK)
73491 diff --git a/mm/nommu.c b/mm/nommu.c
73492 index f59e170..34e2a2b 100644
73493 --- a/mm/nommu.c
73494 +++ b/mm/nommu.c
73495 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
73496 int sysctl_overcommit_ratio = 50; /* default is 50% */
73497 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
73498 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
73499 -int heap_stack_gap = 0;
73500
73501 atomic_long_t mmap_pages_allocated;
73502
73503 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
73504 EXPORT_SYMBOL(find_vma);
73505
73506 /*
73507 - * find a VMA
73508 - * - we don't extend stack VMAs under NOMMU conditions
73509 - */
73510 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
73511 -{
73512 - return find_vma(mm, addr);
73513 -}
73514 -
73515 -/*
73516 * expand a stack to a given address
73517 * - not supported under NOMMU conditions
73518 */
73519 @@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
73520
73521 /* most fields are the same, copy all, and then fixup */
73522 *new = *vma;
73523 + INIT_LIST_HEAD(&new->anon_vma_chain);
73524 *region = *vma->vm_region;
73525 new->vm_region = region;
73526
73527 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
73528 index a13ded1..b949d15 100644
73529 --- a/mm/page_alloc.c
73530 +++ b/mm/page_alloc.c
73531 @@ -335,7 +335,7 @@ out:
73532 * This usage means that zero-order pages may not be compound.
73533 */
73534
73535 -static void free_compound_page(struct page *page)
73536 +void free_compound_page(struct page *page)
73537 {
73538 __free_pages_ok(page, compound_order(page));
73539 }
73540 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73541 int i;
73542 int bad = 0;
73543
73544 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73545 + unsigned long index = 1UL << order;
73546 +#endif
73547 +
73548 trace_mm_page_free(page, order);
73549 kmemcheck_free_shadow(page, order);
73550
73551 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
73552 debug_check_no_obj_freed(page_address(page),
73553 PAGE_SIZE << order);
73554 }
73555 +
73556 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
73557 + for (; index; --index)
73558 + sanitize_highpage(page + index - 1);
73559 +#endif
73560 +
73561 arch_free_page(page, order);
73562 kernel_map_pages(page, 1 << order, 0);
73563
73564 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
73565 arch_alloc_page(page, order);
73566 kernel_map_pages(page, 1 << order, 1);
73567
73568 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
73569 if (gfp_flags & __GFP_ZERO)
73570 prep_zero_page(page, order, gfp_flags);
73571 +#endif
73572
73573 if (order && (gfp_flags & __GFP_COMP))
73574 prep_compound_page(page, order);
73575 @@ -3468,7 +3480,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
73576 unsigned long pfn;
73577
73578 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73579 +#ifdef CONFIG_X86_32
73580 + /* boot failures in VMware 8 on 32bit vanilla since
73581 + this change */
73582 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73583 +#else
73584 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73585 +#endif
73586 return 1;
73587 }
73588 return 0;
73589 diff --git a/mm/percpu.c b/mm/percpu.c
73590 index f47af91..7eeef99 100644
73591 --- a/mm/percpu.c
73592 +++ b/mm/percpu.c
73593 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
73594 static unsigned int pcpu_high_unit_cpu __read_mostly;
73595
73596 /* the address of the first chunk which starts with the kernel static area */
73597 -void *pcpu_base_addr __read_mostly;
73598 +void *pcpu_base_addr __read_only;
73599 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73600
73601 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
73602 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
73603 index c20ff48..137702a 100644
73604 --- a/mm/process_vm_access.c
73605 +++ b/mm/process_vm_access.c
73606 @@ -13,6 +13,7 @@
73607 #include <linux/uio.h>
73608 #include <linux/sched.h>
73609 #include <linux/highmem.h>
73610 +#include <linux/security.h>
73611 #include <linux/ptrace.h>
73612 #include <linux/slab.h>
73613 #include <linux/syscalls.h>
73614 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73615 size_t iov_l_curr_offset = 0;
73616 ssize_t iov_len;
73617
73618 + return -ENOSYS; // PaX: until properly audited
73619 +
73620 /*
73621 * Work out how many pages of struct pages we're going to need
73622 * when eventually calling get_user_pages
73623 */
73624 for (i = 0; i < riovcnt; i++) {
73625 iov_len = rvec[i].iov_len;
73626 - if (iov_len > 0) {
73627 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
73628 - + iov_len)
73629 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73630 - / PAGE_SIZE + 1;
73631 - nr_pages = max(nr_pages, nr_pages_iov);
73632 - }
73633 + if (iov_len <= 0)
73634 + continue;
73635 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73636 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73637 + nr_pages = max(nr_pages, nr_pages_iov);
73638 }
73639
73640 if (nr_pages == 0)
73641 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73642 goto free_proc_pages;
73643 }
73644
73645 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
73646 + rc = -EPERM;
73647 + goto put_task_struct;
73648 + }
73649 +
73650 mm = mm_access(task, PTRACE_MODE_ATTACH);
73651 if (!mm || IS_ERR(mm)) {
73652 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
73653 diff --git a/mm/rmap.c b/mm/rmap.c
73654 index c8454e0..b04f3a2 100644
73655 --- a/mm/rmap.c
73656 +++ b/mm/rmap.c
73657 @@ -152,6 +152,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73658 struct anon_vma *anon_vma = vma->anon_vma;
73659 struct anon_vma_chain *avc;
73660
73661 +#ifdef CONFIG_PAX_SEGMEXEC
73662 + struct anon_vma_chain *avc_m = NULL;
73663 +#endif
73664 +
73665 might_sleep();
73666 if (unlikely(!anon_vma)) {
73667 struct mm_struct *mm = vma->vm_mm;
73668 @@ -161,6 +165,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73669 if (!avc)
73670 goto out_enomem;
73671
73672 +#ifdef CONFIG_PAX_SEGMEXEC
73673 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
73674 + if (!avc_m)
73675 + goto out_enomem_free_avc;
73676 +#endif
73677 +
73678 anon_vma = find_mergeable_anon_vma(vma);
73679 allocated = NULL;
73680 if (!anon_vma) {
73681 @@ -174,6 +184,21 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73682 /* page_table_lock to protect against threads */
73683 spin_lock(&mm->page_table_lock);
73684 if (likely(!vma->anon_vma)) {
73685 +
73686 +#ifdef CONFIG_PAX_SEGMEXEC
73687 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73688 +
73689 + if (vma_m) {
73690 + BUG_ON(vma_m->anon_vma);
73691 + vma_m->anon_vma = anon_vma;
73692 + avc_m->anon_vma = anon_vma;
73693 + avc_m->vma = vma;
73694 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
73695 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
73696 + avc_m = NULL;
73697 + }
73698 +#endif
73699 +
73700 vma->anon_vma = anon_vma;
73701 avc->anon_vma = anon_vma;
73702 avc->vma = vma;
73703 @@ -187,12 +212,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
73704
73705 if (unlikely(allocated))
73706 put_anon_vma(allocated);
73707 +
73708 +#ifdef CONFIG_PAX_SEGMEXEC
73709 + if (unlikely(avc_m))
73710 + anon_vma_chain_free(avc_m);
73711 +#endif
73712 +
73713 if (unlikely(avc))
73714 anon_vma_chain_free(avc);
73715 }
73716 return 0;
73717
73718 out_enomem_free_avc:
73719 +
73720 +#ifdef CONFIG_PAX_SEGMEXEC
73721 + if (avc_m)
73722 + anon_vma_chain_free(avc_m);
73723 +#endif
73724 +
73725 anon_vma_chain_free(avc);
73726 out_enomem:
73727 return -ENOMEM;
73728 @@ -243,7 +280,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
73729 * Attach the anon_vmas from src to dst.
73730 * Returns 0 on success, -ENOMEM on failure.
73731 */
73732 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73733 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73734 {
73735 struct anon_vma_chain *avc, *pavc;
73736 struct anon_vma *root = NULL;
73737 @@ -321,7 +358,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
73738 * the corresponding VMA in the parent process is attached to.
73739 * Returns 0 on success, non-zero on failure.
73740 */
73741 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73742 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73743 {
73744 struct anon_vma_chain *avc;
73745 struct anon_vma *anon_vma;
73746 diff --git a/mm/shmem.c b/mm/shmem.c
73747 index 269d049..a9d2b50 100644
73748 --- a/mm/shmem.c
73749 +++ b/mm/shmem.c
73750 @@ -31,7 +31,7 @@
73751 #include <linux/export.h>
73752 #include <linux/swap.h>
73753
73754 -static struct vfsmount *shm_mnt;
73755 +struct vfsmount *shm_mnt;
73756
73757 #ifdef CONFIG_SHMEM
73758 /*
73759 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73760 #define BOGO_DIRENT_SIZE 20
73761
73762 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73763 -#define SHORT_SYMLINK_LEN 128
73764 +#define SHORT_SYMLINK_LEN 64
73765
73766 struct shmem_xattr {
73767 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
73768 @@ -2180,8 +2180,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
73769 int err = -ENOMEM;
73770
73771 /* Round up to L1_CACHE_BYTES to resist false sharing */
73772 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73773 - L1_CACHE_BYTES), GFP_KERNEL);
73774 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73775 if (!sbinfo)
73776 return -ENOMEM;
73777
73778 diff --git a/mm/slab.c b/mm/slab.c
73779 index f0bd785..348b96a 100644
73780 --- a/mm/slab.c
73781 +++ b/mm/slab.c
73782 @@ -153,7 +153,7 @@
73783
73784 /* Legal flag mask for kmem_cache_create(). */
73785 #if DEBUG
73786 -# define CREATE_MASK (SLAB_RED_ZONE | \
73787 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73788 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73789 SLAB_CACHE_DMA | \
73790 SLAB_STORE_USER | \
73791 @@ -161,7 +161,7 @@
73792 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73793 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73794 #else
73795 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73796 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73797 SLAB_CACHE_DMA | \
73798 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73799 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73800 @@ -290,7 +290,7 @@ struct kmem_list3 {
73801 * Need this for bootstrapping a per node allocator.
73802 */
73803 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73804 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73805 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73806 #define CACHE_CACHE 0
73807 #define SIZE_AC MAX_NUMNODES
73808 #define SIZE_L3 (2 * MAX_NUMNODES)
73809 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73810 if ((x)->max_freeable < i) \
73811 (x)->max_freeable = i; \
73812 } while (0)
73813 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73814 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73815 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73816 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73817 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73818 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73819 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73820 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73821 #else
73822 #define STATS_INC_ACTIVE(x) do { } while (0)
73823 #define STATS_DEC_ACTIVE(x) do { } while (0)
73824 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73825 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73826 */
73827 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73828 - const struct slab *slab, void *obj)
73829 + const struct slab *slab, const void *obj)
73830 {
73831 u32 offset = (obj - slab->s_mem);
73832 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73833 @@ -568,7 +568,7 @@ struct cache_names {
73834 static struct cache_names __initdata cache_names[] = {
73835 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73836 #include <linux/kmalloc_sizes.h>
73837 - {NULL,}
73838 + {NULL}
73839 #undef CACHE
73840 };
73841
73842 @@ -1588,7 +1588,7 @@ void __init kmem_cache_init(void)
73843 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73844 sizes[INDEX_AC].cs_size,
73845 ARCH_KMALLOC_MINALIGN,
73846 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73847 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73848 NULL);
73849
73850 if (INDEX_AC != INDEX_L3) {
73851 @@ -1596,7 +1596,7 @@ void __init kmem_cache_init(void)
73852 kmem_cache_create(names[INDEX_L3].name,
73853 sizes[INDEX_L3].cs_size,
73854 ARCH_KMALLOC_MINALIGN,
73855 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73856 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73857 NULL);
73858 }
73859
73860 @@ -1614,7 +1614,7 @@ void __init kmem_cache_init(void)
73861 sizes->cs_cachep = kmem_cache_create(names->name,
73862 sizes->cs_size,
73863 ARCH_KMALLOC_MINALIGN,
73864 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73865 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73866 NULL);
73867 }
73868 #ifdef CONFIG_ZONE_DMA
73869 @@ -4339,10 +4339,10 @@ static int s_show(struct seq_file *m, void *p)
73870 }
73871 /* cpu stats */
73872 {
73873 - unsigned long allochit = atomic_read(&cachep->allochit);
73874 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73875 - unsigned long freehit = atomic_read(&cachep->freehit);
73876 - unsigned long freemiss = atomic_read(&cachep->freemiss);
73877 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73878 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73879 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73880 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73881
73882 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73883 allochit, allocmiss, freehit, freemiss);
73884 @@ -4601,13 +4601,62 @@ static int __init slab_proc_init(void)
73885 {
73886 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
73887 #ifdef CONFIG_DEBUG_SLAB_LEAK
73888 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73889 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
73890 #endif
73891 return 0;
73892 }
73893 module_init(slab_proc_init);
73894 #endif
73895
73896 +void check_object_size(const void *ptr, unsigned long n, bool to)
73897 +{
73898 +
73899 +#ifdef CONFIG_PAX_USERCOPY
73900 + struct page *page;
73901 + struct kmem_cache *cachep = NULL;
73902 + struct slab *slabp;
73903 + unsigned int objnr;
73904 + unsigned long offset;
73905 + const char *type;
73906 +
73907 + if (!n)
73908 + return;
73909 +
73910 + type = "<null>";
73911 + if (ZERO_OR_NULL_PTR(ptr))
73912 + goto report;
73913 +
73914 + if (!virt_addr_valid(ptr))
73915 + return;
73916 +
73917 + page = virt_to_head_page(ptr);
73918 +
73919 + type = "<process stack>";
73920 + if (!PageSlab(page)) {
73921 + if (object_is_on_stack(ptr, n) == -1)
73922 + goto report;
73923 + return;
73924 + }
73925 +
73926 + cachep = page_get_cache(page);
73927 + type = cachep->name;
73928 + if (!(cachep->flags & SLAB_USERCOPY))
73929 + goto report;
73930 +
73931 + slabp = page_get_slab(page);
73932 + objnr = obj_to_index(cachep, slabp, ptr);
73933 + BUG_ON(objnr >= cachep->num);
73934 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73935 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73936 + return;
73937 +
73938 +report:
73939 + pax_report_usercopy(ptr, n, to, type);
73940 +#endif
73941 +
73942 +}
73943 +EXPORT_SYMBOL(check_object_size);
73944 +
73945 /**
73946 * ksize - get the actual amount of memory allocated for a given object
73947 * @objp: Pointer to the object
73948 diff --git a/mm/slob.c b/mm/slob.c
73949 index 8105be4..e045f96 100644
73950 --- a/mm/slob.c
73951 +++ b/mm/slob.c
73952 @@ -29,7 +29,7 @@
73953 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73954 * alloc_pages() directly, allocating compound pages so the page order
73955 * does not have to be separately tracked, and also stores the exact
73956 - * allocation size in page->private so that it can be used to accurately
73957 + * allocation size in slob_page->size so that it can be used to accurately
73958 * provide ksize(). These objects are detected in kfree() because slob_page()
73959 * is false for them.
73960 *
73961 @@ -58,6 +58,7 @@
73962 */
73963
73964 #include <linux/kernel.h>
73965 +#include <linux/sched.h>
73966 #include <linux/slab.h>
73967 #include <linux/mm.h>
73968 #include <linux/swap.h> /* struct reclaim_state */
73969 @@ -102,7 +103,8 @@ struct slob_page {
73970 unsigned long flags; /* mandatory */
73971 atomic_t _count; /* mandatory */
73972 slobidx_t units; /* free units left in page */
73973 - unsigned long pad[2];
73974 + unsigned long pad[1];
73975 + unsigned long size; /* size when >=PAGE_SIZE */
73976 slob_t *free; /* first free slob_t in page */
73977 struct list_head list; /* linked list of free pages */
73978 };
73979 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73980 */
73981 static inline int is_slob_page(struct slob_page *sp)
73982 {
73983 - return PageSlab((struct page *)sp);
73984 + return PageSlab((struct page *)sp) && !sp->size;
73985 }
73986
73987 static inline void set_slob_page(struct slob_page *sp)
73988 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73989
73990 static inline struct slob_page *slob_page(const void *addr)
73991 {
73992 - return (struct slob_page *)virt_to_page(addr);
73993 + return (struct slob_page *)virt_to_head_page(addr);
73994 }
73995
73996 /*
73997 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73998 /*
73999 * Return the size of a slob block.
74000 */
74001 -static slobidx_t slob_units(slob_t *s)
74002 +static slobidx_t slob_units(const slob_t *s)
74003 {
74004 if (s->units > 0)
74005 return s->units;
74006 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
74007 /*
74008 * Return the next free slob block pointer after this one.
74009 */
74010 -static slob_t *slob_next(slob_t *s)
74011 +static slob_t *slob_next(const slob_t *s)
74012 {
74013 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
74014 slobidx_t next;
74015 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
74016 /*
74017 * Returns true if s is the last free block in its page.
74018 */
74019 -static int slob_last(slob_t *s)
74020 +static int slob_last(const slob_t *s)
74021 {
74022 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
74023 }
74024 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
74025 if (!page)
74026 return NULL;
74027
74028 + set_slob_page(page);
74029 return page_address(page);
74030 }
74031
74032 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
74033 if (!b)
74034 return NULL;
74035 sp = slob_page(b);
74036 - set_slob_page(sp);
74037
74038 spin_lock_irqsave(&slob_lock, flags);
74039 sp->units = SLOB_UNITS(PAGE_SIZE);
74040 sp->free = b;
74041 + sp->size = 0;
74042 INIT_LIST_HEAD(&sp->list);
74043 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
74044 set_slob_page_free(sp, slob_list);
74045 @@ -476,10 +479,9 @@ out:
74046 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
74047 */
74048
74049 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74050 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
74051 {
74052 - unsigned int *m;
74053 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74054 + slob_t *m;
74055 void *ret;
74056
74057 gfp &= gfp_allowed_mask;
74058 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74059
74060 if (!m)
74061 return NULL;
74062 - *m = size;
74063 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
74064 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
74065 + m[0].units = size;
74066 + m[1].units = align;
74067 ret = (void *)m + align;
74068
74069 trace_kmalloc_node(_RET_IP_, ret,
74070 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74071 gfp |= __GFP_COMP;
74072 ret = slob_new_pages(gfp, order, node);
74073 if (ret) {
74074 - struct page *page;
74075 - page = virt_to_page(ret);
74076 - page->private = size;
74077 + struct slob_page *sp;
74078 + sp = slob_page(ret);
74079 + sp->size = size;
74080 }
74081
74082 trace_kmalloc_node(_RET_IP_, ret,
74083 size, PAGE_SIZE << order, gfp, node);
74084 }
74085
74086 - kmemleak_alloc(ret, size, 1, gfp);
74087 + return ret;
74088 +}
74089 +
74090 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
74091 +{
74092 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74093 + void *ret = __kmalloc_node_align(size, gfp, node, align);
74094 +
74095 + if (!ZERO_OR_NULL_PTR(ret))
74096 + kmemleak_alloc(ret, size, 1, gfp);
74097 return ret;
74098 }
74099 EXPORT_SYMBOL(__kmalloc_node);
74100 @@ -533,13 +547,92 @@ void kfree(const void *block)
74101 sp = slob_page(block);
74102 if (is_slob_page(sp)) {
74103 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74104 - unsigned int *m = (unsigned int *)(block - align);
74105 - slob_free(m, *m + align);
74106 - } else
74107 + slob_t *m = (slob_t *)(block - align);
74108 + slob_free(m, m[0].units + align);
74109 + } else {
74110 + clear_slob_page(sp);
74111 + free_slob_page(sp);
74112 + sp->size = 0;
74113 put_page(&sp->page);
74114 + }
74115 }
74116 EXPORT_SYMBOL(kfree);
74117
74118 +void check_object_size(const void *ptr, unsigned long n, bool to)
74119 +{
74120 +
74121 +#ifdef CONFIG_PAX_USERCOPY
74122 + struct slob_page *sp;
74123 + const slob_t *free;
74124 + const void *base;
74125 + unsigned long flags;
74126 + const char *type;
74127 +
74128 + if (!n)
74129 + return;
74130 +
74131 + type = "<null>";
74132 + if (ZERO_OR_NULL_PTR(ptr))
74133 + goto report;
74134 +
74135 + if (!virt_addr_valid(ptr))
74136 + return;
74137 +
74138 + type = "<process stack>";
74139 + sp = slob_page(ptr);
74140 + if (!PageSlab((struct page *)sp)) {
74141 + if (object_is_on_stack(ptr, n) == -1)
74142 + goto report;
74143 + return;
74144 + }
74145 +
74146 + type = "<slob>";
74147 + if (sp->size) {
74148 + base = page_address(&sp->page);
74149 + if (base <= ptr && n <= sp->size - (ptr - base))
74150 + return;
74151 + goto report;
74152 + }
74153 +
74154 + /* some tricky double walking to find the chunk */
74155 + spin_lock_irqsave(&slob_lock, flags);
74156 + base = (void *)((unsigned long)ptr & PAGE_MASK);
74157 + free = sp->free;
74158 +
74159 + while (!slob_last(free) && (void *)free <= ptr) {
74160 + base = free + slob_units(free);
74161 + free = slob_next(free);
74162 + }
74163 +
74164 + while (base < (void *)free) {
74165 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
74166 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
74167 + int offset;
74168 +
74169 + if (ptr < base + align)
74170 + break;
74171 +
74172 + offset = ptr - base - align;
74173 + if (offset >= m) {
74174 + base += size;
74175 + continue;
74176 + }
74177 +
74178 + if (n > m - offset)
74179 + break;
74180 +
74181 + spin_unlock_irqrestore(&slob_lock, flags);
74182 + return;
74183 + }
74184 +
74185 + spin_unlock_irqrestore(&slob_lock, flags);
74186 +report:
74187 + pax_report_usercopy(ptr, n, to, type);
74188 +#endif
74189 +
74190 +}
74191 +EXPORT_SYMBOL(check_object_size);
74192 +
74193 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
74194 size_t ksize(const void *block)
74195 {
74196 @@ -552,10 +645,10 @@ size_t ksize(const void *block)
74197 sp = slob_page(block);
74198 if (is_slob_page(sp)) {
74199 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
74200 - unsigned int *m = (unsigned int *)(block - align);
74201 - return SLOB_UNITS(*m) * SLOB_UNIT;
74202 + slob_t *m = (slob_t *)(block - align);
74203 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
74204 } else
74205 - return sp->page.private;
74206 + return sp->size;
74207 }
74208 EXPORT_SYMBOL(ksize);
74209
74210 @@ -571,8 +664,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74211 {
74212 struct kmem_cache *c;
74213
74214 +#ifdef CONFIG_PAX_USERCOPY
74215 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
74216 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
74217 +#else
74218 c = slob_alloc(sizeof(struct kmem_cache),
74219 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
74220 +#endif
74221
74222 if (c) {
74223 c->name = name;
74224 @@ -614,17 +712,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
74225
74226 lockdep_trace_alloc(flags);
74227
74228 +#ifdef CONFIG_PAX_USERCOPY
74229 + b = __kmalloc_node_align(c->size, flags, node, c->align);
74230 +#else
74231 if (c->size < PAGE_SIZE) {
74232 b = slob_alloc(c->size, flags, c->align, node);
74233 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74234 SLOB_UNITS(c->size) * SLOB_UNIT,
74235 flags, node);
74236 } else {
74237 + struct slob_page *sp;
74238 +
74239 b = slob_new_pages(flags, get_order(c->size), node);
74240 + sp = slob_page(b);
74241 + sp->size = c->size;
74242 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
74243 PAGE_SIZE << get_order(c->size),
74244 flags, node);
74245 }
74246 +#endif
74247
74248 if (c->ctor)
74249 c->ctor(b);
74250 @@ -636,10 +742,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
74251
74252 static void __kmem_cache_free(void *b, int size)
74253 {
74254 - if (size < PAGE_SIZE)
74255 + struct slob_page *sp = slob_page(b);
74256 +
74257 + if (is_slob_page(sp))
74258 slob_free(b, size);
74259 - else
74260 + else {
74261 + clear_slob_page(sp);
74262 + free_slob_page(sp);
74263 + sp->size = 0;
74264 slob_free_pages(b, get_order(size));
74265 + }
74266 }
74267
74268 static void kmem_rcu_free(struct rcu_head *head)
74269 @@ -652,17 +764,31 @@ static void kmem_rcu_free(struct rcu_head *head)
74270
74271 void kmem_cache_free(struct kmem_cache *c, void *b)
74272 {
74273 + int size = c->size;
74274 +
74275 +#ifdef CONFIG_PAX_USERCOPY
74276 + if (size + c->align < PAGE_SIZE) {
74277 + size += c->align;
74278 + b -= c->align;
74279 + }
74280 +#endif
74281 +
74282 kmemleak_free_recursive(b, c->flags);
74283 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
74284 struct slob_rcu *slob_rcu;
74285 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
74286 - slob_rcu->size = c->size;
74287 + slob_rcu = b + (size - sizeof(struct slob_rcu));
74288 + slob_rcu->size = size;
74289 call_rcu(&slob_rcu->head, kmem_rcu_free);
74290 } else {
74291 - __kmem_cache_free(b, c->size);
74292 + __kmem_cache_free(b, size);
74293 }
74294
74295 +#ifdef CONFIG_PAX_USERCOPY
74296 + trace_kfree(_RET_IP_, b);
74297 +#else
74298 trace_kmem_cache_free(_RET_IP_, b);
74299 +#endif
74300 +
74301 }
74302 EXPORT_SYMBOL(kmem_cache_free);
74303
74304 diff --git a/mm/slub.c b/mm/slub.c
74305 index 0342a5d..8180ae9 100644
74306 --- a/mm/slub.c
74307 +++ b/mm/slub.c
74308 @@ -208,7 +208,7 @@ struct track {
74309
74310 enum track_item { TRACK_ALLOC, TRACK_FREE };
74311
74312 -#ifdef CONFIG_SYSFS
74313 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74314 static int sysfs_slab_add(struct kmem_cache *);
74315 static int sysfs_slab_alias(struct kmem_cache *, const char *);
74316 static void sysfs_slab_remove(struct kmem_cache *);
74317 @@ -532,7 +532,7 @@ static void print_track(const char *s, struct track *t)
74318 if (!t->addr)
74319 return;
74320
74321 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
74322 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
74323 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
74324 #ifdef CONFIG_STACKTRACE
74325 {
74326 @@ -2571,6 +2571,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
74327
74328 page = virt_to_head_page(x);
74329
74330 + BUG_ON(!PageSlab(page));
74331 +
74332 slab_free(s, page, x, _RET_IP_);
74333
74334 trace_kmem_cache_free(_RET_IP_, x);
74335 @@ -2604,7 +2606,7 @@ static int slub_min_objects;
74336 * Merge control. If this is set then no merging of slab caches will occur.
74337 * (Could be removed. This was introduced to pacify the merge skeptics.)
74338 */
74339 -static int slub_nomerge;
74340 +static int slub_nomerge = 1;
74341
74342 /*
74343 * Calculate the order of allocation given an slab object size.
74344 @@ -3057,7 +3059,7 @@ static int kmem_cache_open(struct kmem_cache *s,
74345 else
74346 s->cpu_partial = 30;
74347
74348 - s->refcount = 1;
74349 + atomic_set(&s->refcount, 1);
74350 #ifdef CONFIG_NUMA
74351 s->remote_node_defrag_ratio = 1000;
74352 #endif
74353 @@ -3161,8 +3163,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
74354 void kmem_cache_destroy(struct kmem_cache *s)
74355 {
74356 down_write(&slub_lock);
74357 - s->refcount--;
74358 - if (!s->refcount) {
74359 + if (atomic_dec_and_test(&s->refcount)) {
74360 list_del(&s->list);
74361 up_write(&slub_lock);
74362 if (kmem_cache_close(s)) {
74363 @@ -3373,6 +3374,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
74364 EXPORT_SYMBOL(__kmalloc_node);
74365 #endif
74366
74367 +void check_object_size(const void *ptr, unsigned long n, bool to)
74368 +{
74369 +
74370 +#ifdef CONFIG_PAX_USERCOPY
74371 + struct page *page;
74372 + struct kmem_cache *s = NULL;
74373 + unsigned long offset;
74374 + const char *type;
74375 +
74376 + if (!n)
74377 + return;
74378 +
74379 + type = "<null>";
74380 + if (ZERO_OR_NULL_PTR(ptr))
74381 + goto report;
74382 +
74383 + if (!virt_addr_valid(ptr))
74384 + return;
74385 +
74386 + page = virt_to_head_page(ptr);
74387 +
74388 + type = "<process stack>";
74389 + if (!PageSlab(page)) {
74390 + if (object_is_on_stack(ptr, n) == -1)
74391 + goto report;
74392 + return;
74393 + }
74394 +
74395 + s = page->slab;
74396 + type = s->name;
74397 + if (!(s->flags & SLAB_USERCOPY))
74398 + goto report;
74399 +
74400 + offset = (ptr - page_address(page)) % s->size;
74401 + if (offset <= s->objsize && n <= s->objsize - offset)
74402 + return;
74403 +
74404 +report:
74405 + pax_report_usercopy(ptr, n, to, type);
74406 +#endif
74407 +
74408 +}
74409 +EXPORT_SYMBOL(check_object_size);
74410 +
74411 size_t ksize(const void *object)
74412 {
74413 struct page *page;
74414 @@ -3647,7 +3692,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
74415 int node;
74416
74417 list_add(&s->list, &slab_caches);
74418 - s->refcount = -1;
74419 + atomic_set(&s->refcount, -1);
74420
74421 for_each_node_state(node, N_NORMAL_MEMORY) {
74422 struct kmem_cache_node *n = get_node(s, node);
74423 @@ -3767,17 +3812,17 @@ void __init kmem_cache_init(void)
74424
74425 /* Caches that are not of the two-to-the-power-of size */
74426 if (KMALLOC_MIN_SIZE <= 32) {
74427 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
74428 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
74429 caches++;
74430 }
74431
74432 if (KMALLOC_MIN_SIZE <= 64) {
74433 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
74434 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
74435 caches++;
74436 }
74437
74438 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
74439 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
74440 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
74441 caches++;
74442 }
74443
74444 @@ -3845,7 +3890,7 @@ static int slab_unmergeable(struct kmem_cache *s)
74445 /*
74446 * We may have set a slab to be unmergeable during bootstrap.
74447 */
74448 - if (s->refcount < 0)
74449 + if (atomic_read(&s->refcount) < 0)
74450 return 1;
74451
74452 return 0;
74453 @@ -3904,7 +3949,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74454 down_write(&slub_lock);
74455 s = find_mergeable(size, align, flags, name, ctor);
74456 if (s) {
74457 - s->refcount++;
74458 + atomic_inc(&s->refcount);
74459 /*
74460 * Adjust the object sizes so that we clear
74461 * the complete object on kzalloc.
74462 @@ -3913,7 +3958,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
74463 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
74464
74465 if (sysfs_slab_alias(s, name)) {
74466 - s->refcount--;
74467 + atomic_dec(&s->refcount);
74468 goto err;
74469 }
74470 up_write(&slub_lock);
74471 @@ -4042,7 +4087,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
74472 }
74473 #endif
74474
74475 -#ifdef CONFIG_SYSFS
74476 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74477 static int count_inuse(struct page *page)
74478 {
74479 return page->inuse;
74480 @@ -4429,12 +4474,12 @@ static void resiliency_test(void)
74481 validate_slab_cache(kmalloc_caches[9]);
74482 }
74483 #else
74484 -#ifdef CONFIG_SYSFS
74485 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74486 static void resiliency_test(void) {};
74487 #endif
74488 #endif
74489
74490 -#ifdef CONFIG_SYSFS
74491 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74492 enum slab_stat_type {
74493 SL_ALL, /* All slabs */
74494 SL_PARTIAL, /* Only partially allocated slabs */
74495 @@ -4677,7 +4722,7 @@ SLAB_ATTR_RO(ctor);
74496
74497 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74498 {
74499 - return sprintf(buf, "%d\n", s->refcount - 1);
74500 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74501 }
74502 SLAB_ATTR_RO(aliases);
74503
74504 @@ -5244,6 +5289,7 @@ static char *create_unique_id(struct kmem_cache *s)
74505 return name;
74506 }
74507
74508 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74509 static int sysfs_slab_add(struct kmem_cache *s)
74510 {
74511 int err;
74512 @@ -5306,6 +5352,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
74513 kobject_del(&s->kobj);
74514 kobject_put(&s->kobj);
74515 }
74516 +#endif
74517
74518 /*
74519 * Need to buffer aliases during bootup until sysfs becomes
74520 @@ -5319,6 +5366,7 @@ struct saved_alias {
74521
74522 static struct saved_alias *alias_list;
74523
74524 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74525 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74526 {
74527 struct saved_alias *al;
74528 @@ -5341,6 +5389,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74529 alias_list = al;
74530 return 0;
74531 }
74532 +#endif
74533
74534 static int __init slab_sysfs_init(void)
74535 {
74536 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
74537 index 1b7e22a..3fcd4f3 100644
74538 --- a/mm/sparse-vmemmap.c
74539 +++ b/mm/sparse-vmemmap.c
74540 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
74541 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74542 if (!p)
74543 return NULL;
74544 - pud_populate(&init_mm, pud, p);
74545 + pud_populate_kernel(&init_mm, pud, p);
74546 }
74547 return pud;
74548 }
74549 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
74550 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74551 if (!p)
74552 return NULL;
74553 - pgd_populate(&init_mm, pgd, p);
74554 + pgd_populate_kernel(&init_mm, pgd, p);
74555 }
74556 return pgd;
74557 }
74558 diff --git a/mm/swap.c b/mm/swap.c
74559 index 14380e9..e244704 100644
74560 --- a/mm/swap.c
74561 +++ b/mm/swap.c
74562 @@ -30,6 +30,7 @@
74563 #include <linux/backing-dev.h>
74564 #include <linux/memcontrol.h>
74565 #include <linux/gfp.h>
74566 +#include <linux/hugetlb.h>
74567
74568 #include "internal.h"
74569
74570 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
74571
74572 __page_cache_release(page);
74573 dtor = get_compound_page_dtor(page);
74574 + if (!PageHuge(page))
74575 + BUG_ON(dtor != free_compound_page);
74576 (*dtor)(page);
74577 }
74578
74579 diff --git a/mm/swapfile.c b/mm/swapfile.c
74580 index f31b29d..8bdcae2 100644
74581 --- a/mm/swapfile.c
74582 +++ b/mm/swapfile.c
74583 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
74584
74585 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74586 /* Activity counter to indicate that a swapon or swapoff has occurred */
74587 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
74588 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74589
74590 static inline unsigned char swap_count(unsigned char ent)
74591 {
74592 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
74593 }
74594 filp_close(swap_file, NULL);
74595 err = 0;
74596 - atomic_inc(&proc_poll_event);
74597 + atomic_inc_unchecked(&proc_poll_event);
74598 wake_up_interruptible(&proc_poll_wait);
74599
74600 out_dput:
74601 @@ -1685,8 +1685,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
74602
74603 poll_wait(file, &proc_poll_wait, wait);
74604
74605 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
74606 - seq->poll_event = atomic_read(&proc_poll_event);
74607 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74608 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74609 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74610 }
74611
74612 @@ -1784,7 +1784,7 @@ static int swaps_open(struct inode *inode, struct file *file)
74613 return ret;
74614
74615 seq = file->private_data;
74616 - seq->poll_event = atomic_read(&proc_poll_event);
74617 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74618 return 0;
74619 }
74620
74621 @@ -2122,7 +2122,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
74622 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74623
74624 mutex_unlock(&swapon_mutex);
74625 - atomic_inc(&proc_poll_event);
74626 + atomic_inc_unchecked(&proc_poll_event);
74627 wake_up_interruptible(&proc_poll_wait);
74628
74629 if (S_ISREG(inode->i_mode))
74630 diff --git a/mm/util.c b/mm/util.c
74631 index 136ac4f..f917fa9 100644
74632 --- a/mm/util.c
74633 +++ b/mm/util.c
74634 @@ -243,6 +243,12 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
74635 void arch_pick_mmap_layout(struct mm_struct *mm)
74636 {
74637 mm->mmap_base = TASK_UNMAPPED_BASE;
74638 +
74639 +#ifdef CONFIG_PAX_RANDMMAP
74640 + if (mm->pax_flags & MF_PAX_RANDMMAP)
74641 + mm->mmap_base += mm->delta_mmap;
74642 +#endif
74643 +
74644 mm->get_unmapped_area = arch_get_unmapped_area;
74645 mm->unmap_area = arch_unmap_area;
74646 }
74647 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74648 index 86ce9a5..e0bd080 100644
74649 --- a/mm/vmalloc.c
74650 +++ b/mm/vmalloc.c
74651 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74652
74653 pte = pte_offset_kernel(pmd, addr);
74654 do {
74655 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74656 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74657 +
74658 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74659 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74660 + BUG_ON(!pte_exec(*pte));
74661 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74662 + continue;
74663 + }
74664 +#endif
74665 +
74666 + {
74667 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74668 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74669 + }
74670 } while (pte++, addr += PAGE_SIZE, addr != end);
74671 }
74672
74673 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74674 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74675 {
74676 pte_t *pte;
74677 + int ret = -ENOMEM;
74678
74679 /*
74680 * nr is a running index into the array which helps higher level
74681 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74682 pte = pte_alloc_kernel(pmd, addr);
74683 if (!pte)
74684 return -ENOMEM;
74685 +
74686 + pax_open_kernel();
74687 do {
74688 struct page *page = pages[*nr];
74689
74690 - if (WARN_ON(!pte_none(*pte)))
74691 - return -EBUSY;
74692 - if (WARN_ON(!page))
74693 - return -ENOMEM;
74694 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74695 + if (pgprot_val(prot) & _PAGE_NX)
74696 +#endif
74697 +
74698 + if (WARN_ON(!pte_none(*pte))) {
74699 + ret = -EBUSY;
74700 + goto out;
74701 + }
74702 + if (WARN_ON(!page)) {
74703 + ret = -ENOMEM;
74704 + goto out;
74705 + }
74706 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74707 (*nr)++;
74708 } while (pte++, addr += PAGE_SIZE, addr != end);
74709 - return 0;
74710 + ret = 0;
74711 +out:
74712 + pax_close_kernel();
74713 + return ret;
74714 }
74715
74716 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74717 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74718 pmd_t *pmd;
74719 unsigned long next;
74720
74721 - pmd = pmd_alloc(&init_mm, pud, addr);
74722 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
74723 if (!pmd)
74724 return -ENOMEM;
74725 do {
74726 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
74727 pud_t *pud;
74728 unsigned long next;
74729
74730 - pud = pud_alloc(&init_mm, pgd, addr);
74731 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
74732 if (!pud)
74733 return -ENOMEM;
74734 do {
74735 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74736 * and fall back on vmalloc() if that fails. Others
74737 * just put it in the vmalloc space.
74738 */
74739 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74740 +#ifdef CONFIG_MODULES
74741 +#ifdef MODULES_VADDR
74742 unsigned long addr = (unsigned long)x;
74743 if (addr >= MODULES_VADDR && addr < MODULES_END)
74744 return 1;
74745 #endif
74746 +
74747 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74748 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74749 + return 1;
74750 +#endif
74751 +
74752 +#endif
74753 +
74754 return is_vmalloc_addr(x);
74755 }
74756
74757 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74758
74759 if (!pgd_none(*pgd)) {
74760 pud_t *pud = pud_offset(pgd, addr);
74761 +#ifdef CONFIG_X86
74762 + if (!pud_large(*pud))
74763 +#endif
74764 if (!pud_none(*pud)) {
74765 pmd_t *pmd = pmd_offset(pud, addr);
74766 +#ifdef CONFIG_X86
74767 + if (!pmd_large(*pmd))
74768 +#endif
74769 if (!pmd_none(*pmd)) {
74770 pte_t *ptep, pte;
74771
74772 @@ -1319,6 +1359,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74773 struct vm_struct *area;
74774
74775 BUG_ON(in_interrupt());
74776 +
74777 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74778 + if (flags & VM_KERNEXEC) {
74779 + if (start != VMALLOC_START || end != VMALLOC_END)
74780 + return NULL;
74781 + start = (unsigned long)MODULES_EXEC_VADDR;
74782 + end = (unsigned long)MODULES_EXEC_END;
74783 + }
74784 +#endif
74785 +
74786 if (flags & VM_IOREMAP) {
74787 int bit = fls(size);
74788
74789 @@ -1551,6 +1601,11 @@ void *vmap(struct page **pages, unsigned int count,
74790 if (count > totalram_pages)
74791 return NULL;
74792
74793 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74794 + if (!(pgprot_val(prot) & _PAGE_NX))
74795 + flags |= VM_KERNEXEC;
74796 +#endif
74797 +
74798 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74799 __builtin_return_address(0));
74800 if (!area)
74801 @@ -1652,6 +1707,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74802 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74803 goto fail;
74804
74805 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74806 + if (!(pgprot_val(prot) & _PAGE_NX))
74807 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74808 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74809 + else
74810 +#endif
74811 +
74812 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74813 start, end, node, gfp_mask, caller);
74814 if (!area)
74815 @@ -1825,10 +1887,9 @@ EXPORT_SYMBOL(vzalloc_node);
74816 * For tight control over page level allocator and protection flags
74817 * use __vmalloc() instead.
74818 */
74819 -
74820 void *vmalloc_exec(unsigned long size)
74821 {
74822 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74823 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74824 -1, __builtin_return_address(0));
74825 }
74826
74827 @@ -2123,6 +2184,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74828 unsigned long uaddr = vma->vm_start;
74829 unsigned long usize = vma->vm_end - vma->vm_start;
74830
74831 + BUG_ON(vma->vm_mirror);
74832 +
74833 if ((PAGE_SIZE-1) & (unsigned long)addr)
74834 return -EINVAL;
74835
74836 diff --git a/mm/vmstat.c b/mm/vmstat.c
74837 index f600557..1459fc8 100644
74838 --- a/mm/vmstat.c
74839 +++ b/mm/vmstat.c
74840 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74841 *
74842 * vm_stat contains the global counters
74843 */
74844 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74845 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74846 EXPORT_SYMBOL(vm_stat);
74847
74848 #ifdef CONFIG_SMP
74849 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74850 v = p->vm_stat_diff[i];
74851 p->vm_stat_diff[i] = 0;
74852 local_irq_restore(flags);
74853 - atomic_long_add(v, &zone->vm_stat[i]);
74854 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74855 global_diff[i] += v;
74856 #ifdef CONFIG_NUMA
74857 /* 3 seconds idle till flush */
74858 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74859
74860 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74861 if (global_diff[i])
74862 - atomic_long_add(global_diff[i], &vm_stat[i]);
74863 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74864 }
74865
74866 #endif
74867 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
74868 start_cpu_timer(cpu);
74869 #endif
74870 #ifdef CONFIG_PROC_FS
74871 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74872 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74873 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74874 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74875 + {
74876 + mode_t gr_mode = S_IRUGO;
74877 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74878 + gr_mode = S_IRUSR;
74879 +#endif
74880 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74881 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74882 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74883 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74884 +#else
74885 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74886 +#endif
74887 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74888 + }
74889 #endif
74890 return 0;
74891 }
74892 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74893 index efea35b..9c8dd0b 100644
74894 --- a/net/8021q/vlan.c
74895 +++ b/net/8021q/vlan.c
74896 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74897 err = -EPERM;
74898 if (!capable(CAP_NET_ADMIN))
74899 break;
74900 - if ((args.u.name_type >= 0) &&
74901 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74902 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74903 struct vlan_net *vn;
74904
74905 vn = net_generic(net, vlan_net_id);
74906 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74907 index fccae26..e7ece2f 100644
74908 --- a/net/9p/trans_fd.c
74909 +++ b/net/9p/trans_fd.c
74910 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74911 oldfs = get_fs();
74912 set_fs(get_ds());
74913 /* The cast to a user pointer is valid due to the set_fs() */
74914 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74915 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74916 set_fs(oldfs);
74917
74918 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74919 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74920 index 876fbe8..8bbea9f 100644
74921 --- a/net/atm/atm_misc.c
74922 +++ b/net/atm/atm_misc.c
74923 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74924 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74925 return 1;
74926 atm_return(vcc, truesize);
74927 - atomic_inc(&vcc->stats->rx_drop);
74928 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74929 return 0;
74930 }
74931 EXPORT_SYMBOL(atm_charge);
74932 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74933 }
74934 }
74935 atm_return(vcc, guess);
74936 - atomic_inc(&vcc->stats->rx_drop);
74937 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74938 return NULL;
74939 }
74940 EXPORT_SYMBOL(atm_alloc_charge);
74941 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74942
74943 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74944 {
74945 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74946 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74947 __SONET_ITEMS
74948 #undef __HANDLE_ITEM
74949 }
74950 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74951
74952 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74953 {
74954 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74955 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74956 __SONET_ITEMS
74957 #undef __HANDLE_ITEM
74958 }
74959 diff --git a/net/atm/lec.h b/net/atm/lec.h
74960 index dfc0719..47c5322 100644
74961 --- a/net/atm/lec.h
74962 +++ b/net/atm/lec.h
74963 @@ -48,7 +48,7 @@ struct lane2_ops {
74964 const u8 *tlvs, u32 sizeoftlvs);
74965 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74966 const u8 *tlvs, u32 sizeoftlvs);
74967 -};
74968 +} __no_const;
74969
74970 /*
74971 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74972 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74973 index 0919a88..a23d54e 100644
74974 --- a/net/atm/mpc.h
74975 +++ b/net/atm/mpc.h
74976 @@ -33,7 +33,7 @@ struct mpoa_client {
74977 struct mpc_parameters parameters; /* parameters for this client */
74978
74979 const struct net_device_ops *old_ops;
74980 - struct net_device_ops new_ops;
74981 + net_device_ops_no_const new_ops;
74982 };
74983
74984
74985 diff --git a/net/atm/proc.c b/net/atm/proc.c
74986 index 0d020de..011c7bb 100644
74987 --- a/net/atm/proc.c
74988 +++ b/net/atm/proc.c
74989 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74990 const struct k_atm_aal_stats *stats)
74991 {
74992 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74993 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74994 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74995 - atomic_read(&stats->rx_drop));
74996 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74997 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74998 + atomic_read_unchecked(&stats->rx_drop));
74999 }
75000
75001 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
75002 diff --git a/net/atm/resources.c b/net/atm/resources.c
75003 index 23f45ce..c748f1a 100644
75004 --- a/net/atm/resources.c
75005 +++ b/net/atm/resources.c
75006 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
75007 static void copy_aal_stats(struct k_atm_aal_stats *from,
75008 struct atm_aal_stats *to)
75009 {
75010 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
75011 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
75012 __AAL_STAT_ITEMS
75013 #undef __HANDLE_ITEM
75014 }
75015 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
75016 static void subtract_aal_stats(struct k_atm_aal_stats *from,
75017 struct atm_aal_stats *to)
75018 {
75019 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
75020 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
75021 __AAL_STAT_ITEMS
75022 #undef __HANDLE_ITEM
75023 }
75024 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
75025 index 3512e25..2b33401 100644
75026 --- a/net/batman-adv/bat_iv_ogm.c
75027 +++ b/net/batman-adv/bat_iv_ogm.c
75028 @@ -541,7 +541,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
75029
75030 /* change sequence number to network order */
75031 batman_ogm_packet->seqno =
75032 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
75033 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
75034
75035 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
75036 batman_ogm_packet->tt_crc = htons((uint16_t)
75037 @@ -561,7 +561,7 @@ void bat_ogm_schedule(struct hard_iface *hard_iface, int tt_num_changes)
75038 else
75039 batman_ogm_packet->gw_flags = NO_FLAGS;
75040
75041 - atomic_inc(&hard_iface->seqno);
75042 + atomic_inc_unchecked(&hard_iface->seqno);
75043
75044 slide_own_bcast_window(hard_iface);
75045 bat_ogm_queue_add(bat_priv, hard_iface->packet_buff,
75046 @@ -922,7 +922,7 @@ static void bat_ogm_process(const struct ethhdr *ethhdr,
75047 return;
75048
75049 /* could be changed by schedule_own_packet() */
75050 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
75051 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
75052
75053 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
75054
75055 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
75056 index 7704df4..beb4e16 100644
75057 --- a/net/batman-adv/hard-interface.c
75058 +++ b/net/batman-adv/hard-interface.c
75059 @@ -326,8 +326,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
75060 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
75061 dev_add_pack(&hard_iface->batman_adv_ptype);
75062
75063 - atomic_set(&hard_iface->seqno, 1);
75064 - atomic_set(&hard_iface->frag_seqno, 1);
75065 + atomic_set_unchecked(&hard_iface->seqno, 1);
75066 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
75067 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
75068 hard_iface->net_dev->name);
75069
75070 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
75071 index 987c75a..20d6f36 100644
75072 --- a/net/batman-adv/soft-interface.c
75073 +++ b/net/batman-adv/soft-interface.c
75074 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
75075
75076 /* set broadcast sequence number */
75077 bcast_packet->seqno =
75078 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
75079 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
75080
75081 add_bcast_packet_to_list(bat_priv, skb, 1);
75082
75083 @@ -843,7 +843,7 @@ struct net_device *softif_create(const char *name)
75084 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
75085
75086 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
75087 - atomic_set(&bat_priv->bcast_seqno, 1);
75088 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
75089 atomic_set(&bat_priv->ttvn, 0);
75090 atomic_set(&bat_priv->tt_local_changes, 0);
75091 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
75092 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
75093 index e9eb043..d174eeb 100644
75094 --- a/net/batman-adv/types.h
75095 +++ b/net/batman-adv/types.h
75096 @@ -38,8 +38,8 @@ struct hard_iface {
75097 int16_t if_num;
75098 char if_status;
75099 struct net_device *net_dev;
75100 - atomic_t seqno;
75101 - atomic_t frag_seqno;
75102 + atomic_unchecked_t seqno;
75103 + atomic_unchecked_t frag_seqno;
75104 unsigned char *packet_buff;
75105 int packet_len;
75106 struct kobject *hardif_obj;
75107 @@ -154,7 +154,7 @@ struct bat_priv {
75108 atomic_t orig_interval; /* uint */
75109 atomic_t hop_penalty; /* uint */
75110 atomic_t log_level; /* uint */
75111 - atomic_t bcast_seqno;
75112 + atomic_unchecked_t bcast_seqno;
75113 atomic_t bcast_queue_left;
75114 atomic_t batman_queue_left;
75115 atomic_t ttvn; /* translation table version number */
75116 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
75117 index 07d1c1d..7e9bea9 100644
75118 --- a/net/batman-adv/unicast.c
75119 +++ b/net/batman-adv/unicast.c
75120 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
75121 frag1->flags = UNI_FRAG_HEAD | large_tail;
75122 frag2->flags = large_tail;
75123
75124 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
75125 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
75126 frag1->seqno = htons(seqno - 1);
75127 frag2->seqno = htons(seqno);
75128
75129 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
75130 index 280953b..cd219bb 100644
75131 --- a/net/bluetooth/hci_conn.c
75132 +++ b/net/bluetooth/hci_conn.c
75133 @@ -234,7 +234,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
75134 memset(&cp, 0, sizeof(cp));
75135
75136 cp.handle = cpu_to_le16(conn->handle);
75137 - memcpy(cp.ltk, ltk, sizeof(ltk));
75138 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
75139
75140 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
75141 }
75142 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
75143 index 32d338c..d24bcdb 100644
75144 --- a/net/bluetooth/l2cap_core.c
75145 +++ b/net/bluetooth/l2cap_core.c
75146 @@ -2418,8 +2418,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
75147 break;
75148
75149 case L2CAP_CONF_RFC:
75150 - if (olen == sizeof(rfc))
75151 - memcpy(&rfc, (void *)val, olen);
75152 + if (olen != sizeof(rfc))
75153 + break;
75154 +
75155 + memcpy(&rfc, (void *)val, olen);
75156
75157 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
75158 rfc.mode != chan->mode)
75159 @@ -2537,8 +2539,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
75160
75161 switch (type) {
75162 case L2CAP_CONF_RFC:
75163 - if (olen == sizeof(rfc))
75164 - memcpy(&rfc, (void *)val, olen);
75165 + if (olen != sizeof(rfc))
75166 + break;
75167 +
75168 + memcpy(&rfc, (void *)val, olen);
75169 goto done;
75170 }
75171 }
75172 diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
75173 index 5449294..7da9a5f 100644
75174 --- a/net/bridge/netfilter/ebt_ulog.c
75175 +++ b/net/bridge/netfilter/ebt_ulog.c
75176 @@ -96,6 +96,7 @@ static void ulog_timer(unsigned long data)
75177 spin_unlock_bh(&ulog_buffers[data].lock);
75178 }
75179
75180 +static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
75181 static struct sk_buff *ulog_alloc_skb(unsigned int size)
75182 {
75183 struct sk_buff *skb;
75184 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
75185 index 5fe2ff3..10968b5 100644
75186 --- a/net/bridge/netfilter/ebtables.c
75187 +++ b/net/bridge/netfilter/ebtables.c
75188 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
75189 tmp.valid_hooks = t->table->valid_hooks;
75190 }
75191 mutex_unlock(&ebt_mutex);
75192 - if (copy_to_user(user, &tmp, *len) != 0){
75193 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
75194 BUGPRINT("c2u Didn't work\n");
75195 ret = -EFAULT;
75196 break;
75197 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
75198 index a97d97a..6f679ed 100644
75199 --- a/net/caif/caif_socket.c
75200 +++ b/net/caif/caif_socket.c
75201 @@ -48,19 +48,20 @@ static struct dentry *debugfsdir;
75202 #ifdef CONFIG_DEBUG_FS
75203 struct debug_fs_counter {
75204 atomic_t caif_nr_socks;
75205 - atomic_t caif_sock_create;
75206 - atomic_t num_connect_req;
75207 - atomic_t num_connect_resp;
75208 - atomic_t num_connect_fail_resp;
75209 - atomic_t num_disconnect;
75210 - atomic_t num_remote_shutdown_ind;
75211 - atomic_t num_tx_flow_off_ind;
75212 - atomic_t num_tx_flow_on_ind;
75213 - atomic_t num_rx_flow_off;
75214 - atomic_t num_rx_flow_on;
75215 + atomic_unchecked_t caif_sock_create;
75216 + atomic_unchecked_t num_connect_req;
75217 + atomic_unchecked_t num_connect_resp;
75218 + atomic_unchecked_t num_connect_fail_resp;
75219 + atomic_unchecked_t num_disconnect;
75220 + atomic_unchecked_t num_remote_shutdown_ind;
75221 + atomic_unchecked_t num_tx_flow_off_ind;
75222 + atomic_unchecked_t num_tx_flow_on_ind;
75223 + atomic_unchecked_t num_rx_flow_off;
75224 + atomic_unchecked_t num_rx_flow_on;
75225 };
75226 static struct debug_fs_counter cnt;
75227 #define dbfs_atomic_inc(v) atomic_inc_return(v)
75228 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_return_unchecked(v)
75229 #define dbfs_atomic_dec(v) atomic_dec_return(v)
75230 #else
75231 #define dbfs_atomic_inc(v) 0
75232 @@ -161,7 +162,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75233 atomic_read(&cf_sk->sk.sk_rmem_alloc),
75234 sk_rcvbuf_lowwater(cf_sk));
75235 set_rx_flow_off(cf_sk);
75236 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
75237 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75238 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75239 }
75240
75241 @@ -172,7 +173,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75242 set_rx_flow_off(cf_sk);
75243 if (net_ratelimit())
75244 pr_debug("sending flow OFF due to rmem_schedule\n");
75245 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
75246 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
75247 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
75248 }
75249 skb->dev = NULL;
75250 @@ -233,14 +234,14 @@ static void caif_ctrl_cb(struct cflayer *layr,
75251 switch (flow) {
75252 case CAIF_CTRLCMD_FLOW_ON_IND:
75253 /* OK from modem to start sending again */
75254 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
75255 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
75256 set_tx_flow_on(cf_sk);
75257 cf_sk->sk.sk_state_change(&cf_sk->sk);
75258 break;
75259
75260 case CAIF_CTRLCMD_FLOW_OFF_IND:
75261 /* Modem asks us to shut up */
75262 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
75263 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
75264 set_tx_flow_off(cf_sk);
75265 cf_sk->sk.sk_state_change(&cf_sk->sk);
75266 break;
75267 @@ -249,7 +250,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75268 /* We're now connected */
75269 caif_client_register_refcnt(&cf_sk->layer,
75270 cfsk_hold, cfsk_put);
75271 - dbfs_atomic_inc(&cnt.num_connect_resp);
75272 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
75273 cf_sk->sk.sk_state = CAIF_CONNECTED;
75274 set_tx_flow_on(cf_sk);
75275 cf_sk->sk.sk_state_change(&cf_sk->sk);
75276 @@ -263,7 +264,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75277
75278 case CAIF_CTRLCMD_INIT_FAIL_RSP:
75279 /* Connect request failed */
75280 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
75281 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
75282 cf_sk->sk.sk_err = ECONNREFUSED;
75283 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
75284 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
75285 @@ -277,7 +278,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
75286
75287 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
75288 /* Modem has closed this connection, or device is down. */
75289 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
75290 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
75291 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
75292 cf_sk->sk.sk_err = ECONNRESET;
75293 set_rx_flow_on(cf_sk);
75294 @@ -297,7 +298,7 @@ static void caif_check_flow_release(struct sock *sk)
75295 return;
75296
75297 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
75298 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
75299 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
75300 set_rx_flow_on(cf_sk);
75301 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
75302 }
75303 @@ -856,7 +857,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
75304 /*ifindex = id of the interface.*/
75305 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
75306
75307 - dbfs_atomic_inc(&cnt.num_connect_req);
75308 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
75309 cf_sk->layer.receive = caif_sktrecv_cb;
75310
75311 err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
75312 @@ -945,7 +946,7 @@ static int caif_release(struct socket *sock)
75313 spin_unlock_bh(&sk->sk_receive_queue.lock);
75314 sock->sk = NULL;
75315
75316 - dbfs_atomic_inc(&cnt.num_disconnect);
75317 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
75318
75319 WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
75320 if (cf_sk->debugfs_socket_dir != NULL)
75321 @@ -1124,7 +1125,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
75322 cf_sk->conn_req.protocol = protocol;
75323 /* Increase the number of sockets created. */
75324 dbfs_atomic_inc(&cnt.caif_nr_socks);
75325 - num = dbfs_atomic_inc(&cnt.caif_sock_create);
75326 + num = dbfs_atomic_inc_unchecked(&cnt.caif_sock_create);
75327 #ifdef CONFIG_DEBUG_FS
75328 if (!IS_ERR(debugfsdir)) {
75329
75330 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
75331 index 5cf5222..6f704ad 100644
75332 --- a/net/caif/cfctrl.c
75333 +++ b/net/caif/cfctrl.c
75334 @@ -9,6 +9,7 @@
75335 #include <linux/stddef.h>
75336 #include <linux/spinlock.h>
75337 #include <linux/slab.h>
75338 +#include <linux/sched.h>
75339 #include <net/caif/caif_layer.h>
75340 #include <net/caif/cfpkt.h>
75341 #include <net/caif/cfctrl.h>
75342 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
75343 memset(&dev_info, 0, sizeof(dev_info));
75344 dev_info.id = 0xff;
75345 cfsrvl_init(&this->serv, 0, &dev_info, false);
75346 - atomic_set(&this->req_seq_no, 1);
75347 - atomic_set(&this->rsp_seq_no, 1);
75348 + atomic_set_unchecked(&this->req_seq_no, 1);
75349 + atomic_set_unchecked(&this->rsp_seq_no, 1);
75350 this->serv.layer.receive = cfctrl_recv;
75351 sprintf(this->serv.layer.name, "ctrl");
75352 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
75353 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
75354 struct cfctrl_request_info *req)
75355 {
75356 spin_lock_bh(&ctrl->info_list_lock);
75357 - atomic_inc(&ctrl->req_seq_no);
75358 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
75359 + atomic_inc_unchecked(&ctrl->req_seq_no);
75360 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
75361 list_add_tail(&req->list, &ctrl->list);
75362 spin_unlock_bh(&ctrl->info_list_lock);
75363 }
75364 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
75365 if (p != first)
75366 pr_warn("Requests are not received in order\n");
75367
75368 - atomic_set(&ctrl->rsp_seq_no,
75369 + atomic_set_unchecked(&ctrl->rsp_seq_no,
75370 p->sequence_no);
75371 list_del(&p->list);
75372 goto out;
75373 diff --git a/net/can/gw.c b/net/can/gw.c
75374 index 3d79b12..8de85fa 100644
75375 --- a/net/can/gw.c
75376 +++ b/net/can/gw.c
75377 @@ -96,7 +96,7 @@ struct cf_mod {
75378 struct {
75379 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
75380 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
75381 - } csumfunc;
75382 + } __no_const csumfunc;
75383 };
75384
75385
75386 diff --git a/net/compat.c b/net/compat.c
75387 index 6def90e..c6992fa 100644
75388 --- a/net/compat.c
75389 +++ b/net/compat.c
75390 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
75391 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
75392 __get_user(kmsg->msg_flags, &umsg->msg_flags))
75393 return -EFAULT;
75394 - kmsg->msg_name = compat_ptr(tmp1);
75395 - kmsg->msg_iov = compat_ptr(tmp2);
75396 - kmsg->msg_control = compat_ptr(tmp3);
75397 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
75398 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
75399 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
75400 return 0;
75401 }
75402
75403 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75404
75405 if (kern_msg->msg_namelen) {
75406 if (mode == VERIFY_READ) {
75407 - int err = move_addr_to_kernel(kern_msg->msg_name,
75408 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
75409 kern_msg->msg_namelen,
75410 kern_address);
75411 if (err < 0)
75412 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75413 kern_msg->msg_name = NULL;
75414
75415 tot_len = iov_from_user_compat_to_kern(kern_iov,
75416 - (struct compat_iovec __user *)kern_msg->msg_iov,
75417 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
75418 kern_msg->msg_iovlen);
75419 if (tot_len >= 0)
75420 kern_msg->msg_iov = kern_iov;
75421 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
75422
75423 #define CMSG_COMPAT_FIRSTHDR(msg) \
75424 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
75425 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
75426 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
75427 (struct compat_cmsghdr __user *)NULL)
75428
75429 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
75430 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
75431 (ucmlen) <= (unsigned long) \
75432 ((mhdr)->msg_controllen - \
75433 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
75434 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
75435
75436 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
75437 struct compat_cmsghdr __user *cmsg, int cmsg_len)
75438 {
75439 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
75440 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
75441 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
75442 msg->msg_controllen)
75443 return NULL;
75444 return (struct compat_cmsghdr __user *)ptr;
75445 @@ -221,7 +221,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75446 {
75447 struct compat_timeval ctv;
75448 struct compat_timespec cts[3];
75449 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75450 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75451 struct compat_cmsghdr cmhdr;
75452 int cmlen;
75453
75454 @@ -273,7 +273,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
75455
75456 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
75457 {
75458 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
75459 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
75460 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
75461 int fdnum = scm->fp->count;
75462 struct file **fp = scm->fp->fp;
75463 @@ -370,7 +370,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
75464 return -EFAULT;
75465 old_fs = get_fs();
75466 set_fs(KERNEL_DS);
75467 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
75468 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
75469 set_fs(old_fs);
75470
75471 return err;
75472 @@ -431,7 +431,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
75473 len = sizeof(ktime);
75474 old_fs = get_fs();
75475 set_fs(KERNEL_DS);
75476 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
75477 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
75478 set_fs(old_fs);
75479
75480 if (!err) {
75481 @@ -566,7 +566,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75482 case MCAST_JOIN_GROUP:
75483 case MCAST_LEAVE_GROUP:
75484 {
75485 - struct compat_group_req __user *gr32 = (void *)optval;
75486 + struct compat_group_req __user *gr32 = (void __user *)optval;
75487 struct group_req __user *kgr =
75488 compat_alloc_user_space(sizeof(struct group_req));
75489 u32 interface;
75490 @@ -587,7 +587,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75491 case MCAST_BLOCK_SOURCE:
75492 case MCAST_UNBLOCK_SOURCE:
75493 {
75494 - struct compat_group_source_req __user *gsr32 = (void *)optval;
75495 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
75496 struct group_source_req __user *kgsr = compat_alloc_user_space(
75497 sizeof(struct group_source_req));
75498 u32 interface;
75499 @@ -608,7 +608,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
75500 }
75501 case MCAST_MSFILTER:
75502 {
75503 - struct compat_group_filter __user *gf32 = (void *)optval;
75504 + struct compat_group_filter __user *gf32 = (void __user *)optval;
75505 struct group_filter __user *kgf;
75506 u32 interface, fmode, numsrc;
75507
75508 @@ -646,7 +646,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
75509 char __user *optval, int __user *optlen,
75510 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
75511 {
75512 - struct compat_group_filter __user *gf32 = (void *)optval;
75513 + struct compat_group_filter __user *gf32 = (void __user *)optval;
75514 struct group_filter __user *kgf;
75515 int __user *koptlen;
75516 u32 interface, fmode, numsrc;
75517 diff --git a/net/core/datagram.c b/net/core/datagram.c
75518 index 68bbf9f..5ef0d12 100644
75519 --- a/net/core/datagram.c
75520 +++ b/net/core/datagram.c
75521 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
75522 }
75523
75524 kfree_skb(skb);
75525 - atomic_inc(&sk->sk_drops);
75526 + atomic_inc_unchecked(&sk->sk_drops);
75527 sk_mem_reclaim_partial(sk);
75528
75529 return err;
75530 diff --git a/net/core/dev.c b/net/core/dev.c
75531 index 0336374..659088a 100644
75532 --- a/net/core/dev.c
75533 +++ b/net/core/dev.c
75534 @@ -1138,10 +1138,14 @@ void dev_load(struct net *net, const char *name)
75535 if (no_module && capable(CAP_NET_ADMIN))
75536 no_module = request_module("netdev-%s", name);
75537 if (no_module && capable(CAP_SYS_MODULE)) {
75538 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
75539 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
75540 +#else
75541 if (!request_module("%s", name))
75542 pr_err("Loading kernel module for a network device "
75543 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
75544 "instead\n", name);
75545 +#endif
75546 }
75547 }
75548 EXPORT_SYMBOL(dev_load);
75549 @@ -1605,7 +1609,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
75550 {
75551 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
75552 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
75553 - atomic_long_inc(&dev->rx_dropped);
75554 + atomic_long_inc_unchecked(&dev->rx_dropped);
75555 kfree_skb(skb);
75556 return NET_RX_DROP;
75557 }
75558 @@ -1615,7 +1619,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
75559 nf_reset(skb);
75560
75561 if (unlikely(!is_skb_forwardable(dev, skb))) {
75562 - atomic_long_inc(&dev->rx_dropped);
75563 + atomic_long_inc_unchecked(&dev->rx_dropped);
75564 kfree_skb(skb);
75565 return NET_RX_DROP;
75566 }
75567 @@ -2077,7 +2081,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
75568
75569 struct dev_gso_cb {
75570 void (*destructor)(struct sk_buff *skb);
75571 -};
75572 +} __no_const;
75573
75574 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75575
75576 @@ -2933,7 +2937,7 @@ enqueue:
75577
75578 local_irq_restore(flags);
75579
75580 - atomic_long_inc(&skb->dev->rx_dropped);
75581 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75582 kfree_skb(skb);
75583 return NET_RX_DROP;
75584 }
75585 @@ -3005,7 +3009,7 @@ int netif_rx_ni(struct sk_buff *skb)
75586 }
75587 EXPORT_SYMBOL(netif_rx_ni);
75588
75589 -static void net_tx_action(struct softirq_action *h)
75590 +static void net_tx_action(void)
75591 {
75592 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75593
75594 @@ -3293,7 +3297,7 @@ ncls:
75595 if (pt_prev) {
75596 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
75597 } else {
75598 - atomic_long_inc(&skb->dev->rx_dropped);
75599 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75600 kfree_skb(skb);
75601 /* Jamal, now you will not able to escape explaining
75602 * me how you were going to use this. :-)
75603 @@ -3853,7 +3857,7 @@ void netif_napi_del(struct napi_struct *napi)
75604 }
75605 EXPORT_SYMBOL(netif_napi_del);
75606
75607 -static void net_rx_action(struct softirq_action *h)
75608 +static void net_rx_action(void)
75609 {
75610 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75611 unsigned long time_limit = jiffies + 2;
75612 @@ -5878,7 +5882,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
75613 } else {
75614 netdev_stats_to_stats64(storage, &dev->stats);
75615 }
75616 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
75617 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
75618 return storage;
75619 }
75620 EXPORT_SYMBOL(dev_get_stats);
75621 diff --git a/net/core/flow.c b/net/core/flow.c
75622 index e318c7e..168b1d0 100644
75623 --- a/net/core/flow.c
75624 +++ b/net/core/flow.c
75625 @@ -61,7 +61,7 @@ struct flow_cache {
75626 struct timer_list rnd_timer;
75627 };
75628
75629 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
75630 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75631 EXPORT_SYMBOL(flow_cache_genid);
75632 static struct flow_cache flow_cache_global;
75633 static struct kmem_cache *flow_cachep __read_mostly;
75634 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
75635
75636 static int flow_entry_valid(struct flow_cache_entry *fle)
75637 {
75638 - if (atomic_read(&flow_cache_genid) != fle->genid)
75639 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75640 return 0;
75641 if (fle->object && !fle->object->ops->check(fle->object))
75642 return 0;
75643 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
75644 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75645 fcp->hash_count++;
75646 }
75647 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75648 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75649 flo = fle->object;
75650 if (!flo)
75651 goto ret_object;
75652 @@ -280,7 +280,7 @@ nocache:
75653 }
75654 flo = resolver(net, key, family, dir, flo, ctx);
75655 if (fle) {
75656 - fle->genid = atomic_read(&flow_cache_genid);
75657 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
75658 if (!IS_ERR(flo))
75659 fle->object = flo;
75660 else
75661 diff --git a/net/core/iovec.c b/net/core/iovec.c
75662 index c40f27e..7f49254 100644
75663 --- a/net/core/iovec.c
75664 +++ b/net/core/iovec.c
75665 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75666 if (m->msg_namelen) {
75667 if (mode == VERIFY_READ) {
75668 void __user *namep;
75669 - namep = (void __user __force *) m->msg_name;
75670 + namep = (void __force_user *) m->msg_name;
75671 err = move_addr_to_kernel(namep, m->msg_namelen,
75672 address);
75673 if (err < 0)
75674 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
75675 }
75676
75677 size = m->msg_iovlen * sizeof(struct iovec);
75678 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75679 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75680 return -EFAULT;
75681
75682 m->msg_iov = iov;
75683 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
75684 index 5c30296..ebe7b61 100644
75685 --- a/net/core/rtnetlink.c
75686 +++ b/net/core/rtnetlink.c
75687 @@ -57,7 +57,7 @@ struct rtnl_link {
75688 rtnl_doit_func doit;
75689 rtnl_dumpit_func dumpit;
75690 rtnl_calcit_func calcit;
75691 -};
75692 +} __no_const;
75693
75694 static DEFINE_MUTEX(rtnl_mutex);
75695
75696 diff --git a/net/core/scm.c b/net/core/scm.c
75697 index ff52ad0..aff1c0f 100644
75698 --- a/net/core/scm.c
75699 +++ b/net/core/scm.c
75700 @@ -220,7 +220,7 @@ EXPORT_SYMBOL(__scm_send);
75701 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75702 {
75703 struct cmsghdr __user *cm
75704 - = (__force struct cmsghdr __user *)msg->msg_control;
75705 + = (struct cmsghdr __force_user *)msg->msg_control;
75706 struct cmsghdr cmhdr;
75707 int cmlen = CMSG_LEN(len);
75708 int err;
75709 @@ -243,7 +243,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75710 err = -EFAULT;
75711 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75712 goto out;
75713 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75714 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75715 goto out;
75716 cmlen = CMSG_SPACE(len);
75717 if (msg->msg_controllen < cmlen)
75718 @@ -259,7 +259,7 @@ EXPORT_SYMBOL(put_cmsg);
75719 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75720 {
75721 struct cmsghdr __user *cm
75722 - = (__force struct cmsghdr __user*)msg->msg_control;
75723 + = (struct cmsghdr __force_user *)msg->msg_control;
75724
75725 int fdmax = 0;
75726 int fdnum = scm->fp->count;
75727 @@ -279,7 +279,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75728 if (fdnum < fdmax)
75729 fdmax = fdnum;
75730
75731 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75732 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75733 i++, cmfptr++)
75734 {
75735 int new_fd;
75736 diff --git a/net/core/sock.c b/net/core/sock.c
75737 index 02f8dfe..86dfd4a 100644
75738 --- a/net/core/sock.c
75739 +++ b/net/core/sock.c
75740 @@ -341,7 +341,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75741 struct sk_buff_head *list = &sk->sk_receive_queue;
75742
75743 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
75744 - atomic_inc(&sk->sk_drops);
75745 + atomic_inc_unchecked(&sk->sk_drops);
75746 trace_sock_rcvqueue_full(sk, skb);
75747 return -ENOMEM;
75748 }
75749 @@ -351,7 +351,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75750 return err;
75751
75752 if (!sk_rmem_schedule(sk, skb->truesize)) {
75753 - atomic_inc(&sk->sk_drops);
75754 + atomic_inc_unchecked(&sk->sk_drops);
75755 return -ENOBUFS;
75756 }
75757
75758 @@ -371,7 +371,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75759 skb_dst_force(skb);
75760
75761 spin_lock_irqsave(&list->lock, flags);
75762 - skb->dropcount = atomic_read(&sk->sk_drops);
75763 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75764 __skb_queue_tail(list, skb);
75765 spin_unlock_irqrestore(&list->lock, flags);
75766
75767 @@ -391,7 +391,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75768 skb->dev = NULL;
75769
75770 if (sk_rcvqueues_full(sk, skb)) {
75771 - atomic_inc(&sk->sk_drops);
75772 + atomic_inc_unchecked(&sk->sk_drops);
75773 goto discard_and_relse;
75774 }
75775 if (nested)
75776 @@ -409,7 +409,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75777 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75778 } else if (sk_add_backlog(sk, skb)) {
75779 bh_unlock_sock(sk);
75780 - atomic_inc(&sk->sk_drops);
75781 + atomic_inc_unchecked(&sk->sk_drops);
75782 goto discard_and_relse;
75783 }
75784
75785 @@ -974,7 +974,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75786 if (len > sizeof(peercred))
75787 len = sizeof(peercred);
75788 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75789 - if (copy_to_user(optval, &peercred, len))
75790 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75791 return -EFAULT;
75792 goto lenout;
75793 }
75794 @@ -987,7 +987,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75795 return -ENOTCONN;
75796 if (lv < len)
75797 return -EINVAL;
75798 - if (copy_to_user(optval, address, len))
75799 + if (len > sizeof(address) || copy_to_user(optval, address, len))
75800 return -EFAULT;
75801 goto lenout;
75802 }
75803 @@ -1024,7 +1024,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75804
75805 if (len > lv)
75806 len = lv;
75807 - if (copy_to_user(optval, &v, len))
75808 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
75809 return -EFAULT;
75810 lenout:
75811 if (put_user(len, optlen))
75812 @@ -2108,7 +2108,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75813 */
75814 smp_wmb();
75815 atomic_set(&sk->sk_refcnt, 1);
75816 - atomic_set(&sk->sk_drops, 0);
75817 + atomic_set_unchecked(&sk->sk_drops, 0);
75818 }
75819 EXPORT_SYMBOL(sock_init_data);
75820
75821 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75822 index b9868e1..849f809 100644
75823 --- a/net/core/sock_diag.c
75824 +++ b/net/core/sock_diag.c
75825 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75826
75827 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75828 {
75829 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75830 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75831 cookie[1] != INET_DIAG_NOCOOKIE) &&
75832 ((u32)(unsigned long)sk != cookie[0] ||
75833 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75834 return -ESTALE;
75835 else
75836 +#endif
75837 return 0;
75838 }
75839 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75840
75841 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75842 {
75843 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75844 + cookie[0] = 0;
75845 + cookie[1] = 0;
75846 +#else
75847 cookie[0] = (u32)(unsigned long)sk;
75848 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75849 +#endif
75850 }
75851 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75852
75853 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75854 index 02e75d1..9a57a7c 100644
75855 --- a/net/decnet/sysctl_net_decnet.c
75856 +++ b/net/decnet/sysctl_net_decnet.c
75857 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75858
75859 if (len > *lenp) len = *lenp;
75860
75861 - if (copy_to_user(buffer, addr, len))
75862 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
75863 return -EFAULT;
75864
75865 *lenp = len;
75866 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75867
75868 if (len > *lenp) len = *lenp;
75869
75870 - if (copy_to_user(buffer, devname, len))
75871 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
75872 return -EFAULT;
75873
75874 *lenp = len;
75875 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75876 index 39a2d29..f39c0fe 100644
75877 --- a/net/econet/Kconfig
75878 +++ b/net/econet/Kconfig
75879 @@ -4,7 +4,7 @@
75880
75881 config ECONET
75882 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75883 - depends on EXPERIMENTAL && INET
75884 + depends on EXPERIMENTAL && INET && BROKEN
75885 ---help---
75886 Econet is a fairly old and slow networking protocol mainly used by
75887 Acorn computers to access file and print servers. It uses native
75888 diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
75889 index 36d1440..44ff28b 100644
75890 --- a/net/ipv4/ah4.c
75891 +++ b/net/ipv4/ah4.c
75892 @@ -19,6 +19,8 @@ struct ah_skb_cb {
75893 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
75894
75895 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75896 + unsigned int size) __size_overflow(3);
75897 +static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
75898 unsigned int size)
75899 {
75900 unsigned int len;
75901 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75902 index 92fc5f6..b790d91 100644
75903 --- a/net/ipv4/fib_frontend.c
75904 +++ b/net/ipv4/fib_frontend.c
75905 @@ -970,12 +970,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75906 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75907 fib_sync_up(dev);
75908 #endif
75909 - atomic_inc(&net->ipv4.dev_addr_genid);
75910 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75911 rt_cache_flush(dev_net(dev), -1);
75912 break;
75913 case NETDEV_DOWN:
75914 fib_del_ifaddr(ifa, NULL);
75915 - atomic_inc(&net->ipv4.dev_addr_genid);
75916 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75917 if (ifa->ifa_dev->ifa_list == NULL) {
75918 /* Last address was deleted from this interface.
75919 * Disable IP.
75920 @@ -1011,7 +1011,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75921 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75922 fib_sync_up(dev);
75923 #endif
75924 - atomic_inc(&net->ipv4.dev_addr_genid);
75925 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75926 rt_cache_flush(dev_net(dev), -1);
75927 break;
75928 case NETDEV_DOWN:
75929 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75930 index 80106d8..232e898 100644
75931 --- a/net/ipv4/fib_semantics.c
75932 +++ b/net/ipv4/fib_semantics.c
75933 @@ -699,7 +699,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75934 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75935 nh->nh_gw,
75936 nh->nh_parent->fib_scope);
75937 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75938 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75939
75940 return nh->nh_saddr;
75941 }
75942 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75943 index 984ec65..97ac518 100644
75944 --- a/net/ipv4/inet_hashtables.c
75945 +++ b/net/ipv4/inet_hashtables.c
75946 @@ -18,12 +18,15 @@
75947 #include <linux/sched.h>
75948 #include <linux/slab.h>
75949 #include <linux/wait.h>
75950 +#include <linux/security.h>
75951
75952 #include <net/inet_connection_sock.h>
75953 #include <net/inet_hashtables.h>
75954 #include <net/secure_seq.h>
75955 #include <net/ip.h>
75956
75957 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75958 +
75959 /*
75960 * Allocate and initialize a new local port bind bucket.
75961 * The bindhash mutex for snum's hash chain must be held here.
75962 @@ -530,6 +533,8 @@ ok:
75963 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75964 spin_unlock(&head->lock);
75965
75966 + gr_update_task_in_ip_table(current, inet_sk(sk));
75967 +
75968 if (tw) {
75969 inet_twsk_deschedule(tw, death_row);
75970 while (twrefcnt) {
75971 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75972 index d4d61b6..b81aec8 100644
75973 --- a/net/ipv4/inetpeer.c
75974 +++ b/net/ipv4/inetpeer.c
75975 @@ -487,8 +487,8 @@ relookup:
75976 if (p) {
75977 p->daddr = *daddr;
75978 atomic_set(&p->refcnt, 1);
75979 - atomic_set(&p->rid, 0);
75980 - atomic_set(&p->ip_id_count,
75981 + atomic_set_unchecked(&p->rid, 0);
75982 + atomic_set_unchecked(&p->ip_id_count,
75983 (daddr->family == AF_INET) ?
75984 secure_ip_id(daddr->addr.a4) :
75985 secure_ipv6_id(daddr->addr.a6));
75986 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75987 index 1f23a57..7180dfe 100644
75988 --- a/net/ipv4/ip_fragment.c
75989 +++ b/net/ipv4/ip_fragment.c
75990 @@ -316,7 +316,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75991 return 0;
75992
75993 start = qp->rid;
75994 - end = atomic_inc_return(&peer->rid);
75995 + end = atomic_inc_return_unchecked(&peer->rid);
75996 qp->rid = end;
75997
75998 rc = qp->q.fragments && (end - start) > max;
75999 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
76000 index 8aa87c1..35c3248 100644
76001 --- a/net/ipv4/ip_sockglue.c
76002 +++ b/net/ipv4/ip_sockglue.c
76003 @@ -1112,7 +1112,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76004 len = min_t(unsigned int, len, opt->optlen);
76005 if (put_user(len, optlen))
76006 return -EFAULT;
76007 - if (copy_to_user(optval, opt->__data, len))
76008 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
76009 + copy_to_user(optval, opt->__data, len))
76010 return -EFAULT;
76011 return 0;
76012 }
76013 @@ -1240,7 +1241,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
76014 if (sk->sk_type != SOCK_STREAM)
76015 return -ENOPROTOOPT;
76016
76017 - msg.msg_control = optval;
76018 + msg.msg_control = (void __force_kernel *)optval;
76019 msg.msg_controllen = len;
76020 msg.msg_flags = flags;
76021
76022 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
76023 index 6e412a6..6640538 100644
76024 --- a/net/ipv4/ipconfig.c
76025 +++ b/net/ipv4/ipconfig.c
76026 @@ -318,7 +318,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
76027
76028 mm_segment_t oldfs = get_fs();
76029 set_fs(get_ds());
76030 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76031 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76032 set_fs(oldfs);
76033 return res;
76034 }
76035 @@ -329,7 +329,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
76036
76037 mm_segment_t oldfs = get_fs();
76038 set_fs(get_ds());
76039 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
76040 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
76041 set_fs(oldfs);
76042 return res;
76043 }
76044 @@ -340,7 +340,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
76045
76046 mm_segment_t oldfs = get_fs();
76047 set_fs(get_ds());
76048 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
76049 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
76050 set_fs(oldfs);
76051 return res;
76052 }
76053 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
76054 index fd7a3f6..a1b1013 100644
76055 --- a/net/ipv4/netfilter/arp_tables.c
76056 +++ b/net/ipv4/netfilter/arp_tables.c
76057 @@ -757,6 +757,9 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76058
76059 static int copy_entries_to_user(unsigned int total_size,
76060 const struct xt_table *table,
76061 + void __user *userptr) __size_overflow(1);
76062 +static int copy_entries_to_user(unsigned int total_size,
76063 + const struct xt_table *table,
76064 void __user *userptr)
76065 {
76066 unsigned int off, num;
76067 @@ -984,6 +987,11 @@ static int __do_replace(struct net *net, const char *name,
76068 unsigned int valid_hooks,
76069 struct xt_table_info *newinfo,
76070 unsigned int num_counters,
76071 + void __user *counters_ptr) __size_overflow(5);
76072 +static int __do_replace(struct net *net, const char *name,
76073 + unsigned int valid_hooks,
76074 + struct xt_table_info *newinfo,
76075 + unsigned int num_counters,
76076 void __user *counters_ptr)
76077 {
76078 int ret;
76079 @@ -1104,6 +1112,8 @@ static int do_replace(struct net *net, const void __user *user,
76080 }
76081
76082 static int do_add_counters(struct net *net, const void __user *user,
76083 + unsigned int len, int compat) __size_overflow(3);
76084 +static int do_add_counters(struct net *net, const void __user *user,
76085 unsigned int len, int compat)
76086 {
76087 unsigned int i, curcpu;
76088 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
76089 index 24e556e..b073356 100644
76090 --- a/net/ipv4/netfilter/ip_tables.c
76091 +++ b/net/ipv4/netfilter/ip_tables.c
76092 @@ -923,6 +923,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76093 static int
76094 copy_entries_to_user(unsigned int total_size,
76095 const struct xt_table *table,
76096 + void __user *userptr) __size_overflow(1);
76097 +static int
76098 +copy_entries_to_user(unsigned int total_size,
76099 + const struct xt_table *table,
76100 void __user *userptr)
76101 {
76102 unsigned int off, num;
76103 @@ -1172,6 +1176,10 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
76104 static int
76105 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76106 struct xt_table_info *newinfo, unsigned int num_counters,
76107 + void __user *counters_ptr) __size_overflow(5);
76108 +static int
76109 +__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76110 + struct xt_table_info *newinfo, unsigned int num_counters,
76111 void __user *counters_ptr)
76112 {
76113 int ret;
76114 @@ -1293,6 +1301,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
76115
76116 static int
76117 do_add_counters(struct net *net, const void __user *user,
76118 + unsigned int len, int compat) __size_overflow(3);
76119 +static int
76120 +do_add_counters(struct net *net, const void __user *user,
76121 unsigned int len, int compat)
76122 {
76123 unsigned int i, curcpu;
76124 diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
76125 index ba5756d..8d34d74 100644
76126 --- a/net/ipv4/netfilter/ipt_ULOG.c
76127 +++ b/net/ipv4/netfilter/ipt_ULOG.c
76128 @@ -125,6 +125,7 @@ static void ulog_timer(unsigned long data)
76129 spin_unlock_bh(&ulog_lock);
76130 }
76131
76132 +static struct sk_buff *ulog_alloc_skb(unsigned int size) __size_overflow(1);
76133 static struct sk_buff *ulog_alloc_skb(unsigned int size)
76134 {
76135 struct sk_buff *skb;
76136 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
76137 index 2133c30..0e8047e 100644
76138 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
76139 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
76140 @@ -435,6 +435,10 @@ static unsigned char asn1_subid_decode(struct asn1_ctx *ctx,
76141 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
76142 unsigned char *eoc,
76143 unsigned long **oid,
76144 + unsigned int *len) __size_overflow(2);
76145 +static unsigned char asn1_oid_decode(struct asn1_ctx *ctx,
76146 + unsigned char *eoc,
76147 + unsigned long **oid,
76148 unsigned int *len)
76149 {
76150 unsigned long subid;
76151 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
76152 index b072386..abdebcf 100644
76153 --- a/net/ipv4/ping.c
76154 +++ b/net/ipv4/ping.c
76155 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
76156 sk_rmem_alloc_get(sp),
76157 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76158 atomic_read(&sp->sk_refcnt), sp,
76159 - atomic_read(&sp->sk_drops), len);
76160 + atomic_read_unchecked(&sp->sk_drops), len);
76161 }
76162
76163 static int ping_seq_show(struct seq_file *seq, void *v)
76164 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
76165 index 3ccda5a..3c1e61d 100644
76166 --- a/net/ipv4/raw.c
76167 +++ b/net/ipv4/raw.c
76168 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
76169 int raw_rcv(struct sock *sk, struct sk_buff *skb)
76170 {
76171 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
76172 - atomic_inc(&sk->sk_drops);
76173 + atomic_inc_unchecked(&sk->sk_drops);
76174 kfree_skb(skb);
76175 return NET_RX_DROP;
76176 }
76177 @@ -742,16 +742,20 @@ static int raw_init(struct sock *sk)
76178
76179 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
76180 {
76181 + struct icmp_filter filter;
76182 +
76183 if (optlen > sizeof(struct icmp_filter))
76184 optlen = sizeof(struct icmp_filter);
76185 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
76186 + if (copy_from_user(&filter, optval, optlen))
76187 return -EFAULT;
76188 + raw_sk(sk)->filter = filter;
76189 return 0;
76190 }
76191
76192 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
76193 {
76194 int len, ret = -EFAULT;
76195 + struct icmp_filter filter;
76196
76197 if (get_user(len, optlen))
76198 goto out;
76199 @@ -761,8 +765,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
76200 if (len > sizeof(struct icmp_filter))
76201 len = sizeof(struct icmp_filter);
76202 ret = -EFAULT;
76203 - if (put_user(len, optlen) ||
76204 - copy_to_user(optval, &raw_sk(sk)->filter, len))
76205 + filter = raw_sk(sk)->filter;
76206 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
76207 goto out;
76208 ret = 0;
76209 out: return ret;
76210 @@ -990,7 +994,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76211 sk_wmem_alloc_get(sp),
76212 sk_rmem_alloc_get(sp),
76213 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76214 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76215 + atomic_read(&sp->sk_refcnt),
76216 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76217 + NULL,
76218 +#else
76219 + sp,
76220 +#endif
76221 + atomic_read_unchecked(&sp->sk_drops));
76222 }
76223
76224 static int raw_seq_show(struct seq_file *seq, void *v)
76225 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
76226 index 0197747..7adb0dc 100644
76227 --- a/net/ipv4/route.c
76228 +++ b/net/ipv4/route.c
76229 @@ -311,7 +311,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
76230
76231 static inline int rt_genid(struct net *net)
76232 {
76233 - return atomic_read(&net->ipv4.rt_genid);
76234 + return atomic_read_unchecked(&net->ipv4.rt_genid);
76235 }
76236
76237 #ifdef CONFIG_PROC_FS
76238 @@ -935,7 +935,7 @@ static void rt_cache_invalidate(struct net *net)
76239 unsigned char shuffle;
76240
76241 get_random_bytes(&shuffle, sizeof(shuffle));
76242 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
76243 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
76244 inetpeer_invalidate_tree(AF_INET);
76245 }
76246
76247 @@ -3010,7 +3010,7 @@ static int rt_fill_info(struct net *net,
76248 error = rt->dst.error;
76249 if (peer) {
76250 inet_peer_refcheck(rt->peer);
76251 - id = atomic_read(&peer->ip_id_count) & 0xffff;
76252 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
76253 if (peer->tcp_ts_stamp) {
76254 ts = peer->tcp_ts;
76255 tsage = get_seconds() - peer->tcp_ts_stamp;
76256 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
76257 index fd54c5f..96d6407 100644
76258 --- a/net/ipv4/tcp_ipv4.c
76259 +++ b/net/ipv4/tcp_ipv4.c
76260 @@ -88,6 +88,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
76261 int sysctl_tcp_low_latency __read_mostly;
76262 EXPORT_SYMBOL(sysctl_tcp_low_latency);
76263
76264 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76265 +extern int grsec_enable_blackhole;
76266 +#endif
76267
76268 #ifdef CONFIG_TCP_MD5SIG
76269 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
76270 @@ -1638,6 +1641,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
76271 return 0;
76272
76273 reset:
76274 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76275 + if (!grsec_enable_blackhole)
76276 +#endif
76277 tcp_v4_send_reset(rsk, skb);
76278 discard:
76279 kfree_skb(skb);
76280 @@ -1700,12 +1706,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
76281 TCP_SKB_CB(skb)->sacked = 0;
76282
76283 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76284 - if (!sk)
76285 + if (!sk) {
76286 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76287 + ret = 1;
76288 +#endif
76289 goto no_tcp_socket;
76290 -
76291 + }
76292 process:
76293 - if (sk->sk_state == TCP_TIME_WAIT)
76294 + if (sk->sk_state == TCP_TIME_WAIT) {
76295 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76296 + ret = 2;
76297 +#endif
76298 goto do_time_wait;
76299 + }
76300
76301 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
76302 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76303 @@ -1755,6 +1768,10 @@ no_tcp_socket:
76304 bad_packet:
76305 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76306 } else {
76307 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76308 + if (!grsec_enable_blackhole || (ret == 1 &&
76309 + (skb->dev->flags & IFF_LOOPBACK)))
76310 +#endif
76311 tcp_v4_send_reset(NULL, skb);
76312 }
76313
76314 @@ -2417,7 +2434,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
76315 0, /* non standard timer */
76316 0, /* open_requests have no inode */
76317 atomic_read(&sk->sk_refcnt),
76318 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76319 + NULL,
76320 +#else
76321 req,
76322 +#endif
76323 len);
76324 }
76325
76326 @@ -2467,7 +2488,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
76327 sock_i_uid(sk),
76328 icsk->icsk_probes_out,
76329 sock_i_ino(sk),
76330 - atomic_read(&sk->sk_refcnt), sk,
76331 + atomic_read(&sk->sk_refcnt),
76332 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76333 + NULL,
76334 +#else
76335 + sk,
76336 +#endif
76337 jiffies_to_clock_t(icsk->icsk_rto),
76338 jiffies_to_clock_t(icsk->icsk_ack.ato),
76339 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
76340 @@ -2495,7 +2521,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
76341 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
76342 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
76343 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76344 - atomic_read(&tw->tw_refcnt), tw, len);
76345 + atomic_read(&tw->tw_refcnt),
76346 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76347 + NULL,
76348 +#else
76349 + tw,
76350 +#endif
76351 + len);
76352 }
76353
76354 #define TMPSZ 150
76355 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
76356 index 550e755..25721b3 100644
76357 --- a/net/ipv4/tcp_minisocks.c
76358 +++ b/net/ipv4/tcp_minisocks.c
76359 @@ -27,6 +27,10 @@
76360 #include <net/inet_common.h>
76361 #include <net/xfrm.h>
76362
76363 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76364 +extern int grsec_enable_blackhole;
76365 +#endif
76366 +
76367 int sysctl_tcp_syncookies __read_mostly = 1;
76368 EXPORT_SYMBOL(sysctl_tcp_syncookies);
76369
76370 @@ -753,6 +757,10 @@ listen_overflow:
76371
76372 embryonic_reset:
76373 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
76374 +
76375 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76376 + if (!grsec_enable_blackhole)
76377 +#endif
76378 if (!(flg & TCP_FLAG_RST))
76379 req->rsk_ops->send_reset(sk, skb);
76380
76381 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
76382 index 85ee7eb..53277ab 100644
76383 --- a/net/ipv4/tcp_probe.c
76384 +++ b/net/ipv4/tcp_probe.c
76385 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
76386 if (cnt + width >= len)
76387 break;
76388
76389 - if (copy_to_user(buf + cnt, tbuf, width))
76390 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
76391 return -EFAULT;
76392 cnt += width;
76393 }
76394 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
76395 index cd2e072..1fffee2 100644
76396 --- a/net/ipv4/tcp_timer.c
76397 +++ b/net/ipv4/tcp_timer.c
76398 @@ -22,6 +22,10 @@
76399 #include <linux/gfp.h>
76400 #include <net/tcp.h>
76401
76402 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76403 +extern int grsec_lastack_retries;
76404 +#endif
76405 +
76406 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
76407 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
76408 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
76409 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
76410 }
76411 }
76412
76413 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76414 + if ((sk->sk_state == TCP_LAST_ACK) &&
76415 + (grsec_lastack_retries > 0) &&
76416 + (grsec_lastack_retries < retry_until))
76417 + retry_until = grsec_lastack_retries;
76418 +#endif
76419 +
76420 if (retransmits_timed_out(sk, retry_until,
76421 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
76422 /* Has it gone just too far? */
76423 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
76424 index 5d075b5..d907d5f 100644
76425 --- a/net/ipv4/udp.c
76426 +++ b/net/ipv4/udp.c
76427 @@ -86,6 +86,7 @@
76428 #include <linux/types.h>
76429 #include <linux/fcntl.h>
76430 #include <linux/module.h>
76431 +#include <linux/security.h>
76432 #include <linux/socket.h>
76433 #include <linux/sockios.h>
76434 #include <linux/igmp.h>
76435 @@ -108,6 +109,10 @@
76436 #include <trace/events/udp.h>
76437 #include "udp_impl.h"
76438
76439 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76440 +extern int grsec_enable_blackhole;
76441 +#endif
76442 +
76443 struct udp_table udp_table __read_mostly;
76444 EXPORT_SYMBOL(udp_table);
76445
76446 @@ -566,6 +571,9 @@ found:
76447 return s;
76448 }
76449
76450 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
76451 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
76452 +
76453 /*
76454 * This routine is called by the ICMP module when it gets some
76455 * sort of error condition. If err < 0 then the socket should
76456 @@ -857,9 +865,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
76457 dport = usin->sin_port;
76458 if (dport == 0)
76459 return -EINVAL;
76460 +
76461 + err = gr_search_udp_sendmsg(sk, usin);
76462 + if (err)
76463 + return err;
76464 } else {
76465 if (sk->sk_state != TCP_ESTABLISHED)
76466 return -EDESTADDRREQ;
76467 +
76468 + err = gr_search_udp_sendmsg(sk, NULL);
76469 + if (err)
76470 + return err;
76471 +
76472 daddr = inet->inet_daddr;
76473 dport = inet->inet_dport;
76474 /* Open fast path for connected socket.
76475 @@ -1100,7 +1117,7 @@ static unsigned int first_packet_length(struct sock *sk)
76476 udp_lib_checksum_complete(skb)) {
76477 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76478 IS_UDPLITE(sk));
76479 - atomic_inc(&sk->sk_drops);
76480 + atomic_inc_unchecked(&sk->sk_drops);
76481 __skb_unlink(skb, rcvq);
76482 __skb_queue_tail(&list_kill, skb);
76483 }
76484 @@ -1186,6 +1203,10 @@ try_again:
76485 if (!skb)
76486 goto out;
76487
76488 + err = gr_search_udp_recvmsg(sk, skb);
76489 + if (err)
76490 + goto out_free;
76491 +
76492 ulen = skb->len - sizeof(struct udphdr);
76493 copied = len;
76494 if (copied > ulen)
76495 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
76496
76497 drop:
76498 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76499 - atomic_inc(&sk->sk_drops);
76500 + atomic_inc_unchecked(&sk->sk_drops);
76501 kfree_skb(skb);
76502 return -1;
76503 }
76504 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76505 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
76506
76507 if (!skb1) {
76508 - atomic_inc(&sk->sk_drops);
76509 + atomic_inc_unchecked(&sk->sk_drops);
76510 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
76511 IS_UDPLITE(sk));
76512 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
76513 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76514 goto csum_error;
76515
76516 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
76517 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76518 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76519 +#endif
76520 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
76521
76522 /*
76523 @@ -2100,8 +2124,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
76524 sk_wmem_alloc_get(sp),
76525 sk_rmem_alloc_get(sp),
76526 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
76527 - atomic_read(&sp->sk_refcnt), sp,
76528 - atomic_read(&sp->sk_drops), len);
76529 + atomic_read(&sp->sk_refcnt),
76530 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76531 + NULL,
76532 +#else
76533 + sp,
76534 +#endif
76535 + atomic_read_unchecked(&sp->sk_drops), len);
76536 }
76537
76538 int udp4_seq_show(struct seq_file *seq, void *v)
76539 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
76540 index 6b8ebc5..1d624f4 100644
76541 --- a/net/ipv6/addrconf.c
76542 +++ b/net/ipv6/addrconf.c
76543 @@ -2145,7 +2145,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
76544 p.iph.ihl = 5;
76545 p.iph.protocol = IPPROTO_IPV6;
76546 p.iph.ttl = 64;
76547 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
76548 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
76549
76550 if (ops->ndo_do_ioctl) {
76551 mm_segment_t oldfs = get_fs();
76552 diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
76553 index 2ae79db..8f101bf 100644
76554 --- a/net/ipv6/ah6.c
76555 +++ b/net/ipv6/ah6.c
76556 @@ -56,6 +56,8 @@ struct ah_skb_cb {
76557 #define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
76558
76559 static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76560 + unsigned int size) __size_overflow(3);
76561 +static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
76562 unsigned int size)
76563 {
76564 unsigned int len;
76565 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
76566 index 02dd203..e03fcc9 100644
76567 --- a/net/ipv6/inet6_connection_sock.c
76568 +++ b/net/ipv6/inet6_connection_sock.c
76569 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
76570 #ifdef CONFIG_XFRM
76571 {
76572 struct rt6_info *rt = (struct rt6_info *)dst;
76573 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76574 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76575 }
76576 #endif
76577 }
76578 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
76579 #ifdef CONFIG_XFRM
76580 if (dst) {
76581 struct rt6_info *rt = (struct rt6_info *)dst;
76582 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76583 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76584 __sk_dst_reset(sk);
76585 dst = NULL;
76586 }
76587 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
76588 index 18a2719..779f36a 100644
76589 --- a/net/ipv6/ipv6_sockglue.c
76590 +++ b/net/ipv6/ipv6_sockglue.c
76591 @@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76592 if (sk->sk_type != SOCK_STREAM)
76593 return -ENOPROTOOPT;
76594
76595 - msg.msg_control = optval;
76596 + msg.msg_control = (void __force_kernel *)optval;
76597 msg.msg_controllen = len;
76598 msg.msg_flags = flags;
76599
76600 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
76601 index 94874b0..108a94d 100644
76602 --- a/net/ipv6/netfilter/ip6_tables.c
76603 +++ b/net/ipv6/netfilter/ip6_tables.c
76604 @@ -945,6 +945,10 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
76605 static int
76606 copy_entries_to_user(unsigned int total_size,
76607 const struct xt_table *table,
76608 + void __user *userptr) __size_overflow(1);
76609 +static int
76610 +copy_entries_to_user(unsigned int total_size,
76611 + const struct xt_table *table,
76612 void __user *userptr)
76613 {
76614 unsigned int off, num;
76615 @@ -1194,6 +1198,10 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
76616 static int
76617 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76618 struct xt_table_info *newinfo, unsigned int num_counters,
76619 + void __user *counters_ptr) __size_overflow(5);
76620 +static int
76621 +__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
76622 + struct xt_table_info *newinfo, unsigned int num_counters,
76623 void __user *counters_ptr)
76624 {
76625 int ret;
76626 @@ -1315,6 +1323,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
76627
76628 static int
76629 do_add_counters(struct net *net, const void __user *user, unsigned int len,
76630 + int compat) __size_overflow(3);
76631 +static int
76632 +do_add_counters(struct net *net, const void __user *user, unsigned int len,
76633 int compat)
76634 {
76635 unsigned int i, curcpu;
76636 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
76637 index d02f7e4..2d2a0f1 100644
76638 --- a/net/ipv6/raw.c
76639 +++ b/net/ipv6/raw.c
76640 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
76641 {
76642 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
76643 skb_checksum_complete(skb)) {
76644 - atomic_inc(&sk->sk_drops);
76645 + atomic_inc_unchecked(&sk->sk_drops);
76646 kfree_skb(skb);
76647 return NET_RX_DROP;
76648 }
76649 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76650 struct raw6_sock *rp = raw6_sk(sk);
76651
76652 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76653 - atomic_inc(&sk->sk_drops);
76654 + atomic_inc_unchecked(&sk->sk_drops);
76655 kfree_skb(skb);
76656 return NET_RX_DROP;
76657 }
76658 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76659
76660 if (inet->hdrincl) {
76661 if (skb_checksum_complete(skb)) {
76662 - atomic_inc(&sk->sk_drops);
76663 + atomic_inc_unchecked(&sk->sk_drops);
76664 kfree_skb(skb);
76665 return NET_RX_DROP;
76666 }
76667 @@ -602,7 +602,7 @@ out:
76668 return err;
76669 }
76670
76671 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76672 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
76673 struct flowi6 *fl6, struct dst_entry **dstp,
76674 unsigned int flags)
76675 {
76676 @@ -912,12 +912,15 @@ do_confirm:
76677 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76678 char __user *optval, int optlen)
76679 {
76680 + struct icmp6_filter filter;
76681 +
76682 switch (optname) {
76683 case ICMPV6_FILTER:
76684 if (optlen > sizeof(struct icmp6_filter))
76685 optlen = sizeof(struct icmp6_filter);
76686 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76687 + if (copy_from_user(&filter, optval, optlen))
76688 return -EFAULT;
76689 + raw6_sk(sk)->filter = filter;
76690 return 0;
76691 default:
76692 return -ENOPROTOOPT;
76693 @@ -930,6 +933,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76694 char __user *optval, int __user *optlen)
76695 {
76696 int len;
76697 + struct icmp6_filter filter;
76698
76699 switch (optname) {
76700 case ICMPV6_FILTER:
76701 @@ -941,7 +945,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76702 len = sizeof(struct icmp6_filter);
76703 if (put_user(len, optlen))
76704 return -EFAULT;
76705 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76706 + filter = raw6_sk(sk)->filter;
76707 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
76708 return -EFAULT;
76709 return 0;
76710 default:
76711 @@ -1248,7 +1253,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76712 0, 0L, 0,
76713 sock_i_uid(sp), 0,
76714 sock_i_ino(sp),
76715 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76716 + atomic_read(&sp->sk_refcnt),
76717 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76718 + NULL,
76719 +#else
76720 + sp,
76721 +#endif
76722 + atomic_read_unchecked(&sp->sk_drops));
76723 }
76724
76725 static int raw6_seq_show(struct seq_file *seq, void *v)
76726 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
76727 index a89ca8d..12e66b0 100644
76728 --- a/net/ipv6/tcp_ipv6.c
76729 +++ b/net/ipv6/tcp_ipv6.c
76730 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76731 }
76732 #endif
76733
76734 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76735 +extern int grsec_enable_blackhole;
76736 +#endif
76737 +
76738 static void tcp_v6_hash(struct sock *sk)
76739 {
76740 if (sk->sk_state != TCP_CLOSE) {
76741 @@ -1654,6 +1658,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76742 return 0;
76743
76744 reset:
76745 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76746 + if (!grsec_enable_blackhole)
76747 +#endif
76748 tcp_v6_send_reset(sk, skb);
76749 discard:
76750 if (opt_skb)
76751 @@ -1733,12 +1740,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
76752 TCP_SKB_CB(skb)->sacked = 0;
76753
76754 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76755 - if (!sk)
76756 + if (!sk) {
76757 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76758 + ret = 1;
76759 +#endif
76760 goto no_tcp_socket;
76761 + }
76762
76763 process:
76764 - if (sk->sk_state == TCP_TIME_WAIT)
76765 + if (sk->sk_state == TCP_TIME_WAIT) {
76766 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76767 + ret = 2;
76768 +#endif
76769 goto do_time_wait;
76770 + }
76771
76772 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76773 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
76774 @@ -1786,6 +1801,10 @@ no_tcp_socket:
76775 bad_packet:
76776 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76777 } else {
76778 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76779 + if (!grsec_enable_blackhole || (ret == 1 &&
76780 + (skb->dev->flags & IFF_LOOPBACK)))
76781 +#endif
76782 tcp_v6_send_reset(NULL, skb);
76783 }
76784
76785 @@ -2047,7 +2066,13 @@ static void get_openreq6(struct seq_file *seq,
76786 uid,
76787 0, /* non standard timer */
76788 0, /* open_requests have no inode */
76789 - 0, req);
76790 + 0,
76791 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76792 + NULL
76793 +#else
76794 + req
76795 +#endif
76796 + );
76797 }
76798
76799 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76800 @@ -2097,7 +2122,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76801 sock_i_uid(sp),
76802 icsk->icsk_probes_out,
76803 sock_i_ino(sp),
76804 - atomic_read(&sp->sk_refcnt), sp,
76805 + atomic_read(&sp->sk_refcnt),
76806 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76807 + NULL,
76808 +#else
76809 + sp,
76810 +#endif
76811 jiffies_to_clock_t(icsk->icsk_rto),
76812 jiffies_to_clock_t(icsk->icsk_ack.ato),
76813 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
76814 @@ -2132,7 +2162,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76815 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76816 tw->tw_substate, 0, 0,
76817 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76818 - atomic_read(&tw->tw_refcnt), tw);
76819 + atomic_read(&tw->tw_refcnt),
76820 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76821 + NULL
76822 +#else
76823 + tw
76824 +#endif
76825 + );
76826 }
76827
76828 static int tcp6_seq_show(struct seq_file *seq, void *v)
76829 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
76830 index 4f96b5c..75543d7 100644
76831 --- a/net/ipv6/udp.c
76832 +++ b/net/ipv6/udp.c
76833 @@ -50,6 +50,10 @@
76834 #include <linux/seq_file.h>
76835 #include "udp_impl.h"
76836
76837 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76838 +extern int grsec_enable_blackhole;
76839 +#endif
76840 +
76841 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76842 {
76843 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
76844 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76845
76846 return 0;
76847 drop:
76848 - atomic_inc(&sk->sk_drops);
76849 + atomic_inc_unchecked(&sk->sk_drops);
76850 drop_no_sk_drops_inc:
76851 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76852 kfree_skb(skb);
76853 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76854 continue;
76855 }
76856 drop:
76857 - atomic_inc(&sk->sk_drops);
76858 + atomic_inc_unchecked(&sk->sk_drops);
76859 UDP6_INC_STATS_BH(sock_net(sk),
76860 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76861 UDP6_INC_STATS_BH(sock_net(sk),
76862 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76863 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76864 proto == IPPROTO_UDPLITE);
76865
76866 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76867 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76868 +#endif
76869 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76870
76871 kfree_skb(skb);
76872 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76873 if (!sock_owned_by_user(sk))
76874 udpv6_queue_rcv_skb(sk, skb);
76875 else if (sk_add_backlog(sk, skb)) {
76876 - atomic_inc(&sk->sk_drops);
76877 + atomic_inc_unchecked(&sk->sk_drops);
76878 bh_unlock_sock(sk);
76879 sock_put(sk);
76880 goto discard;
76881 @@ -1410,8 +1417,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76882 0, 0L, 0,
76883 sock_i_uid(sp), 0,
76884 sock_i_ino(sp),
76885 - atomic_read(&sp->sk_refcnt), sp,
76886 - atomic_read(&sp->sk_drops));
76887 + atomic_read(&sp->sk_refcnt),
76888 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76889 + NULL,
76890 +#else
76891 + sp,
76892 +#endif
76893 + atomic_read_unchecked(&sp->sk_drops));
76894 }
76895
76896 int udp6_seq_show(struct seq_file *seq, void *v)
76897 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76898 index 253695d..9481ce8 100644
76899 --- a/net/irda/ircomm/ircomm_tty.c
76900 +++ b/net/irda/ircomm/ircomm_tty.c
76901 @@ -282,16 +282,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76902 add_wait_queue(&self->open_wait, &wait);
76903
76904 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76905 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76906 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76907
76908 /* As far as I can see, we protect open_count - Jean II */
76909 spin_lock_irqsave(&self->spinlock, flags);
76910 if (!tty_hung_up_p(filp)) {
76911 extra_count = 1;
76912 - self->open_count--;
76913 + local_dec(&self->open_count);
76914 }
76915 spin_unlock_irqrestore(&self->spinlock, flags);
76916 - self->blocked_open++;
76917 + local_inc(&self->blocked_open);
76918
76919 while (1) {
76920 if (tty->termios->c_cflag & CBAUD) {
76921 @@ -331,7 +331,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76922 }
76923
76924 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76925 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76926 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76927
76928 schedule();
76929 }
76930 @@ -342,13 +342,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76931 if (extra_count) {
76932 /* ++ is not atomic, so this should be protected - Jean II */
76933 spin_lock_irqsave(&self->spinlock, flags);
76934 - self->open_count++;
76935 + local_inc(&self->open_count);
76936 spin_unlock_irqrestore(&self->spinlock, flags);
76937 }
76938 - self->blocked_open--;
76939 + local_dec(&self->blocked_open);
76940
76941 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76942 - __FILE__,__LINE__, tty->driver->name, self->open_count);
76943 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76944
76945 if (!retval)
76946 self->flags |= ASYNC_NORMAL_ACTIVE;
76947 @@ -417,14 +417,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76948 }
76949 /* ++ is not atomic, so this should be protected - Jean II */
76950 spin_lock_irqsave(&self->spinlock, flags);
76951 - self->open_count++;
76952 + local_inc(&self->open_count);
76953
76954 tty->driver_data = self;
76955 self->tty = tty;
76956 spin_unlock_irqrestore(&self->spinlock, flags);
76957
76958 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76959 - self->line, self->open_count);
76960 + self->line, local_read(&self->open_count));
76961
76962 /* Not really used by us, but lets do it anyway */
76963 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76964 @@ -510,7 +510,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76965 return;
76966 }
76967
76968 - if ((tty->count == 1) && (self->open_count != 1)) {
76969 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76970 /*
76971 * Uh, oh. tty->count is 1, which means that the tty
76972 * structure will be freed. state->count should always
76973 @@ -520,16 +520,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76974 */
76975 IRDA_DEBUG(0, "%s(), bad serial port count; "
76976 "tty->count is 1, state->count is %d\n", __func__ ,
76977 - self->open_count);
76978 - self->open_count = 1;
76979 + local_read(&self->open_count));
76980 + local_set(&self->open_count, 1);
76981 }
76982
76983 - if (--self->open_count < 0) {
76984 + if (local_dec_return(&self->open_count) < 0) {
76985 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76986 - __func__, self->line, self->open_count);
76987 - self->open_count = 0;
76988 + __func__, self->line, local_read(&self->open_count));
76989 + local_set(&self->open_count, 0);
76990 }
76991 - if (self->open_count) {
76992 + if (local_read(&self->open_count)) {
76993 spin_unlock_irqrestore(&self->spinlock, flags);
76994
76995 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76996 @@ -561,7 +561,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76997 tty->closing = 0;
76998 self->tty = NULL;
76999
77000 - if (self->blocked_open) {
77001 + if (local_read(&self->blocked_open)) {
77002 if (self->close_delay)
77003 schedule_timeout_interruptible(self->close_delay);
77004 wake_up_interruptible(&self->open_wait);
77005 @@ -1013,7 +1013,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
77006 spin_lock_irqsave(&self->spinlock, flags);
77007 self->flags &= ~ASYNC_NORMAL_ACTIVE;
77008 self->tty = NULL;
77009 - self->open_count = 0;
77010 + local_set(&self->open_count, 0);
77011 spin_unlock_irqrestore(&self->spinlock, flags);
77012
77013 wake_up_interruptible(&self->open_wait);
77014 @@ -1360,7 +1360,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
77015 seq_putc(m, '\n');
77016
77017 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
77018 - seq_printf(m, "Open count: %d\n", self->open_count);
77019 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
77020 seq_printf(m, "Max data size: %d\n", self->max_data_size);
77021 seq_printf(m, "Max header size: %d\n", self->max_header_size);
77022
77023 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
77024 index d5c5b8f..33beff0 100644
77025 --- a/net/iucv/af_iucv.c
77026 +++ b/net/iucv/af_iucv.c
77027 @@ -764,10 +764,10 @@ static int iucv_sock_autobind(struct sock *sk)
77028
77029 write_lock_bh(&iucv_sk_list.lock);
77030
77031 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
77032 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77033 while (__iucv_get_sock_by_name(name)) {
77034 sprintf(name, "%08x",
77035 - atomic_inc_return(&iucv_sk_list.autobind_name));
77036 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
77037 }
77038
77039 write_unlock_bh(&iucv_sk_list.lock);
77040 diff --git a/net/key/af_key.c b/net/key/af_key.c
77041 index 11dbb22..c20f667 100644
77042 --- a/net/key/af_key.c
77043 +++ b/net/key/af_key.c
77044 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
77045 static u32 get_acqseq(void)
77046 {
77047 u32 res;
77048 - static atomic_t acqseq;
77049 + static atomic_unchecked_t acqseq;
77050
77051 do {
77052 - res = atomic_inc_return(&acqseq);
77053 + res = atomic_inc_return_unchecked(&acqseq);
77054 } while (!res);
77055 return res;
77056 }
77057 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
77058 index 2f0642d..e5c6fba 100644
77059 --- a/net/mac80211/ieee80211_i.h
77060 +++ b/net/mac80211/ieee80211_i.h
77061 @@ -28,6 +28,7 @@
77062 #include <net/ieee80211_radiotap.h>
77063 #include <net/cfg80211.h>
77064 #include <net/mac80211.h>
77065 +#include <asm/local.h>
77066 #include "key.h"
77067 #include "sta_info.h"
77068
77069 @@ -781,7 +782,7 @@ struct ieee80211_local {
77070 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
77071 spinlock_t queue_stop_reason_lock;
77072
77073 - int open_count;
77074 + local_t open_count;
77075 int monitors, cooked_mntrs;
77076 /* number of interfaces with corresponding FIF_ flags */
77077 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
77078 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
77079 index 8e2137b..2974283 100644
77080 --- a/net/mac80211/iface.c
77081 +++ b/net/mac80211/iface.c
77082 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77083 break;
77084 }
77085
77086 - if (local->open_count == 0) {
77087 + if (local_read(&local->open_count) == 0) {
77088 res = drv_start(local);
77089 if (res)
77090 goto err_del_bss;
77091 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77092 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
77093
77094 if (!is_valid_ether_addr(dev->dev_addr)) {
77095 - if (!local->open_count)
77096 + if (!local_read(&local->open_count))
77097 drv_stop(local);
77098 return -EADDRNOTAVAIL;
77099 }
77100 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77101 mutex_unlock(&local->mtx);
77102
77103 if (coming_up)
77104 - local->open_count++;
77105 + local_inc(&local->open_count);
77106
77107 if (hw_reconf_flags)
77108 ieee80211_hw_config(local, hw_reconf_flags);
77109 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
77110 err_del_interface:
77111 drv_remove_interface(local, sdata);
77112 err_stop:
77113 - if (!local->open_count)
77114 + if (!local_read(&local->open_count))
77115 drv_stop(local);
77116 err_del_bss:
77117 sdata->bss = NULL;
77118 @@ -489,7 +489,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77119 }
77120
77121 if (going_down)
77122 - local->open_count--;
77123 + local_dec(&local->open_count);
77124
77125 switch (sdata->vif.type) {
77126 case NL80211_IFTYPE_AP_VLAN:
77127 @@ -548,7 +548,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
77128
77129 ieee80211_recalc_ps(local, -1);
77130
77131 - if (local->open_count == 0) {
77132 + if (local_read(&local->open_count) == 0) {
77133 if (local->ops->napi_poll)
77134 napi_disable(&local->napi);
77135 ieee80211_clear_tx_pending(local);
77136 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
77137 index b142bd4..a651749 100644
77138 --- a/net/mac80211/main.c
77139 +++ b/net/mac80211/main.c
77140 @@ -166,7 +166,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
77141 local->hw.conf.power_level = power;
77142 }
77143
77144 - if (changed && local->open_count) {
77145 + if (changed && local_read(&local->open_count)) {
77146 ret = drv_config(local, changed);
77147 /*
77148 * Goal:
77149 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
77150 index 596efaf..8f1911f 100644
77151 --- a/net/mac80211/pm.c
77152 +++ b/net/mac80211/pm.c
77153 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77154 struct ieee80211_sub_if_data *sdata;
77155 struct sta_info *sta;
77156
77157 - if (!local->open_count)
77158 + if (!local_read(&local->open_count))
77159 goto suspend;
77160
77161 ieee80211_scan_cancel(local);
77162 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77163 cancel_work_sync(&local->dynamic_ps_enable_work);
77164 del_timer_sync(&local->dynamic_ps_timer);
77165
77166 - local->wowlan = wowlan && local->open_count;
77167 + local->wowlan = wowlan && local_read(&local->open_count);
77168 if (local->wowlan) {
77169 int err = drv_suspend(local, wowlan);
77170 if (err < 0) {
77171 @@ -129,7 +129,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
77172 }
77173
77174 /* stop hardware - this must stop RX */
77175 - if (local->open_count)
77176 + if (local_read(&local->open_count))
77177 ieee80211_stop_device(local);
77178
77179 suspend:
77180 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
77181 index f9b8e81..bb89b46 100644
77182 --- a/net/mac80211/rate.c
77183 +++ b/net/mac80211/rate.c
77184 @@ -401,7 +401,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
77185
77186 ASSERT_RTNL();
77187
77188 - if (local->open_count)
77189 + if (local_read(&local->open_count))
77190 return -EBUSY;
77191
77192 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
77193 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
77194 index c97a065..ff61928 100644
77195 --- a/net/mac80211/rc80211_pid_debugfs.c
77196 +++ b/net/mac80211/rc80211_pid_debugfs.c
77197 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
77198
77199 spin_unlock_irqrestore(&events->lock, status);
77200
77201 - if (copy_to_user(buf, pb, p))
77202 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
77203 return -EFAULT;
77204
77205 return p;
77206 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
77207 index 9919892..8c49803 100644
77208 --- a/net/mac80211/util.c
77209 +++ b/net/mac80211/util.c
77210 @@ -1143,7 +1143,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
77211 }
77212 #endif
77213 /* everything else happens only if HW was up & running */
77214 - if (!local->open_count)
77215 + if (!local_read(&local->open_count))
77216 goto wake_up;
77217
77218 /*
77219 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
77220 index f8ac4ef..b02560b 100644
77221 --- a/net/netfilter/Kconfig
77222 +++ b/net/netfilter/Kconfig
77223 @@ -806,6 +806,16 @@ config NETFILTER_XT_MATCH_ESP
77224
77225 To compile it as a module, choose M here. If unsure, say N.
77226
77227 +config NETFILTER_XT_MATCH_GRADM
77228 + tristate '"gradm" match support'
77229 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
77230 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
77231 + ---help---
77232 + The gradm match allows to match on grsecurity RBAC being enabled.
77233 + It is useful when iptables rules are applied early on bootup to
77234 + prevent connections to the machine (except from a trusted host)
77235 + while the RBAC system is disabled.
77236 +
77237 config NETFILTER_XT_MATCH_HASHLIMIT
77238 tristate '"hashlimit" match support'
77239 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
77240 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
77241 index 40f4c3d..0d5dd6b 100644
77242 --- a/net/netfilter/Makefile
77243 +++ b/net/netfilter/Makefile
77244 @@ -83,6 +83,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
77245 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
77246 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
77247 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
77248 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
77249 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
77250 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
77251 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
77252 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
77253 index 29fa5ba..8debc79 100644
77254 --- a/net/netfilter/ipvs/ip_vs_conn.c
77255 +++ b/net/netfilter/ipvs/ip_vs_conn.c
77256 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
77257 /* Increase the refcnt counter of the dest */
77258 atomic_inc(&dest->refcnt);
77259
77260 - conn_flags = atomic_read(&dest->conn_flags);
77261 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
77262 if (cp->protocol != IPPROTO_UDP)
77263 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
77264 /* Bind with the destination and its corresponding transmitter */
77265 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
77266 atomic_set(&cp->refcnt, 1);
77267
77268 atomic_set(&cp->n_control, 0);
77269 - atomic_set(&cp->in_pkts, 0);
77270 + atomic_set_unchecked(&cp->in_pkts, 0);
77271
77272 atomic_inc(&ipvs->conn_count);
77273 if (flags & IP_VS_CONN_F_NO_CPORT)
77274 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
77275
77276 /* Don't drop the entry if its number of incoming packets is not
77277 located in [0, 8] */
77278 - i = atomic_read(&cp->in_pkts);
77279 + i = atomic_read_unchecked(&cp->in_pkts);
77280 if (i > 8 || i < 0) return 0;
77281
77282 if (!todrop_rate[i]) return 0;
77283 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
77284 index 2555816..31492d9 100644
77285 --- a/net/netfilter/ipvs/ip_vs_core.c
77286 +++ b/net/netfilter/ipvs/ip_vs_core.c
77287 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
77288 ret = cp->packet_xmit(skb, cp, pd->pp);
77289 /* do not touch skb anymore */
77290
77291 - atomic_inc(&cp->in_pkts);
77292 + atomic_inc_unchecked(&cp->in_pkts);
77293 ip_vs_conn_put(cp);
77294 return ret;
77295 }
77296 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
77297 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
77298 pkts = sysctl_sync_threshold(ipvs);
77299 else
77300 - pkts = atomic_add_return(1, &cp->in_pkts);
77301 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77302
77303 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
77304 cp->protocol == IPPROTO_SCTP) {
77305 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
77306 index b3afe18..08ec940 100644
77307 --- a/net/netfilter/ipvs/ip_vs_ctl.c
77308 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
77309 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
77310 ip_vs_rs_hash(ipvs, dest);
77311 write_unlock_bh(&ipvs->rs_lock);
77312 }
77313 - atomic_set(&dest->conn_flags, conn_flags);
77314 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
77315
77316 /* bind the service */
77317 if (!dest->svc) {
77318 @@ -2028,7 +2028,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77319 " %-7s %-6d %-10d %-10d\n",
77320 &dest->addr.in6,
77321 ntohs(dest->port),
77322 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77323 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77324 atomic_read(&dest->weight),
77325 atomic_read(&dest->activeconns),
77326 atomic_read(&dest->inactconns));
77327 @@ -2039,7 +2039,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
77328 "%-7s %-6d %-10d %-10d\n",
77329 ntohl(dest->addr.ip),
77330 ntohs(dest->port),
77331 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
77332 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
77333 atomic_read(&dest->weight),
77334 atomic_read(&dest->activeconns),
77335 atomic_read(&dest->inactconns));
77336 @@ -2509,7 +2509,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
77337
77338 entry.addr = dest->addr.ip;
77339 entry.port = dest->port;
77340 - entry.conn_flags = atomic_read(&dest->conn_flags);
77341 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
77342 entry.weight = atomic_read(&dest->weight);
77343 entry.u_threshold = dest->u_threshold;
77344 entry.l_threshold = dest->l_threshold;
77345 @@ -3042,7 +3042,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
77346 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
77347
77348 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
77349 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77350 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
77351 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
77352 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
77353 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
77354 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
77355 index 8a0d6d6..90ec197 100644
77356 --- a/net/netfilter/ipvs/ip_vs_sync.c
77357 +++ b/net/netfilter/ipvs/ip_vs_sync.c
77358 @@ -649,7 +649,7 @@ control:
77359 * i.e only increment in_pkts for Templates.
77360 */
77361 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
77362 - int pkts = atomic_add_return(1, &cp->in_pkts);
77363 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
77364
77365 if (pkts % sysctl_sync_period(ipvs) != 1)
77366 return;
77367 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
77368
77369 if (opt)
77370 memcpy(&cp->in_seq, opt, sizeof(*opt));
77371 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77372 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
77373 cp->state = state;
77374 cp->old_state = cp->state;
77375 /*
77376 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
77377 index 7fd66de..e6fb361 100644
77378 --- a/net/netfilter/ipvs/ip_vs_xmit.c
77379 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
77380 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
77381 else
77382 rc = NF_ACCEPT;
77383 /* do not touch skb anymore */
77384 - atomic_inc(&cp->in_pkts);
77385 + atomic_inc_unchecked(&cp->in_pkts);
77386 goto out;
77387 }
77388
77389 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
77390 else
77391 rc = NF_ACCEPT;
77392 /* do not touch skb anymore */
77393 - atomic_inc(&cp->in_pkts);
77394 + atomic_inc_unchecked(&cp->in_pkts);
77395 goto out;
77396 }
77397
77398 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
77399 index 66b2c54..c7884e3 100644
77400 --- a/net/netfilter/nfnetlink_log.c
77401 +++ b/net/netfilter/nfnetlink_log.c
77402 @@ -70,7 +70,7 @@ struct nfulnl_instance {
77403 };
77404
77405 static DEFINE_SPINLOCK(instances_lock);
77406 -static atomic_t global_seq;
77407 +static atomic_unchecked_t global_seq;
77408
77409 #define INSTANCE_BUCKETS 16
77410 static struct hlist_head instance_table[INSTANCE_BUCKETS];
77411 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
77412 /* global sequence number */
77413 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
77414 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
77415 - htonl(atomic_inc_return(&global_seq)));
77416 + htonl(atomic_inc_return_unchecked(&global_seq)));
77417
77418 if (data_len) {
77419 struct nlattr *nla;
77420 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
77421 new file mode 100644
77422 index 0000000..6905327
77423 --- /dev/null
77424 +++ b/net/netfilter/xt_gradm.c
77425 @@ -0,0 +1,51 @@
77426 +/*
77427 + * gradm match for netfilter
77428 + * Copyright © Zbigniew Krzystolik, 2010
77429 + *
77430 + * This program is free software; you can redistribute it and/or modify
77431 + * it under the terms of the GNU General Public License; either version
77432 + * 2 or 3 as published by the Free Software Foundation.
77433 + */
77434 +#include <linux/module.h>
77435 +#include <linux/moduleparam.h>
77436 +#include <linux/skbuff.h>
77437 +#include <linux/netfilter/x_tables.h>
77438 +#include <linux/grsecurity.h>
77439 +#include <linux/netfilter/xt_gradm.h>
77440 +
77441 +static bool
77442 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
77443 +{
77444 + const struct xt_gradm_mtinfo *info = par->matchinfo;
77445 + bool retval = false;
77446 + if (gr_acl_is_enabled())
77447 + retval = true;
77448 + return retval ^ info->invflags;
77449 +}
77450 +
77451 +static struct xt_match gradm_mt_reg __read_mostly = {
77452 + .name = "gradm",
77453 + .revision = 0,
77454 + .family = NFPROTO_UNSPEC,
77455 + .match = gradm_mt,
77456 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
77457 + .me = THIS_MODULE,
77458 +};
77459 +
77460 +static int __init gradm_mt_init(void)
77461 +{
77462 + return xt_register_match(&gradm_mt_reg);
77463 +}
77464 +
77465 +static void __exit gradm_mt_exit(void)
77466 +{
77467 + xt_unregister_match(&gradm_mt_reg);
77468 +}
77469 +
77470 +module_init(gradm_mt_init);
77471 +module_exit(gradm_mt_exit);
77472 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
77473 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
77474 +MODULE_LICENSE("GPL");
77475 +MODULE_ALIAS("ipt_gradm");
77476 +MODULE_ALIAS("ip6t_gradm");
77477 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
77478 index 4fe4fb4..87a89e5 100644
77479 --- a/net/netfilter/xt_statistic.c
77480 +++ b/net/netfilter/xt_statistic.c
77481 @@ -19,7 +19,7 @@
77482 #include <linux/module.h>
77483
77484 struct xt_statistic_priv {
77485 - atomic_t count;
77486 + atomic_unchecked_t count;
77487 } ____cacheline_aligned_in_smp;
77488
77489 MODULE_LICENSE("GPL");
77490 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
77491 break;
77492 case XT_STATISTIC_MODE_NTH:
77493 do {
77494 - oval = atomic_read(&info->master->count);
77495 + oval = atomic_read_unchecked(&info->master->count);
77496 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
77497 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
77498 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
77499 if (nval == 0)
77500 ret = !ret;
77501 break;
77502 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
77503 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
77504 if (info->master == NULL)
77505 return -ENOMEM;
77506 - atomic_set(&info->master->count, info->u.nth.count);
77507 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
77508
77509 return 0;
77510 }
77511 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
77512 index 467af9c..8f415cc 100644
77513 --- a/net/netlink/af_netlink.c
77514 +++ b/net/netlink/af_netlink.c
77515 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
77516 sk->sk_error_report(sk);
77517 }
77518 }
77519 - atomic_inc(&sk->sk_drops);
77520 + atomic_inc_unchecked(&sk->sk_drops);
77521 }
77522
77523 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
77524 @@ -1997,7 +1997,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
77525 sk_wmem_alloc_get(s),
77526 nlk->cb,
77527 atomic_read(&s->sk_refcnt),
77528 - atomic_read(&s->sk_drops),
77529 + atomic_read_unchecked(&s->sk_drops),
77530 sock_i_ino(s)
77531 );
77532
77533 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
77534 index 7dab229..212156f 100644
77535 --- a/net/netrom/af_netrom.c
77536 +++ b/net/netrom/af_netrom.c
77537 @@ -839,6 +839,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
77538 struct sock *sk = sock->sk;
77539 struct nr_sock *nr = nr_sk(sk);
77540
77541 + memset(sax, 0, sizeof(*sax));
77542 lock_sock(sk);
77543 if (peer != 0) {
77544 if (sk->sk_state != TCP_ESTABLISHED) {
77545 @@ -853,7 +854,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
77546 *uaddr_len = sizeof(struct full_sockaddr_ax25);
77547 } else {
77548 sax->fsa_ax25.sax25_family = AF_NETROM;
77549 - sax->fsa_ax25.sax25_ndigis = 0;
77550 sax->fsa_ax25.sax25_call = nr->source_addr;
77551 *uaddr_len = sizeof(struct sockaddr_ax25);
77552 }
77553 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
77554 index 2dbb32b..a1b4722 100644
77555 --- a/net/packet/af_packet.c
77556 +++ b/net/packet/af_packet.c
77557 @@ -1676,7 +1676,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
77558
77559 spin_lock(&sk->sk_receive_queue.lock);
77560 po->stats.tp_packets++;
77561 - skb->dropcount = atomic_read(&sk->sk_drops);
77562 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
77563 __skb_queue_tail(&sk->sk_receive_queue, skb);
77564 spin_unlock(&sk->sk_receive_queue.lock);
77565 sk->sk_data_ready(sk, skb->len);
77566 @@ -1685,7 +1685,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
77567 drop_n_acct:
77568 spin_lock(&sk->sk_receive_queue.lock);
77569 po->stats.tp_drops++;
77570 - atomic_inc(&sk->sk_drops);
77571 + atomic_inc_unchecked(&sk->sk_drops);
77572 spin_unlock(&sk->sk_receive_queue.lock);
77573
77574 drop_n_restore:
77575 @@ -3271,7 +3271,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
77576 case PACKET_HDRLEN:
77577 if (len > sizeof(int))
77578 len = sizeof(int);
77579 - if (copy_from_user(&val, optval, len))
77580 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
77581 return -EFAULT;
77582 switch (val) {
77583 case TPACKET_V1:
77584 @@ -3321,7 +3321,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
77585
77586 if (put_user(len, optlen))
77587 return -EFAULT;
77588 - if (copy_to_user(optval, data, len))
77589 + if (len > sizeof(st) || copy_to_user(optval, data, len))
77590 return -EFAULT;
77591 return 0;
77592 }
77593 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
77594 index d65f699..05aa6ce 100644
77595 --- a/net/phonet/af_phonet.c
77596 +++ b/net/phonet/af_phonet.c
77597 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
77598 {
77599 struct phonet_protocol *pp;
77600
77601 - if (protocol >= PHONET_NPROTO)
77602 + if (protocol < 0 || protocol >= PHONET_NPROTO)
77603 return NULL;
77604
77605 rcu_read_lock();
77606 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
77607 {
77608 int err = 0;
77609
77610 - if (protocol >= PHONET_NPROTO)
77611 + if (protocol < 0 || protocol >= PHONET_NPROTO)
77612 return -EINVAL;
77613
77614 err = proto_register(pp->prot, 1);
77615 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
77616 index 9726fe6..fc4e3a4 100644
77617 --- a/net/phonet/pep.c
77618 +++ b/net/phonet/pep.c
77619 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77620
77621 case PNS_PEP_CTRL_REQ:
77622 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
77623 - atomic_inc(&sk->sk_drops);
77624 + atomic_inc_unchecked(&sk->sk_drops);
77625 break;
77626 }
77627 __skb_pull(skb, 4);
77628 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
77629 }
77630
77631 if (pn->rx_credits == 0) {
77632 - atomic_inc(&sk->sk_drops);
77633 + atomic_inc_unchecked(&sk->sk_drops);
77634 err = -ENOBUFS;
77635 break;
77636 }
77637 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
77638 }
77639
77640 if (pn->rx_credits == 0) {
77641 - atomic_inc(&sk->sk_drops);
77642 + atomic_inc_unchecked(&sk->sk_drops);
77643 err = NET_RX_DROP;
77644 break;
77645 }
77646 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
77647 index 4c7eff3..59c727f 100644
77648 --- a/net/phonet/socket.c
77649 +++ b/net/phonet/socket.c
77650 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
77651 pn->resource, sk->sk_state,
77652 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
77653 sock_i_uid(sk), sock_i_ino(sk),
77654 - atomic_read(&sk->sk_refcnt), sk,
77655 - atomic_read(&sk->sk_drops), &len);
77656 + atomic_read(&sk->sk_refcnt),
77657 +#ifdef CONFIG_GRKERNSEC_HIDESYM
77658 + NULL,
77659 +#else
77660 + sk,
77661 +#endif
77662 + atomic_read_unchecked(&sk->sk_drops), &len);
77663 }
77664 seq_printf(seq, "%*s\n", 127 - len, "");
77665 return 0;
77666 diff --git a/net/rds/cong.c b/net/rds/cong.c
77667 index e5b65ac..f3b6fb7 100644
77668 --- a/net/rds/cong.c
77669 +++ b/net/rds/cong.c
77670 @@ -78,7 +78,7 @@
77671 * finds that the saved generation number is smaller than the global generation
77672 * number, it wakes up the process.
77673 */
77674 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
77675 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
77676
77677 /*
77678 * Congestion monitoring
77679 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
77680 rdsdebug("waking map %p for %pI4\n",
77681 map, &map->m_addr);
77682 rds_stats_inc(s_cong_update_received);
77683 - atomic_inc(&rds_cong_generation);
77684 + atomic_inc_unchecked(&rds_cong_generation);
77685 if (waitqueue_active(&map->m_waitq))
77686 wake_up(&map->m_waitq);
77687 if (waitqueue_active(&rds_poll_waitq))
77688 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
77689
77690 int rds_cong_updated_since(unsigned long *recent)
77691 {
77692 - unsigned long gen = atomic_read(&rds_cong_generation);
77693 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
77694
77695 if (likely(*recent == gen))
77696 return 0;
77697 diff --git a/net/rds/ib.h b/net/rds/ib.h
77698 index edfaaaf..8c89879 100644
77699 --- a/net/rds/ib.h
77700 +++ b/net/rds/ib.h
77701 @@ -128,7 +128,7 @@ struct rds_ib_connection {
77702 /* sending acks */
77703 unsigned long i_ack_flags;
77704 #ifdef KERNEL_HAS_ATOMIC64
77705 - atomic64_t i_ack_next; /* next ACK to send */
77706 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77707 #else
77708 spinlock_t i_ack_lock; /* protect i_ack_next */
77709 u64 i_ack_next; /* next ACK to send */
77710 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
77711 index 51c8689..36c555f 100644
77712 --- a/net/rds/ib_cm.c
77713 +++ b/net/rds/ib_cm.c
77714 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
77715 /* Clear the ACK state */
77716 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77717 #ifdef KERNEL_HAS_ATOMIC64
77718 - atomic64_set(&ic->i_ack_next, 0);
77719 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77720 #else
77721 ic->i_ack_next = 0;
77722 #endif
77723 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
77724 index e29e0ca..fa3a6a3 100644
77725 --- a/net/rds/ib_recv.c
77726 +++ b/net/rds/ib_recv.c
77727 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77728 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
77729 int ack_required)
77730 {
77731 - atomic64_set(&ic->i_ack_next, seq);
77732 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77733 if (ack_required) {
77734 smp_mb__before_clear_bit();
77735 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77736 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
77737 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77738 smp_mb__after_clear_bit();
77739
77740 - return atomic64_read(&ic->i_ack_next);
77741 + return atomic64_read_unchecked(&ic->i_ack_next);
77742 }
77743 #endif
77744
77745 diff --git a/net/rds/iw.h b/net/rds/iw.h
77746 index 04ce3b1..48119a6 100644
77747 --- a/net/rds/iw.h
77748 +++ b/net/rds/iw.h
77749 @@ -134,7 +134,7 @@ struct rds_iw_connection {
77750 /* sending acks */
77751 unsigned long i_ack_flags;
77752 #ifdef KERNEL_HAS_ATOMIC64
77753 - atomic64_t i_ack_next; /* next ACK to send */
77754 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
77755 #else
77756 spinlock_t i_ack_lock; /* protect i_ack_next */
77757 u64 i_ack_next; /* next ACK to send */
77758 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
77759 index 9556d28..f046d0e 100644
77760 --- a/net/rds/iw_cm.c
77761 +++ b/net/rds/iw_cm.c
77762 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
77763 /* Clear the ACK state */
77764 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
77765 #ifdef KERNEL_HAS_ATOMIC64
77766 - atomic64_set(&ic->i_ack_next, 0);
77767 + atomic64_set_unchecked(&ic->i_ack_next, 0);
77768 #else
77769 ic->i_ack_next = 0;
77770 #endif
77771 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
77772 index 5e57347..3916042 100644
77773 --- a/net/rds/iw_recv.c
77774 +++ b/net/rds/iw_recv.c
77775 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77776 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
77777 int ack_required)
77778 {
77779 - atomic64_set(&ic->i_ack_next, seq);
77780 + atomic64_set_unchecked(&ic->i_ack_next, seq);
77781 if (ack_required) {
77782 smp_mb__before_clear_bit();
77783 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77784 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
77785 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
77786 smp_mb__after_clear_bit();
77787
77788 - return atomic64_read(&ic->i_ack_next);
77789 + return atomic64_read_unchecked(&ic->i_ack_next);
77790 }
77791 #endif
77792
77793 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
77794 index edac9ef..16bcb98 100644
77795 --- a/net/rds/tcp.c
77796 +++ b/net/rds/tcp.c
77797 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
77798 int val = 1;
77799
77800 set_fs(KERNEL_DS);
77801 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
77802 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
77803 sizeof(val));
77804 set_fs(oldfs);
77805 }
77806 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
77807 index 1b4fd68..2234175 100644
77808 --- a/net/rds/tcp_send.c
77809 +++ b/net/rds/tcp_send.c
77810 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
77811
77812 oldfs = get_fs();
77813 set_fs(KERNEL_DS);
77814 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
77815 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
77816 sizeof(val));
77817 set_fs(oldfs);
77818 }
77819 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
77820 index 74c064c..fdec26f 100644
77821 --- a/net/rxrpc/af_rxrpc.c
77822 +++ b/net/rxrpc/af_rxrpc.c
77823 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
77824 __be32 rxrpc_epoch;
77825
77826 /* current debugging ID */
77827 -atomic_t rxrpc_debug_id;
77828 +atomic_unchecked_t rxrpc_debug_id;
77829
77830 /* count of skbs currently in use */
77831 atomic_t rxrpc_n_skbs;
77832 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
77833 index c3126e8..21facc7 100644
77834 --- a/net/rxrpc/ar-ack.c
77835 +++ b/net/rxrpc/ar-ack.c
77836 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77837
77838 _enter("{%d,%d,%d,%d},",
77839 call->acks_hard, call->acks_unacked,
77840 - atomic_read(&call->sequence),
77841 + atomic_read_unchecked(&call->sequence),
77842 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
77843
77844 stop = 0;
77845 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77846
77847 /* each Tx packet has a new serial number */
77848 sp->hdr.serial =
77849 - htonl(atomic_inc_return(&call->conn->serial));
77850 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77851
77852 hdr = (struct rxrpc_header *) txb->head;
77853 hdr->serial = sp->hdr.serial;
77854 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77855 */
77856 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77857 {
77858 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77859 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77860 }
77861
77862 /*
77863 @@ -629,7 +629,7 @@ process_further:
77864
77865 latest = ntohl(sp->hdr.serial);
77866 hard = ntohl(ack.firstPacket);
77867 - tx = atomic_read(&call->sequence);
77868 + tx = atomic_read_unchecked(&call->sequence);
77869
77870 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77871 latest,
77872 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
77873 goto maybe_reschedule;
77874
77875 send_ACK_with_skew:
77876 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77877 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77878 ntohl(ack.serial));
77879 send_ACK:
77880 mtu = call->conn->trans->peer->if_mtu;
77881 @@ -1173,7 +1173,7 @@ send_ACK:
77882 ackinfo.rxMTU = htonl(5692);
77883 ackinfo.jumbo_max = htonl(4);
77884
77885 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77886 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77887 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77888 ntohl(hdr.serial),
77889 ntohs(ack.maxSkew),
77890 @@ -1191,7 +1191,7 @@ send_ACK:
77891 send_message:
77892 _debug("send message");
77893
77894 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77895 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77896 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77897 send_message_2:
77898
77899 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77900 index bf656c2..48f9d27 100644
77901 --- a/net/rxrpc/ar-call.c
77902 +++ b/net/rxrpc/ar-call.c
77903 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77904 spin_lock_init(&call->lock);
77905 rwlock_init(&call->state_lock);
77906 atomic_set(&call->usage, 1);
77907 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77908 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77909 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77910
77911 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77912 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77913 index 4106ca9..a338d7a 100644
77914 --- a/net/rxrpc/ar-connection.c
77915 +++ b/net/rxrpc/ar-connection.c
77916 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77917 rwlock_init(&conn->lock);
77918 spin_lock_init(&conn->state_lock);
77919 atomic_set(&conn->usage, 1);
77920 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77921 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77922 conn->avail_calls = RXRPC_MAXCALLS;
77923 conn->size_align = 4;
77924 conn->header_size = sizeof(struct rxrpc_header);
77925 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77926 index e7ed43a..6afa140 100644
77927 --- a/net/rxrpc/ar-connevent.c
77928 +++ b/net/rxrpc/ar-connevent.c
77929 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77930
77931 len = iov[0].iov_len + iov[1].iov_len;
77932
77933 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77934 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77935 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77936
77937 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77938 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77939 index 1a2b0633..e8d1382 100644
77940 --- a/net/rxrpc/ar-input.c
77941 +++ b/net/rxrpc/ar-input.c
77942 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77943 /* track the latest serial number on this connection for ACK packet
77944 * information */
77945 serial = ntohl(sp->hdr.serial);
77946 - hi_serial = atomic_read(&call->conn->hi_serial);
77947 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77948 while (serial > hi_serial)
77949 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77950 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77951 serial);
77952
77953 /* request ACK generation for any ACK or DATA packet that requests
77954 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77955 index 8e22bd3..f66d1c0 100644
77956 --- a/net/rxrpc/ar-internal.h
77957 +++ b/net/rxrpc/ar-internal.h
77958 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77959 int error; /* error code for local abort */
77960 int debug_id; /* debug ID for printks */
77961 unsigned call_counter; /* call ID counter */
77962 - atomic_t serial; /* packet serial number counter */
77963 - atomic_t hi_serial; /* highest serial number received */
77964 + atomic_unchecked_t serial; /* packet serial number counter */
77965 + atomic_unchecked_t hi_serial; /* highest serial number received */
77966 u8 avail_calls; /* number of calls available */
77967 u8 size_align; /* data size alignment (for security) */
77968 u8 header_size; /* rxrpc + security header size */
77969 @@ -346,7 +346,7 @@ struct rxrpc_call {
77970 spinlock_t lock;
77971 rwlock_t state_lock; /* lock for state transition */
77972 atomic_t usage;
77973 - atomic_t sequence; /* Tx data packet sequence counter */
77974 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77975 u32 abort_code; /* local/remote abort code */
77976 enum { /* current state of call */
77977 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77978 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77979 */
77980 extern atomic_t rxrpc_n_skbs;
77981 extern __be32 rxrpc_epoch;
77982 -extern atomic_t rxrpc_debug_id;
77983 +extern atomic_unchecked_t rxrpc_debug_id;
77984 extern struct workqueue_struct *rxrpc_workqueue;
77985
77986 /*
77987 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77988 index 87f7135..74d3703 100644
77989 --- a/net/rxrpc/ar-local.c
77990 +++ b/net/rxrpc/ar-local.c
77991 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77992 spin_lock_init(&local->lock);
77993 rwlock_init(&local->services_lock);
77994 atomic_set(&local->usage, 1);
77995 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77996 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77997 memcpy(&local->srx, srx, sizeof(*srx));
77998 }
77999
78000 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
78001 index 16ae887..d24f12b 100644
78002 --- a/net/rxrpc/ar-output.c
78003 +++ b/net/rxrpc/ar-output.c
78004 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
78005 sp->hdr.cid = call->cid;
78006 sp->hdr.callNumber = call->call_id;
78007 sp->hdr.seq =
78008 - htonl(atomic_inc_return(&call->sequence));
78009 + htonl(atomic_inc_return_unchecked(&call->sequence));
78010 sp->hdr.serial =
78011 - htonl(atomic_inc_return(&conn->serial));
78012 + htonl(atomic_inc_return_unchecked(&conn->serial));
78013 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
78014 sp->hdr.userStatus = 0;
78015 sp->hdr.securityIndex = conn->security_ix;
78016 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
78017 index 2754f09..b20e38f 100644
78018 --- a/net/rxrpc/ar-peer.c
78019 +++ b/net/rxrpc/ar-peer.c
78020 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
78021 INIT_LIST_HEAD(&peer->error_targets);
78022 spin_lock_init(&peer->lock);
78023 atomic_set(&peer->usage, 1);
78024 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
78025 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
78026 memcpy(&peer->srx, srx, sizeof(*srx));
78027
78028 rxrpc_assess_MTU_size(peer);
78029 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
78030 index 38047f7..9f48511 100644
78031 --- a/net/rxrpc/ar-proc.c
78032 +++ b/net/rxrpc/ar-proc.c
78033 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
78034 atomic_read(&conn->usage),
78035 rxrpc_conn_states[conn->state],
78036 key_serial(conn->key),
78037 - atomic_read(&conn->serial),
78038 - atomic_read(&conn->hi_serial));
78039 + atomic_read_unchecked(&conn->serial),
78040 + atomic_read_unchecked(&conn->hi_serial));
78041
78042 return 0;
78043 }
78044 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
78045 index 92df566..87ec1bf 100644
78046 --- a/net/rxrpc/ar-transport.c
78047 +++ b/net/rxrpc/ar-transport.c
78048 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
78049 spin_lock_init(&trans->client_lock);
78050 rwlock_init(&trans->conn_lock);
78051 atomic_set(&trans->usage, 1);
78052 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
78053 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
78054
78055 if (peer->srx.transport.family == AF_INET) {
78056 switch (peer->srx.transport_type) {
78057 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
78058 index 7635107..4670276 100644
78059 --- a/net/rxrpc/rxkad.c
78060 +++ b/net/rxrpc/rxkad.c
78061 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
78062
78063 len = iov[0].iov_len + iov[1].iov_len;
78064
78065 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
78066 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
78067 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
78068
78069 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
78070 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
78071
78072 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
78073
78074 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
78075 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
78076 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
78077
78078 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
78079 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
78080 index 1e2eee8..ce3967e 100644
78081 --- a/net/sctp/proc.c
78082 +++ b/net/sctp/proc.c
78083 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
78084 seq_printf(seq,
78085 "%8pK %8pK %-3d %-3d %-2d %-4d "
78086 "%4d %8d %8d %7d %5lu %-5d %5d ",
78087 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
78088 + assoc, sk,
78089 + sctp_sk(sk)->type, sk->sk_state,
78090 assoc->state, hash,
78091 assoc->assoc_id,
78092 assoc->sndbuf_used,
78093 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
78094 index d043722..6903416 100644
78095 --- a/net/sctp/socket.c
78096 +++ b/net/sctp/socket.c
78097 @@ -4575,7 +4575,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
78098 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
78099 if (space_left < addrlen)
78100 return -ENOMEM;
78101 - if (copy_to_user(to, &temp, addrlen))
78102 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
78103 return -EFAULT;
78104 to += addrlen;
78105 cnt++;
78106 diff --git a/net/socket.c b/net/socket.c
78107 index 0de4131..7e7ddab 100644
78108 --- a/net/socket.c
78109 +++ b/net/socket.c
78110 @@ -88,6 +88,7 @@
78111 #include <linux/nsproxy.h>
78112 #include <linux/magic.h>
78113 #include <linux/slab.h>
78114 +#include <linux/in.h>
78115
78116 #include <asm/uaccess.h>
78117 #include <asm/unistd.h>
78118 @@ -105,6 +106,8 @@
78119 #include <linux/sockios.h>
78120 #include <linux/atalk.h>
78121
78122 +#include <linux/grsock.h>
78123 +
78124 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
78125 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
78126 unsigned long nr_segs, loff_t pos);
78127 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
78128 &sockfs_dentry_operations, SOCKFS_MAGIC);
78129 }
78130
78131 -static struct vfsmount *sock_mnt __read_mostly;
78132 +struct vfsmount *sock_mnt __read_mostly;
78133
78134 static struct file_system_type sock_fs_type = {
78135 .name = "sockfs",
78136 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
78137 return -EAFNOSUPPORT;
78138 if (type < 0 || type >= SOCK_MAX)
78139 return -EINVAL;
78140 + if (protocol < 0)
78141 + return -EINVAL;
78142
78143 /* Compatibility.
78144
78145 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
78146 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
78147 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
78148
78149 + if(!gr_search_socket(family, type, protocol)) {
78150 + retval = -EACCES;
78151 + goto out;
78152 + }
78153 +
78154 + if (gr_handle_sock_all(family, type, protocol)) {
78155 + retval = -EACCES;
78156 + goto out;
78157 + }
78158 +
78159 retval = sock_create(family, type, protocol, &sock);
78160 if (retval < 0)
78161 goto out;
78162 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
78163 if (sock) {
78164 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
78165 if (err >= 0) {
78166 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
78167 + err = -EACCES;
78168 + goto error;
78169 + }
78170 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
78171 + if (err)
78172 + goto error;
78173 +
78174 err = security_socket_bind(sock,
78175 (struct sockaddr *)&address,
78176 addrlen);
78177 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
78178 (struct sockaddr *)
78179 &address, addrlen);
78180 }
78181 +error:
78182 fput_light(sock->file, fput_needed);
78183 }
78184 return err;
78185 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
78186 if ((unsigned)backlog > somaxconn)
78187 backlog = somaxconn;
78188
78189 + if (gr_handle_sock_server_other(sock->sk)) {
78190 + err = -EPERM;
78191 + goto error;
78192 + }
78193 +
78194 + err = gr_search_listen(sock);
78195 + if (err)
78196 + goto error;
78197 +
78198 err = security_socket_listen(sock, backlog);
78199 if (!err)
78200 err = sock->ops->listen(sock, backlog);
78201
78202 +error:
78203 fput_light(sock->file, fput_needed);
78204 }
78205 return err;
78206 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
78207 newsock->type = sock->type;
78208 newsock->ops = sock->ops;
78209
78210 + if (gr_handle_sock_server_other(sock->sk)) {
78211 + err = -EPERM;
78212 + sock_release(newsock);
78213 + goto out_put;
78214 + }
78215 +
78216 + err = gr_search_accept(sock);
78217 + if (err) {
78218 + sock_release(newsock);
78219 + goto out_put;
78220 + }
78221 +
78222 /*
78223 * We don't need try_module_get here, as the listening socket (sock)
78224 * has the protocol module (sock->ops->owner) held.
78225 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
78226 fd_install(newfd, newfile);
78227 err = newfd;
78228
78229 + gr_attach_curr_ip(newsock->sk);
78230 +
78231 out_put:
78232 fput_light(sock->file, fput_needed);
78233 out:
78234 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
78235 int, addrlen)
78236 {
78237 struct socket *sock;
78238 + struct sockaddr *sck;
78239 struct sockaddr_storage address;
78240 int err, fput_needed;
78241
78242 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
78243 if (err < 0)
78244 goto out_put;
78245
78246 + sck = (struct sockaddr *)&address;
78247 +
78248 + if (gr_handle_sock_client(sck)) {
78249 + err = -EACCES;
78250 + goto out_put;
78251 + }
78252 +
78253 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
78254 + if (err)
78255 + goto out_put;
78256 +
78257 err =
78258 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
78259 if (err)
78260 @@ -1970,7 +2030,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
78261 * checking falls down on this.
78262 */
78263 if (copy_from_user(ctl_buf,
78264 - (void __user __force *)msg_sys->msg_control,
78265 + (void __force_user *)msg_sys->msg_control,
78266 ctl_len))
78267 goto out_freectl;
78268 msg_sys->msg_control = ctl_buf;
78269 @@ -2140,7 +2200,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
78270 * kernel msghdr to use the kernel address space)
78271 */
78272
78273 - uaddr = (__force void __user *)msg_sys->msg_name;
78274 + uaddr = (void __force_user *)msg_sys->msg_name;
78275 uaddr_len = COMPAT_NAMELEN(msg);
78276 if (MSG_CMSG_COMPAT & flags) {
78277 err = verify_compat_iovec(msg_sys, iov,
78278 @@ -2768,7 +2828,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
78279 }
78280
78281 ifr = compat_alloc_user_space(buf_size);
78282 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
78283 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
78284
78285 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
78286 return -EFAULT;
78287 @@ -2792,12 +2852,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
78288 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
78289
78290 if (copy_in_user(rxnfc, compat_rxnfc,
78291 - (void *)(&rxnfc->fs.m_ext + 1) -
78292 - (void *)rxnfc) ||
78293 + (void __user *)(&rxnfc->fs.m_ext + 1) -
78294 + (void __user *)rxnfc) ||
78295 copy_in_user(&rxnfc->fs.ring_cookie,
78296 &compat_rxnfc->fs.ring_cookie,
78297 - (void *)(&rxnfc->fs.location + 1) -
78298 - (void *)&rxnfc->fs.ring_cookie) ||
78299 + (void __user *)(&rxnfc->fs.location + 1) -
78300 + (void __user *)&rxnfc->fs.ring_cookie) ||
78301 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
78302 sizeof(rxnfc->rule_cnt)))
78303 return -EFAULT;
78304 @@ -2809,12 +2869,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
78305
78306 if (convert_out) {
78307 if (copy_in_user(compat_rxnfc, rxnfc,
78308 - (const void *)(&rxnfc->fs.m_ext + 1) -
78309 - (const void *)rxnfc) ||
78310 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
78311 + (const void __user *)rxnfc) ||
78312 copy_in_user(&compat_rxnfc->fs.ring_cookie,
78313 &rxnfc->fs.ring_cookie,
78314 - (const void *)(&rxnfc->fs.location + 1) -
78315 - (const void *)&rxnfc->fs.ring_cookie) ||
78316 + (const void __user *)(&rxnfc->fs.location + 1) -
78317 + (const void __user *)&rxnfc->fs.ring_cookie) ||
78318 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
78319 sizeof(rxnfc->rule_cnt)))
78320 return -EFAULT;
78321 @@ -2884,7 +2944,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
78322 old_fs = get_fs();
78323 set_fs(KERNEL_DS);
78324 err = dev_ioctl(net, cmd,
78325 - (struct ifreq __user __force *) &kifr);
78326 + (struct ifreq __force_user *) &kifr);
78327 set_fs(old_fs);
78328
78329 return err;
78330 @@ -2993,7 +3053,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
78331
78332 old_fs = get_fs();
78333 set_fs(KERNEL_DS);
78334 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
78335 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
78336 set_fs(old_fs);
78337
78338 if (cmd == SIOCGIFMAP && !err) {
78339 @@ -3098,7 +3158,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
78340 ret |= __get_user(rtdev, &(ur4->rt_dev));
78341 if (rtdev) {
78342 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
78343 - r4.rt_dev = (char __user __force *)devname;
78344 + r4.rt_dev = (char __force_user *)devname;
78345 devname[15] = 0;
78346 } else
78347 r4.rt_dev = NULL;
78348 @@ -3324,8 +3384,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
78349 int __user *uoptlen;
78350 int err;
78351
78352 - uoptval = (char __user __force *) optval;
78353 - uoptlen = (int __user __force *) optlen;
78354 + uoptval = (char __force_user *) optval;
78355 + uoptlen = (int __force_user *) optlen;
78356
78357 set_fs(KERNEL_DS);
78358 if (level == SOL_SOCKET)
78359 @@ -3345,7 +3405,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
78360 char __user *uoptval;
78361 int err;
78362
78363 - uoptval = (char __user __force *) optval;
78364 + uoptval = (char __force_user *) optval;
78365
78366 set_fs(KERNEL_DS);
78367 if (level == SOL_SOCKET)
78368 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
78369 index 8efd96c..b492ab2 100644
78370 --- a/net/sunrpc/sched.c
78371 +++ b/net/sunrpc/sched.c
78372 @@ -239,9 +239,9 @@ static int rpc_wait_bit_killable(void *word)
78373 #ifdef RPC_DEBUG
78374 static void rpc_task_set_debuginfo(struct rpc_task *task)
78375 {
78376 - static atomic_t rpc_pid;
78377 + static atomic_unchecked_t rpc_pid;
78378
78379 - task->tk_pid = atomic_inc_return(&rpc_pid);
78380 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
78381 }
78382 #else
78383 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
78384 diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
78385 index 4645709..d41d668 100644
78386 --- a/net/sunrpc/svcsock.c
78387 +++ b/net/sunrpc/svcsock.c
78388 @@ -396,7 +396,7 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
78389 int buflen, unsigned int base)
78390 {
78391 size_t save_iovlen;
78392 - void __user *save_iovbase;
78393 + void *save_iovbase;
78394 unsigned int i;
78395 int ret;
78396
78397 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
78398 index 09af4fa..77110a9 100644
78399 --- a/net/sunrpc/xprtrdma/svc_rdma.c
78400 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
78401 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
78402 static unsigned int min_max_inline = 4096;
78403 static unsigned int max_max_inline = 65536;
78404
78405 -atomic_t rdma_stat_recv;
78406 -atomic_t rdma_stat_read;
78407 -atomic_t rdma_stat_write;
78408 -atomic_t rdma_stat_sq_starve;
78409 -atomic_t rdma_stat_rq_starve;
78410 -atomic_t rdma_stat_rq_poll;
78411 -atomic_t rdma_stat_rq_prod;
78412 -atomic_t rdma_stat_sq_poll;
78413 -atomic_t rdma_stat_sq_prod;
78414 +atomic_unchecked_t rdma_stat_recv;
78415 +atomic_unchecked_t rdma_stat_read;
78416 +atomic_unchecked_t rdma_stat_write;
78417 +atomic_unchecked_t rdma_stat_sq_starve;
78418 +atomic_unchecked_t rdma_stat_rq_starve;
78419 +atomic_unchecked_t rdma_stat_rq_poll;
78420 +atomic_unchecked_t rdma_stat_rq_prod;
78421 +atomic_unchecked_t rdma_stat_sq_poll;
78422 +atomic_unchecked_t rdma_stat_sq_prod;
78423
78424 /* Temporary NFS request map and context caches */
78425 struct kmem_cache *svc_rdma_map_cachep;
78426 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *table, int write,
78427 len -= *ppos;
78428 if (len > *lenp)
78429 len = *lenp;
78430 - if (len && copy_to_user(buffer, str_buf, len))
78431 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
78432 return -EFAULT;
78433 *lenp = len;
78434 *ppos += len;
78435 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] = {
78436 {
78437 .procname = "rdma_stat_read",
78438 .data = &rdma_stat_read,
78439 - .maxlen = sizeof(atomic_t),
78440 + .maxlen = sizeof(atomic_unchecked_t),
78441 .mode = 0644,
78442 .proc_handler = read_reset_stat,
78443 },
78444 {
78445 .procname = "rdma_stat_recv",
78446 .data = &rdma_stat_recv,
78447 - .maxlen = sizeof(atomic_t),
78448 + .maxlen = sizeof(atomic_unchecked_t),
78449 .mode = 0644,
78450 .proc_handler = read_reset_stat,
78451 },
78452 {
78453 .procname = "rdma_stat_write",
78454 .data = &rdma_stat_write,
78455 - .maxlen = sizeof(atomic_t),
78456 + .maxlen = sizeof(atomic_unchecked_t),
78457 .mode = 0644,
78458 .proc_handler = read_reset_stat,
78459 },
78460 {
78461 .procname = "rdma_stat_sq_starve",
78462 .data = &rdma_stat_sq_starve,
78463 - .maxlen = sizeof(atomic_t),
78464 + .maxlen = sizeof(atomic_unchecked_t),
78465 .mode = 0644,
78466 .proc_handler = read_reset_stat,
78467 },
78468 {
78469 .procname = "rdma_stat_rq_starve",
78470 .data = &rdma_stat_rq_starve,
78471 - .maxlen = sizeof(atomic_t),
78472 + .maxlen = sizeof(atomic_unchecked_t),
78473 .mode = 0644,
78474 .proc_handler = read_reset_stat,
78475 },
78476 {
78477 .procname = "rdma_stat_rq_poll",
78478 .data = &rdma_stat_rq_poll,
78479 - .maxlen = sizeof(atomic_t),
78480 + .maxlen = sizeof(atomic_unchecked_t),
78481 .mode = 0644,
78482 .proc_handler = read_reset_stat,
78483 },
78484 {
78485 .procname = "rdma_stat_rq_prod",
78486 .data = &rdma_stat_rq_prod,
78487 - .maxlen = sizeof(atomic_t),
78488 + .maxlen = sizeof(atomic_unchecked_t),
78489 .mode = 0644,
78490 .proc_handler = read_reset_stat,
78491 },
78492 {
78493 .procname = "rdma_stat_sq_poll",
78494 .data = &rdma_stat_sq_poll,
78495 - .maxlen = sizeof(atomic_t),
78496 + .maxlen = sizeof(atomic_unchecked_t),
78497 .mode = 0644,
78498 .proc_handler = read_reset_stat,
78499 },
78500 {
78501 .procname = "rdma_stat_sq_prod",
78502 .data = &rdma_stat_sq_prod,
78503 - .maxlen = sizeof(atomic_t),
78504 + .maxlen = sizeof(atomic_unchecked_t),
78505 .mode = 0644,
78506 .proc_handler = read_reset_stat,
78507 },
78508 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78509 index df67211..c354b13 100644
78510 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78511 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
78512 @@ -499,7 +499,7 @@ next_sge:
78513 svc_rdma_put_context(ctxt, 0);
78514 goto out;
78515 }
78516 - atomic_inc(&rdma_stat_read);
78517 + atomic_inc_unchecked(&rdma_stat_read);
78518
78519 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
78520 chl_map->ch[ch_no].count -= read_wr.num_sge;
78521 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
78522 dto_q);
78523 list_del_init(&ctxt->dto_q);
78524 } else {
78525 - atomic_inc(&rdma_stat_rq_starve);
78526 + atomic_inc_unchecked(&rdma_stat_rq_starve);
78527 clear_bit(XPT_DATA, &xprt->xpt_flags);
78528 ctxt = NULL;
78529 }
78530 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
78531 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
78532 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
78533 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
78534 - atomic_inc(&rdma_stat_recv);
78535 + atomic_inc_unchecked(&rdma_stat_recv);
78536
78537 /* Build up the XDR from the receive buffers. */
78538 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
78539 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78540 index 249a835..fb2794b 100644
78541 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78542 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
78543 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
78544 write_wr.wr.rdma.remote_addr = to;
78545
78546 /* Post It */
78547 - atomic_inc(&rdma_stat_write);
78548 + atomic_inc_unchecked(&rdma_stat_write);
78549 if (svc_rdma_send(xprt, &write_wr))
78550 goto err;
78551 return 0;
78552 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
78553 index 894cb42..cf5bafb 100644
78554 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
78555 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
78556 @@ -300,7 +300,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
78557 return;
78558
78559 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
78560 - atomic_inc(&rdma_stat_rq_poll);
78561 + atomic_inc_unchecked(&rdma_stat_rq_poll);
78562
78563 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
78564 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
78565 @@ -322,7 +322,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
78566 }
78567
78568 if (ctxt)
78569 - atomic_inc(&rdma_stat_rq_prod);
78570 + atomic_inc_unchecked(&rdma_stat_rq_prod);
78571
78572 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
78573 /*
78574 @@ -394,7 +394,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78575 return;
78576
78577 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
78578 - atomic_inc(&rdma_stat_sq_poll);
78579 + atomic_inc_unchecked(&rdma_stat_sq_poll);
78580 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
78581 if (wc.status != IB_WC_SUCCESS)
78582 /* Close the transport */
78583 @@ -412,7 +412,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
78584 }
78585
78586 if (ctxt)
78587 - atomic_inc(&rdma_stat_sq_prod);
78588 + atomic_inc_unchecked(&rdma_stat_sq_prod);
78589 }
78590
78591 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
78592 @@ -1274,7 +1274,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
78593 spin_lock_bh(&xprt->sc_lock);
78594 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
78595 spin_unlock_bh(&xprt->sc_lock);
78596 - atomic_inc(&rdma_stat_sq_starve);
78597 + atomic_inc_unchecked(&rdma_stat_sq_starve);
78598
78599 /* See if we can opportunistically reap SQ WR to make room */
78600 sq_cq_reap(xprt);
78601 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
78602 index e758139..d29ea47 100644
78603 --- a/net/sysctl_net.c
78604 +++ b/net/sysctl_net.c
78605 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
78606 struct ctl_table *table)
78607 {
78608 /* Allow network administrator to have same access as root. */
78609 - if (capable(CAP_NET_ADMIN)) {
78610 + if (capable_nolog(CAP_NET_ADMIN)) {
78611 int mode = (table->mode >> 6) & 7;
78612 return (mode << 6) | (mode << 3) | mode;
78613 }
78614 diff --git a/net/tipc/link.c b/net/tipc/link.c
78615 index ac1832a..533ed97 100644
78616 --- a/net/tipc/link.c
78617 +++ b/net/tipc/link.c
78618 @@ -1205,7 +1205,7 @@ static int link_send_sections_long(struct tipc_port *sender,
78619 struct tipc_msg fragm_hdr;
78620 struct sk_buff *buf, *buf_chain, *prev;
78621 u32 fragm_crs, fragm_rest, hsz, sect_rest;
78622 - const unchar *sect_crs;
78623 + const unchar __user *sect_crs;
78624 int curr_sect;
78625 u32 fragm_no;
78626
78627 @@ -1249,7 +1249,7 @@ again:
78628
78629 if (!sect_rest) {
78630 sect_rest = msg_sect[++curr_sect].iov_len;
78631 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
78632 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
78633 }
78634
78635 if (sect_rest < fragm_rest)
78636 @@ -1268,7 +1268,7 @@ error:
78637 }
78638 } else
78639 skb_copy_to_linear_data_offset(buf, fragm_crs,
78640 - sect_crs, sz);
78641 + (const void __force_kernel *)sect_crs, sz);
78642 sect_crs += sz;
78643 sect_rest -= sz;
78644 fragm_crs += sz;
78645 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
78646 index 3e4d3e2..27b55dc 100644
78647 --- a/net/tipc/msg.c
78648 +++ b/net/tipc/msg.c
78649 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
78650 msg_sect[cnt].iov_len);
78651 else
78652 skb_copy_to_linear_data_offset(*buf, pos,
78653 - msg_sect[cnt].iov_base,
78654 + (const void __force_kernel *)msg_sect[cnt].iov_base,
78655 msg_sect[cnt].iov_len);
78656 pos += msg_sect[cnt].iov_len;
78657 }
78658 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
78659 index 8c49566..14510cb 100644
78660 --- a/net/tipc/subscr.c
78661 +++ b/net/tipc/subscr.c
78662 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
78663 {
78664 struct iovec msg_sect;
78665
78666 - msg_sect.iov_base = (void *)&sub->evt;
78667 + msg_sect.iov_base = (void __force_user *)&sub->evt;
78668 msg_sect.iov_len = sizeof(struct tipc_event);
78669
78670 sub->evt.event = htohl(event, sub->swap);
78671 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
78672 index 85d3bb7..79f4487 100644
78673 --- a/net/unix/af_unix.c
78674 +++ b/net/unix/af_unix.c
78675 @@ -770,6 +770,12 @@ static struct sock *unix_find_other(struct net *net,
78676 err = -ECONNREFUSED;
78677 if (!S_ISSOCK(inode->i_mode))
78678 goto put_fail;
78679 +
78680 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
78681 + err = -EACCES;
78682 + goto put_fail;
78683 + }
78684 +
78685 u = unix_find_socket_byinode(inode);
78686 if (!u)
78687 goto put_fail;
78688 @@ -790,6 +796,13 @@ static struct sock *unix_find_other(struct net *net,
78689 if (u) {
78690 struct dentry *dentry;
78691 dentry = unix_sk(u)->dentry;
78692 +
78693 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
78694 + err = -EPERM;
78695 + sock_put(u);
78696 + goto fail;
78697 + }
78698 +
78699 if (dentry)
78700 touch_atime(unix_sk(u)->mnt, dentry);
78701 } else
78702 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
78703 err = security_path_mknod(&path, dentry, mode, 0);
78704 if (err)
78705 goto out_mknod_drop_write;
78706 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
78707 + err = -EACCES;
78708 + goto out_mknod_drop_write;
78709 + }
78710 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
78711 out_mknod_drop_write:
78712 mnt_drop_write(path.mnt);
78713 if (err)
78714 goto out_mknod_dput;
78715 +
78716 + gr_handle_create(dentry, path.mnt);
78717 +
78718 mutex_unlock(&path.dentry->d_inode->i_mutex);
78719 dput(path.dentry);
78720 path.dentry = dentry;
78721 diff --git a/net/wireless/core.h b/net/wireless/core.h
78722 index 43ad9c8..ab5127c 100644
78723 --- a/net/wireless/core.h
78724 +++ b/net/wireless/core.h
78725 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
78726 struct mutex mtx;
78727
78728 /* rfkill support */
78729 - struct rfkill_ops rfkill_ops;
78730 + rfkill_ops_no_const rfkill_ops;
78731 struct rfkill *rfkill;
78732 struct work_struct rfkill_sync;
78733
78734 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
78735 index 0af7f54..c916d2f 100644
78736 --- a/net/wireless/wext-core.c
78737 +++ b/net/wireless/wext-core.c
78738 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78739 */
78740
78741 /* Support for very large requests */
78742 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
78743 - (user_length > descr->max_tokens)) {
78744 + if (user_length > descr->max_tokens) {
78745 /* Allow userspace to GET more than max so
78746 * we can support any size GET requests.
78747 * There is still a limit : -ENOMEM.
78748 @@ -785,22 +784,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
78749 }
78750 }
78751
78752 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
78753 - /*
78754 - * If this is a GET, but not NOMAX, it means that the extra
78755 - * data is not bounded by userspace, but by max_tokens. Thus
78756 - * set the length to max_tokens. This matches the extra data
78757 - * allocation.
78758 - * The driver should fill it with the number of tokens it
78759 - * provided, and it may check iwp->length rather than having
78760 - * knowledge of max_tokens. If the driver doesn't change the
78761 - * iwp->length, this ioctl just copies back max_token tokens
78762 - * filled with zeroes. Hopefully the driver isn't claiming
78763 - * them to be valid data.
78764 - */
78765 - iwp->length = descr->max_tokens;
78766 - }
78767 -
78768 err = handler(dev, info, (union iwreq_data *) iwp, extra);
78769
78770 iwp->length += essid_compat;
78771 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
78772 index 7661576..80f7627 100644
78773 --- a/net/xfrm/xfrm_policy.c
78774 +++ b/net/xfrm/xfrm_policy.c
78775 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
78776 {
78777 policy->walk.dead = 1;
78778
78779 - atomic_inc(&policy->genid);
78780 + atomic_inc_unchecked(&policy->genid);
78781
78782 if (del_timer(&policy->timer))
78783 xfrm_pol_put(policy);
78784 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
78785 hlist_add_head(&policy->bydst, chain);
78786 xfrm_pol_hold(policy);
78787 net->xfrm.policy_count[dir]++;
78788 - atomic_inc(&flow_cache_genid);
78789 + atomic_inc_unchecked(&flow_cache_genid);
78790 if (delpol)
78791 __xfrm_policy_unlink(delpol, dir);
78792 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
78793 @@ -1530,7 +1530,7 @@ free_dst:
78794 goto out;
78795 }
78796
78797 -static int inline
78798 +static inline int
78799 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78800 {
78801 if (!*target) {
78802 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78803 return 0;
78804 }
78805
78806 -static int inline
78807 +static inline int
78808 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78809 {
78810 #ifdef CONFIG_XFRM_SUB_POLICY
78811 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78812 #endif
78813 }
78814
78815 -static int inline
78816 +static inline int
78817 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78818 {
78819 #ifdef CONFIG_XFRM_SUB_POLICY
78820 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78821
78822 xdst->num_pols = num_pols;
78823 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78824 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78825 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78826
78827 return xdst;
78828 }
78829 @@ -2345,7 +2345,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78830 if (xdst->xfrm_genid != dst->xfrm->genid)
78831 return 0;
78832 if (xdst->num_pols > 0 &&
78833 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78834 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78835 return 0;
78836
78837 mtu = dst_mtu(dst->child);
78838 @@ -2882,7 +2882,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78839 sizeof(pol->xfrm_vec[i].saddr));
78840 pol->xfrm_vec[i].encap_family = mp->new_family;
78841 /* flush bundles */
78842 - atomic_inc(&pol->genid);
78843 + atomic_inc_unchecked(&pol->genid);
78844 }
78845 }
78846
78847 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78848 index d2b366c..51ff91ebc 100644
78849 --- a/scripts/Makefile.build
78850 +++ b/scripts/Makefile.build
78851 @@ -109,7 +109,7 @@ endif
78852 endif
78853
78854 # Do not include host rules unless needed
78855 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78856 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
78857 include scripts/Makefile.host
78858 endif
78859
78860 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78861 index 686cb0d..9d653bf 100644
78862 --- a/scripts/Makefile.clean
78863 +++ b/scripts/Makefile.clean
78864 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78865 __clean-files := $(extra-y) $(always) \
78866 $(targets) $(clean-files) \
78867 $(host-progs) \
78868 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78869 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78870 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78871
78872 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78873
78874 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78875 index 1ac414f..a1c1451 100644
78876 --- a/scripts/Makefile.host
78877 +++ b/scripts/Makefile.host
78878 @@ -31,6 +31,7 @@
78879 # Note: Shared libraries consisting of C++ files are not supported
78880
78881 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78882 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78883
78884 # C code
78885 # Executables compiled from a single .c file
78886 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78887 # Shared libaries (only .c supported)
78888 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78889 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78890 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78891 # Remove .so files from "xxx-objs"
78892 host-cobjs := $(filter-out %.so,$(host-cobjs))
78893
78894 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78895 index cb1f50c..cef2a7c 100644
78896 --- a/scripts/basic/fixdep.c
78897 +++ b/scripts/basic/fixdep.c
78898 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78899 /*
78900 * Lookup a value in the configuration string.
78901 */
78902 -static int is_defined_config(const char *name, int len, unsigned int hash)
78903 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78904 {
78905 struct item *aux;
78906
78907 @@ -211,10 +211,10 @@ static void clear_config(void)
78908 /*
78909 * Record the use of a CONFIG_* word.
78910 */
78911 -static void use_config(const char *m, int slen)
78912 +static void use_config(const char *m, unsigned int slen)
78913 {
78914 unsigned int hash = strhash(m, slen);
78915 - int c, i;
78916 + unsigned int c, i;
78917
78918 if (is_defined_config(m, slen, hash))
78919 return;
78920 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78921
78922 static void parse_config_file(const char *map, size_t len)
78923 {
78924 - const int *end = (const int *) (map + len);
78925 + const unsigned int *end = (const unsigned int *) (map + len);
78926 /* start at +1, so that p can never be < map */
78927 - const int *m = (const int *) map + 1;
78928 + const unsigned int *m = (const unsigned int *) map + 1;
78929 const char *p, *q;
78930
78931 for (; m < end; m++) {
78932 @@ -406,7 +406,7 @@ static void print_deps(void)
78933 static void traps(void)
78934 {
78935 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78936 - int *p = (int *)test;
78937 + unsigned int *p = (unsigned int *)test;
78938
78939 if (*p != INT_CONF) {
78940 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
78941 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78942 new file mode 100644
78943 index 0000000..8729101
78944 --- /dev/null
78945 +++ b/scripts/gcc-plugin.sh
78946 @@ -0,0 +1,2 @@
78947 +#!/bin/sh
78948 +echo -e "#include \"gcc-plugin.h\"\n#include \"tree.h\"\n#include \"tm.h\"\n#include \"rtl.h\"" | $1 -x c -shared - -o /dev/null -I`$2 -print-file-name=plugin`/include >/dev/null 2>&1 && echo "y"
78949 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78950 index e047e17..ea646ec 100644
78951 --- a/scripts/mod/file2alias.c
78952 +++ b/scripts/mod/file2alias.c
78953 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
78954 unsigned long size, unsigned long id_size,
78955 void *symval)
78956 {
78957 - int i;
78958 + unsigned int i;
78959
78960 if (size % id_size || size < id_size) {
78961 if (cross_build != 0)
78962 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
78963 /* USB is special because the bcdDevice can be matched against a numeric range */
78964 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78965 static void do_usb_entry(struct usb_device_id *id,
78966 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78967 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78968 unsigned char range_lo, unsigned char range_hi,
78969 unsigned char max, struct module *mod)
78970 {
78971 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78972 {
78973 unsigned int devlo, devhi;
78974 unsigned char chi, clo, max;
78975 - int ndigits;
78976 + unsigned int ndigits;
78977
78978 id->match_flags = TO_NATIVE(id->match_flags);
78979 id->idVendor = TO_NATIVE(id->idVendor);
78980 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78981 for (i = 0; i < count; i++) {
78982 const char *id = (char *)devs[i].id;
78983 char acpi_id[sizeof(devs[0].id)];
78984 - int j;
78985 + unsigned int j;
78986
78987 buf_printf(&mod->dev_table_buf,
78988 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78989 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78990
78991 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78992 const char *id = (char *)card->devs[j].id;
78993 - int i2, j2;
78994 + unsigned int i2, j2;
78995 int dup = 0;
78996
78997 if (!id[0])
78998 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78999 /* add an individual alias for every device entry */
79000 if (!dup) {
79001 char acpi_id[sizeof(card->devs[0].id)];
79002 - int k;
79003 + unsigned int k;
79004
79005 buf_printf(&mod->dev_table_buf,
79006 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
79007 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
79008 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
79009 char *alias)
79010 {
79011 - int i, j;
79012 + unsigned int i, j;
79013
79014 sprintf(alias, "dmi*");
79015
79016 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
79017 index c4e7d15..4241aef 100644
79018 --- a/scripts/mod/modpost.c
79019 +++ b/scripts/mod/modpost.c
79020 @@ -922,6 +922,7 @@ enum mismatch {
79021 ANY_INIT_TO_ANY_EXIT,
79022 ANY_EXIT_TO_ANY_INIT,
79023 EXPORT_TO_INIT_EXIT,
79024 + DATA_TO_TEXT
79025 };
79026
79027 struct sectioncheck {
79028 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
79029 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
79030 .mismatch = EXPORT_TO_INIT_EXIT,
79031 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
79032 +},
79033 +/* Do not reference code from writable data */
79034 +{
79035 + .fromsec = { DATA_SECTIONS, NULL },
79036 + .tosec = { TEXT_SECTIONS, NULL },
79037 + .mismatch = DATA_TO_TEXT
79038 }
79039 };
79040
79041 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
79042 continue;
79043 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
79044 continue;
79045 - if (sym->st_value == addr)
79046 - return sym;
79047 /* Find a symbol nearby - addr are maybe negative */
79048 d = sym->st_value - addr;
79049 + if (d == 0)
79050 + return sym;
79051 if (d < 0)
79052 d = addr - sym->st_value;
79053 if (d < distance) {
79054 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
79055 tosym, prl_to, prl_to, tosym);
79056 free(prl_to);
79057 break;
79058 + case DATA_TO_TEXT:
79059 +/*
79060 + fprintf(stderr,
79061 + "The variable %s references\n"
79062 + "the %s %s%s%s\n",
79063 + fromsym, to, sec2annotation(tosec), tosym, to_p);
79064 +*/
79065 + break;
79066 }
79067 fprintf(stderr, "\n");
79068 }
79069 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
79070 static void check_sec_ref(struct module *mod, const char *modname,
79071 struct elf_info *elf)
79072 {
79073 - int i;
79074 + unsigned int i;
79075 Elf_Shdr *sechdrs = elf->sechdrs;
79076
79077 /* Walk through all sections */
79078 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
79079 va_end(ap);
79080 }
79081
79082 -void buf_write(struct buffer *buf, const char *s, int len)
79083 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
79084 {
79085 if (buf->size - buf->pos < len) {
79086 buf->size += len + SZ;
79087 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
79088 if (fstat(fileno(file), &st) < 0)
79089 goto close_write;
79090
79091 - if (st.st_size != b->pos)
79092 + if (st.st_size != (off_t)b->pos)
79093 goto close_write;
79094
79095 tmp = NOFAIL(malloc(b->pos));
79096 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
79097 index 51207e4..f7d603d 100644
79098 --- a/scripts/mod/modpost.h
79099 +++ b/scripts/mod/modpost.h
79100 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
79101
79102 struct buffer {
79103 char *p;
79104 - int pos;
79105 - int size;
79106 + unsigned int pos;
79107 + unsigned int size;
79108 };
79109
79110 void __attribute__((format(printf, 2, 3)))
79111 buf_printf(struct buffer *buf, const char *fmt, ...);
79112
79113 void
79114 -buf_write(struct buffer *buf, const char *s, int len);
79115 +buf_write(struct buffer *buf, const char *s, unsigned int len);
79116
79117 struct module {
79118 struct module *next;
79119 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
79120 index 9dfcd6d..099068e 100644
79121 --- a/scripts/mod/sumversion.c
79122 +++ b/scripts/mod/sumversion.c
79123 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
79124 goto out;
79125 }
79126
79127 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
79128 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
79129 warn("writing sum in %s failed: %s\n",
79130 filename, strerror(errno));
79131 goto out;
79132 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
79133 index 5c11312..72742b5 100644
79134 --- a/scripts/pnmtologo.c
79135 +++ b/scripts/pnmtologo.c
79136 @@ -237,14 +237,14 @@ static void write_header(void)
79137 fprintf(out, " * Linux logo %s\n", logoname);
79138 fputs(" */\n\n", out);
79139 fputs("#include <linux/linux_logo.h>\n\n", out);
79140 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
79141 + fprintf(out, "static unsigned char %s_data[] = {\n",
79142 logoname);
79143 }
79144
79145 static void write_footer(void)
79146 {
79147 fputs("\n};\n\n", out);
79148 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
79149 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
79150 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
79151 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
79152 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
79153 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
79154 fputs("\n};\n\n", out);
79155
79156 /* write logo clut */
79157 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
79158 + fprintf(out, "static unsigned char %s_clut[] = {\n",
79159 logoname);
79160 write_hex_cnt = 0;
79161 for (i = 0; i < logo_clutsize; i++) {
79162 diff --git a/scripts/tags.sh b/scripts/tags.sh
79163 index 833813a..0bc8588 100755
79164 --- a/scripts/tags.sh
79165 +++ b/scripts/tags.sh
79166 @@ -116,7 +116,7 @@ docscope()
79167
79168 dogtags()
79169 {
79170 - all_sources | gtags -f -
79171 + all_sources | gtags -i -f -
79172 }
79173
79174 exuberant()
79175 diff --git a/security/Kconfig b/security/Kconfig
79176 index 51bd5a0..c37f5e6 100644
79177 --- a/security/Kconfig
79178 +++ b/security/Kconfig
79179 @@ -4,6 +4,640 @@
79180
79181 menu "Security options"
79182
79183 +source grsecurity/Kconfig
79184 +
79185 +menu "PaX"
79186 +
79187 + config ARCH_TRACK_EXEC_LIMIT
79188 + bool
79189 +
79190 + config PAX_KERNEXEC_PLUGIN
79191 + bool
79192 +
79193 + config PAX_PER_CPU_PGD
79194 + bool
79195 +
79196 + config TASK_SIZE_MAX_SHIFT
79197 + int
79198 + depends on X86_64
79199 + default 47 if !PAX_PER_CPU_PGD
79200 + default 42 if PAX_PER_CPU_PGD
79201 +
79202 + config PAX_ENABLE_PAE
79203 + bool
79204 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
79205 +
79206 +config PAX
79207 + bool "Enable various PaX features"
79208 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
79209 + help
79210 + This allows you to enable various PaX features. PaX adds
79211 + intrusion prevention mechanisms to the kernel that reduce
79212 + the risks posed by exploitable memory corruption bugs.
79213 +
79214 +menu "PaX Control"
79215 + depends on PAX
79216 +
79217 +config PAX_SOFTMODE
79218 + bool 'Support soft mode'
79219 + help
79220 + Enabling this option will allow you to run PaX in soft mode, that
79221 + is, PaX features will not be enforced by default, only on executables
79222 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
79223 + support as they are the only way to mark executables for soft mode use.
79224 +
79225 + Soft mode can be activated by using the "pax_softmode=1" kernel command
79226 + line option on boot. Furthermore you can control various PaX features
79227 + at runtime via the entries in /proc/sys/kernel/pax.
79228 +
79229 +config PAX_EI_PAX
79230 + bool 'Use legacy ELF header marking'
79231 + help
79232 + Enabling this option will allow you to control PaX features on
79233 + a per executable basis via the 'chpax' utility available at
79234 + http://pax.grsecurity.net/. The control flags will be read from
79235 + an otherwise reserved part of the ELF header. This marking has
79236 + numerous drawbacks (no support for soft-mode, toolchain does not
79237 + know about the non-standard use of the ELF header) therefore it
79238 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
79239 + support.
79240 +
79241 + If you have applications not marked by the PT_PAX_FLAGS ELF program
79242 + header and you cannot use XATTR_PAX_FLAGS then you MUST enable this
79243 + option otherwise they will not get any protection.
79244 +
79245 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
79246 + support as well, they will override the legacy EI_PAX marks.
79247 +
79248 +config PAX_PT_PAX_FLAGS
79249 + bool 'Use ELF program header marking'
79250 + help
79251 + Enabling this option will allow you to control PaX features on
79252 + a per executable basis via the 'paxctl' utility available at
79253 + http://pax.grsecurity.net/. The control flags will be read from
79254 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
79255 + has the benefits of supporting both soft mode and being fully
79256 + integrated into the toolchain (the binutils patch is available
79257 + from http://pax.grsecurity.net).
79258 +
79259 + If you have applications not marked by the PT_PAX_FLAGS ELF program
79260 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
79261 + support otherwise they will not get any protection.
79262 +
79263 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
79264 + must make sure that the marks are the same if a binary has both marks.
79265 +
79266 + Note that if you enable the legacy EI_PAX marking support as well,
79267 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
79268 +
79269 +config PAX_XATTR_PAX_FLAGS
79270 + bool 'Use filesystem extended attributes marking'
79271 + select CIFS_XATTR if CIFS
79272 + select EXT2_FS_XATTR if EXT2_FS
79273 + select EXT3_FS_XATTR if EXT3_FS
79274 + select EXT4_FS_XATTR if EXT4_FS
79275 + select JFFS2_FS_XATTR if JFFS2_FS
79276 + select REISERFS_FS_XATTR if REISERFS_FS
79277 + select SQUASHFS_XATTR if SQUASHFS
79278 + select TMPFS_XATTR if TMPFS
79279 + select UBIFS_FS_XATTR if UBIFS_FS
79280 + help
79281 + Enabling this option will allow you to control PaX features on
79282 + a per executable basis via the 'setfattr' utility. The control
79283 + flags will be read from the user.pax.flags extended attribute of
79284 + the file. This marking has the benefit of supporting binary-only
79285 + applications that self-check themselves (e.g., skype) and would
79286 + not tolerate chpax/paxctl changes. The main drawback is that
79287 + extended attributes are not supported by some filesystems (e.g.,
79288 + isofs, udf, vfat) so copying files through such filesystems will
79289 + lose the extended attributes and these PaX markings.
79290 +
79291 + If you have applications not marked by the PT_PAX_FLAGS ELF program
79292 + header then you MUST enable either XATTR_PAX_FLAGS or EI_PAX marking
79293 + support otherwise they will not get any protection.
79294 +
79295 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
79296 + must make sure that the marks are the same if a binary has both marks.
79297 +
79298 + Note that if you enable the legacy EI_PAX marking support as well,
79299 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
79300 +
79301 +choice
79302 + prompt 'MAC system integration'
79303 + default PAX_HAVE_ACL_FLAGS
79304 + help
79305 + Mandatory Access Control systems have the option of controlling
79306 + PaX flags on a per executable basis, choose the method supported
79307 + by your particular system.
79308 +
79309 + - "none": if your MAC system does not interact with PaX,
79310 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
79311 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
79312 +
79313 + NOTE: this option is for developers/integrators only.
79314 +
79315 + config PAX_NO_ACL_FLAGS
79316 + bool 'none'
79317 +
79318 + config PAX_HAVE_ACL_FLAGS
79319 + bool 'direct'
79320 +
79321 + config PAX_HOOK_ACL_FLAGS
79322 + bool 'hook'
79323 +endchoice
79324 +
79325 +endmenu
79326 +
79327 +menu "Non-executable pages"
79328 + depends on PAX
79329 +
79330 +config PAX_NOEXEC
79331 + bool "Enforce non-executable pages"
79332 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
79333 + help
79334 + By design some architectures do not allow for protecting memory
79335 + pages against execution or even if they do, Linux does not make
79336 + use of this feature. In practice this means that if a page is
79337 + readable (such as the stack or heap) it is also executable.
79338 +
79339 + There is a well known exploit technique that makes use of this
79340 + fact and a common programming mistake where an attacker can
79341 + introduce code of his choice somewhere in the attacked program's
79342 + memory (typically the stack or the heap) and then execute it.
79343 +
79344 + If the attacked program was running with different (typically
79345 + higher) privileges than that of the attacker, then he can elevate
79346 + his own privilege level (e.g. get a root shell, write to files for
79347 + which he does not have write access to, etc).
79348 +
79349 + Enabling this option will let you choose from various features
79350 + that prevent the injection and execution of 'foreign' code in
79351 + a program.
79352 +
79353 + This will also break programs that rely on the old behaviour and
79354 + expect that dynamically allocated memory via the malloc() family
79355 + of functions is executable (which it is not). Notable examples
79356 + are the XFree86 4.x server, the java runtime and wine.
79357 +
79358 +config PAX_PAGEEXEC
79359 + bool "Paging based non-executable pages"
79360 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
79361 + select S390_SWITCH_AMODE if S390
79362 + select S390_EXEC_PROTECT if S390
79363 + select ARCH_TRACK_EXEC_LIMIT if X86_32
79364 + help
79365 + This implementation is based on the paging feature of the CPU.
79366 + On i386 without hardware non-executable bit support there is a
79367 + variable but usually low performance impact, however on Intel's
79368 + P4 core based CPUs it is very high so you should not enable this
79369 + for kernels meant to be used on such CPUs.
79370 +
79371 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
79372 + with hardware non-executable bit support there is no performance
79373 + impact, on ppc the impact is negligible.
79374 +
79375 + Note that several architectures require various emulations due to
79376 + badly designed userland ABIs, this will cause a performance impact
79377 + but will disappear as soon as userland is fixed. For example, ppc
79378 + userland MUST have been built with secure-plt by a recent toolchain.
79379 +
79380 +config PAX_SEGMEXEC
79381 + bool "Segmentation based non-executable pages"
79382 + depends on PAX_NOEXEC && X86_32
79383 + help
79384 + This implementation is based on the segmentation feature of the
79385 + CPU and has a very small performance impact, however applications
79386 + will be limited to a 1.5 GB address space instead of the normal
79387 + 3 GB.
79388 +
79389 +config PAX_EMUTRAMP
79390 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
79391 + default y if PARISC
79392 + help
79393 + There are some programs and libraries that for one reason or
79394 + another attempt to execute special small code snippets from
79395 + non-executable memory pages. Most notable examples are the
79396 + signal handler return code generated by the kernel itself and
79397 + the GCC trampolines.
79398 +
79399 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
79400 + such programs will no longer work under your kernel.
79401 +
79402 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
79403 + utilities to enable trampoline emulation for the affected programs
79404 + yet still have the protection provided by the non-executable pages.
79405 +
79406 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
79407 + your system will not even boot.
79408 +
79409 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
79410 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
79411 + for the affected files.
79412 +
79413 + NOTE: enabling this feature *may* open up a loophole in the
79414 + protection provided by non-executable pages that an attacker
79415 + could abuse. Therefore the best solution is to not have any
79416 + files on your system that would require this option. This can
79417 + be achieved by not using libc5 (which relies on the kernel
79418 + signal handler return code) and not using or rewriting programs
79419 + that make use of the nested function implementation of GCC.
79420 + Skilled users can just fix GCC itself so that it implements
79421 + nested function calls in a way that does not interfere with PaX.
79422 +
79423 +config PAX_EMUSIGRT
79424 + bool "Automatically emulate sigreturn trampolines"
79425 + depends on PAX_EMUTRAMP && PARISC
79426 + default y
79427 + help
79428 + Enabling this option will have the kernel automatically detect
79429 + and emulate signal return trampolines executing on the stack
79430 + that would otherwise lead to task termination.
79431 +
79432 + This solution is intended as a temporary one for users with
79433 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
79434 + Modula-3 runtime, etc) or executables linked to such, basically
79435 + everything that does not specify its own SA_RESTORER function in
79436 + normal executable memory like glibc 2.1+ does.
79437 +
79438 + On parisc you MUST enable this option, otherwise your system will
79439 + not even boot.
79440 +
79441 + NOTE: this feature cannot be disabled on a per executable basis
79442 + and since it *does* open up a loophole in the protection provided
79443 + by non-executable pages, the best solution is to not have any
79444 + files on your system that would require this option.
79445 +
79446 +config PAX_MPROTECT
79447 + bool "Restrict mprotect()"
79448 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
79449 + help
79450 + Enabling this option will prevent programs from
79451 + - changing the executable status of memory pages that were
79452 + not originally created as executable,
79453 + - making read-only executable pages writable again,
79454 + - creating executable pages from anonymous memory,
79455 + - making read-only-after-relocations (RELRO) data pages writable again.
79456 +
79457 + You should say Y here to complete the protection provided by
79458 + the enforcement of non-executable pages.
79459 +
79460 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79461 + this feature on a per file basis.
79462 +
79463 +config PAX_MPROTECT_COMPAT
79464 + bool "Use legacy/compat protection demoting (read help)"
79465 + depends on PAX_MPROTECT
79466 + default n
79467 + help
79468 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
79469 + by sending the proper error code to the application. For some broken
79470 + userland, this can cause problems with Python or other applications. The
79471 + current implementation however allows for applications like clamav to
79472 + detect if JIT compilation/execution is allowed and to fall back gracefully
79473 + to an interpreter-based mode if it does not. While we encourage everyone
79474 + to use the current implementation as-is and push upstream to fix broken
79475 + userland (note that the RWX logging option can assist with this), in some
79476 + environments this may not be possible. Having to disable MPROTECT
79477 + completely on certain binaries reduces the security benefit of PaX,
79478 + so this option is provided for those environments to revert to the old
79479 + behavior.
79480 +
79481 +config PAX_ELFRELOCS
79482 + bool "Allow ELF text relocations (read help)"
79483 + depends on PAX_MPROTECT
79484 + default n
79485 + help
79486 + Non-executable pages and mprotect() restrictions are effective
79487 + in preventing the introduction of new executable code into an
79488 + attacked task's address space. There remain only two venues
79489 + for this kind of attack: if the attacker can execute already
79490 + existing code in the attacked task then he can either have it
79491 + create and mmap() a file containing his code or have it mmap()
79492 + an already existing ELF library that does not have position
79493 + independent code in it and use mprotect() on it to make it
79494 + writable and copy his code there. While protecting against
79495 + the former approach is beyond PaX, the latter can be prevented
79496 + by having only PIC ELF libraries on one's system (which do not
79497 + need to relocate their code). If you are sure this is your case,
79498 + as is the case with all modern Linux distributions, then leave
79499 + this option disabled. You should say 'n' here.
79500 +
79501 +config PAX_ETEXECRELOCS
79502 + bool "Allow ELF ET_EXEC text relocations"
79503 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
79504 + select PAX_ELFRELOCS
79505 + default y
79506 + help
79507 + On some architectures there are incorrectly created applications
79508 + that require text relocations and would not work without enabling
79509 + this option. If you are an alpha, ia64 or parisc user, you should
79510 + enable this option and disable it once you have made sure that
79511 + none of your applications need it.
79512 +
79513 +config PAX_EMUPLT
79514 + bool "Automatically emulate ELF PLT"
79515 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
79516 + default y
79517 + help
79518 + Enabling this option will have the kernel automatically detect
79519 + and emulate the Procedure Linkage Table entries in ELF files.
79520 + On some architectures such entries are in writable memory, and
79521 + become non-executable leading to task termination. Therefore
79522 + it is mandatory that you enable this option on alpha, parisc,
79523 + sparc and sparc64, otherwise your system would not even boot.
79524 +
79525 + NOTE: this feature *does* open up a loophole in the protection
79526 + provided by the non-executable pages, therefore the proper
79527 + solution is to modify the toolchain to produce a PLT that does
79528 + not need to be writable.
79529 +
79530 +config PAX_DLRESOLVE
79531 + bool 'Emulate old glibc resolver stub'
79532 + depends on PAX_EMUPLT && SPARC
79533 + default n
79534 + help
79535 + This option is needed if userland has an old glibc (before 2.4)
79536 + that puts a 'save' instruction into the runtime generated resolver
79537 + stub that needs special emulation.
79538 +
79539 +config PAX_KERNEXEC
79540 + bool "Enforce non-executable kernel pages"
79541 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
79542 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
79543 + select PAX_KERNEXEC_PLUGIN if X86_64
79544 + help
79545 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
79546 + that is, enabling this option will make it harder to inject
79547 + and execute 'foreign' code in kernel memory itself.
79548 +
79549 + Note that on x86_64 kernels there is a known regression when
79550 + this feature and KVM/VMX are both enabled in the host kernel.
79551 +
79552 +choice
79553 + prompt "Return Address Instrumentation Method"
79554 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
79555 + depends on PAX_KERNEXEC_PLUGIN
79556 + help
79557 + Select the method used to instrument function pointer dereferences.
79558 + Note that binary modules cannot be instrumented by this approach.
79559 +
79560 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
79561 + bool "bts"
79562 + help
79563 + This method is compatible with binary only modules but has
79564 + a higher runtime overhead.
79565 +
79566 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
79567 + bool "or"
79568 + depends on !PARAVIRT
79569 + help
79570 + This method is incompatible with binary only modules but has
79571 + a lower runtime overhead.
79572 +endchoice
79573 +
79574 +config PAX_KERNEXEC_PLUGIN_METHOD
79575 + string
79576 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
79577 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
79578 + default ""
79579 +
79580 +config PAX_KERNEXEC_MODULE_TEXT
79581 + int "Minimum amount of memory reserved for module code"
79582 + default "4"
79583 + depends on PAX_KERNEXEC && X86_32 && MODULES
79584 + help
79585 + Due to implementation details the kernel must reserve a fixed
79586 + amount of memory for module code at compile time that cannot be
79587 + changed at runtime. Here you can specify the minimum amount
79588 + in MB that will be reserved. Due to the same implementation
79589 + details this size will always be rounded up to the next 2/4 MB
79590 + boundary (depends on PAE) so the actually available memory for
79591 + module code will usually be more than this minimum.
79592 +
79593 + The default 4 MB should be enough for most users but if you have
79594 + an excessive number of modules (e.g., most distribution configs
79595 + compile many drivers as modules) or use huge modules such as
79596 + nvidia's kernel driver, you will need to adjust this amount.
79597 + A good rule of thumb is to look at your currently loaded kernel
79598 + modules and add up their sizes.
79599 +
79600 +endmenu
79601 +
79602 +menu "Address Space Layout Randomization"
79603 + depends on PAX
79604 +
79605 +config PAX_ASLR
79606 + bool "Address Space Layout Randomization"
79607 + help
79608 + Many if not most exploit techniques rely on the knowledge of
79609 + certain addresses in the attacked program. The following options
79610 + will allow the kernel to apply a certain amount of randomization
79611 + to specific parts of the program thereby forcing an attacker to
79612 + guess them in most cases. Any failed guess will most likely crash
79613 + the attacked program which allows the kernel to detect such attempts
79614 + and react on them. PaX itself provides no reaction mechanisms,
79615 + instead it is strongly encouraged that you make use of Nergal's
79616 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
79617 + (http://www.grsecurity.net/) built-in crash detection features or
79618 + develop one yourself.
79619 +
79620 + By saying Y here you can choose to randomize the following areas:
79621 + - top of the task's kernel stack
79622 + - top of the task's userland stack
79623 + - base address for mmap() requests that do not specify one
79624 + (this includes all libraries)
79625 + - base address of the main executable
79626 +
79627 + It is strongly recommended to say Y here as address space layout
79628 + randomization has negligible impact on performance yet it provides
79629 + a very effective protection.
79630 +
79631 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79632 + this feature on a per file basis.
79633 +
79634 +config PAX_RANDKSTACK
79635 + bool "Randomize kernel stack base"
79636 + depends on X86_TSC && X86
79637 + help
79638 + By saying Y here the kernel will randomize every task's kernel
79639 + stack on every system call. This will not only force an attacker
79640 + to guess it but also prevent him from making use of possible
79641 + leaked information about it.
79642 +
79643 + Since the kernel stack is a rather scarce resource, randomization
79644 + may cause unexpected stack overflows, therefore you should very
79645 + carefully test your system. Note that once enabled in the kernel
79646 + configuration, this feature cannot be disabled on a per file basis.
79647 +
79648 +config PAX_RANDUSTACK
79649 + bool "Randomize user stack base"
79650 + depends on PAX_ASLR
79651 + help
79652 + By saying Y here the kernel will randomize every task's userland
79653 + stack. The randomization is done in two steps where the second
79654 + one may apply a big amount of shift to the top of the stack and
79655 + cause problems for programs that want to use lots of memory (more
79656 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
79657 + For this reason the second step can be controlled by 'chpax' or
79658 + 'paxctl' on a per file basis.
79659 +
79660 +config PAX_RANDMMAP
79661 + bool "Randomize mmap() base"
79662 + depends on PAX_ASLR
79663 + help
79664 + By saying Y here the kernel will use a randomized base address for
79665 + mmap() requests that do not specify one themselves. As a result
79666 + all dynamically loaded libraries will appear at random addresses
79667 + and therefore be harder to exploit by a technique where an attacker
79668 + attempts to execute library code for his purposes (e.g. spawn a
79669 + shell from an exploited program that is running at an elevated
79670 + privilege level).
79671 +
79672 + Furthermore, if a program is relinked as a dynamic ELF file, its
79673 + base address will be randomized as well, completing the full
79674 + randomization of the address space layout. Attacking such programs
79675 + becomes a guess game. You can find an example of doing this at
79676 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
79677 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
79678 +
79679 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
79680 + feature on a per file basis.
79681 +
79682 +endmenu
79683 +
79684 +menu "Miscellaneous hardening features"
79685 +
79686 +config PAX_MEMORY_SANITIZE
79687 + bool "Sanitize all freed memory"
79688 + depends on !HIBERNATION
79689 + help
79690 + By saying Y here the kernel will erase memory pages as soon as they
79691 + are freed. This in turn reduces the lifetime of data stored in the
79692 + pages, making it less likely that sensitive information such as
79693 + passwords, cryptographic secrets, etc stay in memory for too long.
79694 +
79695 + This is especially useful for programs whose runtime is short, long
79696 + lived processes and the kernel itself benefit from this as long as
79697 + they operate on whole memory pages and ensure timely freeing of pages
79698 + that may hold sensitive information.
79699 +
79700 + The tradeoff is performance impact, on a single CPU system kernel
79701 + compilation sees a 3% slowdown, other systems and workloads may vary
79702 + and you are advised to test this feature on your expected workload
79703 + before deploying it.
79704 +
79705 + Note that this feature does not protect data stored in live pages,
79706 + e.g., process memory swapped to disk may stay there for a long time.
79707 +
79708 +config PAX_MEMORY_STACKLEAK
79709 + bool "Sanitize kernel stack"
79710 + depends on X86
79711 + help
79712 + By saying Y here the kernel will erase the kernel stack before it
79713 + returns from a system call. This in turn reduces the information
79714 + that a kernel stack leak bug can reveal.
79715 +
79716 + Note that such a bug can still leak information that was put on
79717 + the stack by the current system call (the one eventually triggering
79718 + the bug) but traces of earlier system calls on the kernel stack
79719 + cannot leak anymore.
79720 +
79721 + The tradeoff is performance impact: on a single CPU system kernel
79722 + compilation sees a 1% slowdown, other systems and workloads may vary
79723 + and you are advised to test this feature on your expected workload
79724 + before deploying it.
79725 +
79726 + Note: full support for this feature requires gcc with plugin support
79727 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
79728 + versions means that functions with large enough stack frames may
79729 + leave uninitialized memory behind that may be exposed to a later
79730 + syscall leaking the stack.
79731 +
79732 +config PAX_MEMORY_UDEREF
79733 + bool "Prevent invalid userland pointer dereference"
79734 + depends on X86 && !UML_X86 && !XEN
79735 + select PAX_PER_CPU_PGD if X86_64
79736 + help
79737 + By saying Y here the kernel will be prevented from dereferencing
79738 + userland pointers in contexts where the kernel expects only kernel
79739 + pointers. This is both a useful runtime debugging feature and a
79740 + security measure that prevents exploiting a class of kernel bugs.
79741 +
79742 + The tradeoff is that some virtualization solutions may experience
79743 + a huge slowdown and therefore you should not enable this feature
79744 + for kernels meant to run in such environments. Whether a given VM
79745 + solution is affected or not is best determined by simply trying it
79746 + out, the performance impact will be obvious right on boot as this
79747 + mechanism engages from very early on. A good rule of thumb is that
79748 + VMs running on CPUs without hardware virtualization support (i.e.,
79749 + the majority of IA-32 CPUs) will likely experience the slowdown.
79750 +
79751 +config PAX_REFCOUNT
79752 + bool "Prevent various kernel object reference counter overflows"
79753 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
79754 + help
79755 + By saying Y here the kernel will detect and prevent overflowing
79756 + various (but not all) kinds of object reference counters. Such
79757 + overflows can normally occur due to bugs only and are often, if
79758 + not always, exploitable.
79759 +
79760 + The tradeoff is that data structures protected by an overflowed
79761 + refcount will never be freed and therefore will leak memory. Note
79762 + that this leak also happens even without this protection but in
79763 + that case the overflow can eventually trigger the freeing of the
79764 + data structure while it is still being used elsewhere, resulting
79765 + in the exploitable situation that this feature prevents.
79766 +
79767 + Since this has a negligible performance impact, you should enable
79768 + this feature.
79769 +
79770 +config PAX_USERCOPY
79771 + bool "Harden heap object copies between kernel and userland"
79772 + depends on X86 || PPC || SPARC || ARM
79773 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
79774 + help
79775 + By saying Y here the kernel will enforce the size of heap objects
79776 + when they are copied in either direction between the kernel and
79777 + userland, even if only a part of the heap object is copied.
79778 +
79779 + Specifically, this checking prevents information leaking from the
79780 + kernel heap during kernel to userland copies (if the kernel heap
79781 + object is otherwise fully initialized) and prevents kernel heap
79782 + overflows during userland to kernel copies.
79783 +
79784 + Note that the current implementation provides the strictest bounds
79785 + checks for the SLUB allocator.
79786 +
79787 + Enabling this option also enables per-slab cache protection against
79788 + data in a given cache being copied into/out of via userland
79789 + accessors. Though the whitelist of regions will be reduced over
79790 + time, it notably protects important data structures like task structs.
79791 +
79792 + If frame pointers are enabled on x86, this option will also restrict
79793 + copies into and out of the kernel stack to local variables within a
79794 + single frame.
79795 +
79796 + Since this has a negligible performance impact, you should enable
79797 + this feature.
79798 +
79799 +config PAX_SIZE_OVERFLOW
79800 + bool "Prevent various integer overflows in function size parameters"
79801 + depends on X86
79802 + help
79803 + By saying Y here the kernel recomputes expressions of function
79804 + arguments marked by a size_overflow attribute with double integer
79805 + precision (DImode/TImode for 32/64 bit integer types).
79806 +
79807 + The recomputed argument is checked against INT_MAX and an event
79808 + is logged on overflow and the triggering process is killed.
79809 +
79810 + Homepage:
79811 + http://www.grsecurity.net/~ephox/overflow_plugin/
79812 +
79813 +endmenu
79814 +
79815 +endmenu
79816 +
79817 config KEYS
79818 bool "Enable access key retention support"
79819 help
79820 @@ -169,7 +803,7 @@ config INTEL_TXT
79821 config LSM_MMAP_MIN_ADDR
79822 int "Low address space for LSM to protect from user allocation"
79823 depends on SECURITY && SECURITY_SELINUX
79824 - default 32768 if ARM
79825 + default 32768 if ALPHA || ARM || PARISC || SPARC32
79826 default 65536
79827 help
79828 This is the portion of low virtual memory which should be protected
79829 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
79830 index 97ce8fa..23dad96 100644
79831 --- a/security/apparmor/lsm.c
79832 +++ b/security/apparmor/lsm.c
79833 @@ -620,7 +620,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
79834 return error;
79835 }
79836
79837 -static struct security_operations apparmor_ops = {
79838 +static struct security_operations apparmor_ops __read_only = {
79839 .name = "apparmor",
79840
79841 .ptrace_access_check = apparmor_ptrace_access_check,
79842 diff --git a/security/commoncap.c b/security/commoncap.c
79843 index b8d2bb9..980069e 100644
79844 --- a/security/commoncap.c
79845 +++ b/security/commoncap.c
79846 @@ -29,6 +29,7 @@
79847 #include <linux/securebits.h>
79848 #include <linux/user_namespace.h>
79849 #include <linux/personality.h>
79850 +#include <net/sock.h>
79851
79852 /*
79853 * If a non-root user executes a setuid-root binary in
79854 @@ -575,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79855 {
79856 const struct cred *cred = current_cred();
79857
79858 + if (gr_acl_enable_at_secure())
79859 + return 1;
79860 +
79861 if (cred->uid != 0) {
79862 if (bprm->cap_effective)
79863 return 1;
79864 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79865 index 3ccf7ac..d73ad64 100644
79866 --- a/security/integrity/ima/ima.h
79867 +++ b/security/integrity/ima/ima.h
79868 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79869 extern spinlock_t ima_queue_lock;
79870
79871 struct ima_h_table {
79872 - atomic_long_t len; /* number of stored measurements in the list */
79873 - atomic_long_t violations;
79874 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79875 + atomic_long_unchecked_t violations;
79876 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79877 };
79878 extern struct ima_h_table ima_htable;
79879 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79880 index 88a2788..581ab92 100644
79881 --- a/security/integrity/ima/ima_api.c
79882 +++ b/security/integrity/ima/ima_api.c
79883 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79884 int result;
79885
79886 /* can overflow, only indicator */
79887 - atomic_long_inc(&ima_htable.violations);
79888 + atomic_long_inc_unchecked(&ima_htable.violations);
79889
79890 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79891 if (!entry) {
79892 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79893 index e1aa2b4..52027bf 100644
79894 --- a/security/integrity/ima/ima_fs.c
79895 +++ b/security/integrity/ima/ima_fs.c
79896 @@ -28,12 +28,12 @@
79897 static int valid_policy = 1;
79898 #define TMPBUFLEN 12
79899 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79900 - loff_t *ppos, atomic_long_t *val)
79901 + loff_t *ppos, atomic_long_unchecked_t *val)
79902 {
79903 char tmpbuf[TMPBUFLEN];
79904 ssize_t len;
79905
79906 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79907 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79908 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79909 }
79910
79911 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79912 index 55a6271..ad829c3 100644
79913 --- a/security/integrity/ima/ima_queue.c
79914 +++ b/security/integrity/ima/ima_queue.c
79915 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79916 INIT_LIST_HEAD(&qe->later);
79917 list_add_tail_rcu(&qe->later, &ima_measurements);
79918
79919 - atomic_long_inc(&ima_htable.len);
79920 + atomic_long_inc_unchecked(&ima_htable.len);
79921 key = ima_hash_key(entry->digest);
79922 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79923 return 0;
79924 diff --git a/security/keys/compat.c b/security/keys/compat.c
79925 index 4c48e13..7abdac9 100644
79926 --- a/security/keys/compat.c
79927 +++ b/security/keys/compat.c
79928 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79929 if (ret == 0)
79930 goto no_payload_free;
79931
79932 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79933 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79934
79935 if (iov != iovstack)
79936 kfree(iov);
79937 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79938 index 0b3f5d7..892c8a6 100644
79939 --- a/security/keys/keyctl.c
79940 +++ b/security/keys/keyctl.c
79941 @@ -921,7 +921,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79942 /*
79943 * Copy the iovec data from userspace
79944 */
79945 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79946 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79947 unsigned ioc)
79948 {
79949 for (; ioc > 0; ioc--) {
79950 @@ -943,7 +943,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79951 * If successful, 0 will be returned.
79952 */
79953 long keyctl_instantiate_key_common(key_serial_t id,
79954 - const struct iovec *payload_iov,
79955 + const struct iovec __user *payload_iov,
79956 unsigned ioc,
79957 size_t plen,
79958 key_serial_t ringid)
79959 @@ -1038,7 +1038,7 @@ long keyctl_instantiate_key(key_serial_t id,
79960 [0].iov_len = plen
79961 };
79962
79963 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79964 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79965 }
79966
79967 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79968 @@ -1071,7 +1071,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79969 if (ret == 0)
79970 goto no_payload_free;
79971
79972 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79973 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79974
79975 if (iov != iovstack)
79976 kfree(iov);
79977 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79978 index d605f75..2bc6be9 100644
79979 --- a/security/keys/keyring.c
79980 +++ b/security/keys/keyring.c
79981 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79982 ret = -EFAULT;
79983
79984 for (loop = 0; loop < klist->nkeys; loop++) {
79985 + key_serial_t serial;
79986 key = klist->keys[loop];
79987 + serial = key->serial;
79988
79989 tmp = sizeof(key_serial_t);
79990 if (tmp > buflen)
79991 tmp = buflen;
79992
79993 - if (copy_to_user(buffer,
79994 - &key->serial,
79995 - tmp) != 0)
79996 + if (copy_to_user(buffer, &serial, tmp))
79997 goto error;
79998
79999 buflen -= tmp;
80000 diff --git a/security/min_addr.c b/security/min_addr.c
80001 index f728728..6457a0c 100644
80002 --- a/security/min_addr.c
80003 +++ b/security/min_addr.c
80004 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
80005 */
80006 static void update_mmap_min_addr(void)
80007 {
80008 +#ifndef SPARC
80009 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
80010 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
80011 mmap_min_addr = dac_mmap_min_addr;
80012 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
80013 #else
80014 mmap_min_addr = dac_mmap_min_addr;
80015 #endif
80016 +#endif
80017 }
80018
80019 /*
80020 diff --git a/security/security.c b/security/security.c
80021 index d754249..8bf426e 100644
80022 --- a/security/security.c
80023 +++ b/security/security.c
80024 @@ -26,8 +26,8 @@
80025 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
80026 CONFIG_DEFAULT_SECURITY;
80027
80028 -static struct security_operations *security_ops;
80029 -static struct security_operations default_security_ops = {
80030 +static struct security_operations *security_ops __read_only;
80031 +static struct security_operations default_security_ops __read_only = {
80032 .name = "default",
80033 };
80034
80035 @@ -68,7 +68,9 @@ int __init security_init(void)
80036
80037 void reset_security_ops(void)
80038 {
80039 + pax_open_kernel();
80040 security_ops = &default_security_ops;
80041 + pax_close_kernel();
80042 }
80043
80044 /* Save user chosen LSM */
80045 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
80046 index 6a3683e..f52f4c0 100644
80047 --- a/security/selinux/hooks.c
80048 +++ b/security/selinux/hooks.c
80049 @@ -94,8 +94,6 @@
80050
80051 #define NUM_SEL_MNT_OPTS 5
80052
80053 -extern struct security_operations *security_ops;
80054 -
80055 /* SECMARK reference count */
80056 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
80057
80058 @@ -5429,7 +5427,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
80059
80060 #endif
80061
80062 -static struct security_operations selinux_ops = {
80063 +static struct security_operations selinux_ops __read_only = {
80064 .name = "selinux",
80065
80066 .ptrace_access_check = selinux_ptrace_access_check,
80067 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
80068 index b43813c..74be837 100644
80069 --- a/security/selinux/include/xfrm.h
80070 +++ b/security/selinux/include/xfrm.h
80071 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
80072
80073 static inline void selinux_xfrm_notify_policyload(void)
80074 {
80075 - atomic_inc(&flow_cache_genid);
80076 + atomic_inc_unchecked(&flow_cache_genid);
80077 }
80078 #else
80079 static inline int selinux_xfrm_enabled(void)
80080 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
80081 index e8af5b0b..78527ef 100644
80082 --- a/security/smack/smack_lsm.c
80083 +++ b/security/smack/smack_lsm.c
80084 @@ -3481,7 +3481,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
80085 return 0;
80086 }
80087
80088 -struct security_operations smack_ops = {
80089 +struct security_operations smack_ops __read_only = {
80090 .name = "smack",
80091
80092 .ptrace_access_check = smack_ptrace_access_check,
80093 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
80094 index 620d37c..e2ad89b 100644
80095 --- a/security/tomoyo/tomoyo.c
80096 +++ b/security/tomoyo/tomoyo.c
80097 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
80098 * tomoyo_security_ops is a "struct security_operations" which is used for
80099 * registering TOMOYO.
80100 */
80101 -static struct security_operations tomoyo_security_ops = {
80102 +static struct security_operations tomoyo_security_ops __read_only = {
80103 .name = "tomoyo",
80104 .cred_alloc_blank = tomoyo_cred_alloc_blank,
80105 .cred_prepare = tomoyo_cred_prepare,
80106 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
80107 index 762af68..7103453 100644
80108 --- a/sound/aoa/codecs/onyx.c
80109 +++ b/sound/aoa/codecs/onyx.c
80110 @@ -54,7 +54,7 @@ struct onyx {
80111 spdif_locked:1,
80112 analog_locked:1,
80113 original_mute:2;
80114 - int open_count;
80115 + local_t open_count;
80116 struct codec_info *codec_info;
80117
80118 /* mutex serializes concurrent access to the device
80119 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
80120 struct onyx *onyx = cii->codec_data;
80121
80122 mutex_lock(&onyx->mutex);
80123 - onyx->open_count++;
80124 + local_inc(&onyx->open_count);
80125 mutex_unlock(&onyx->mutex);
80126
80127 return 0;
80128 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
80129 struct onyx *onyx = cii->codec_data;
80130
80131 mutex_lock(&onyx->mutex);
80132 - onyx->open_count--;
80133 - if (!onyx->open_count)
80134 + if (local_dec_and_test(&onyx->open_count))
80135 onyx->spdif_locked = onyx->analog_locked = 0;
80136 mutex_unlock(&onyx->mutex);
80137
80138 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
80139 index ffd2025..df062c9 100644
80140 --- a/sound/aoa/codecs/onyx.h
80141 +++ b/sound/aoa/codecs/onyx.h
80142 @@ -11,6 +11,7 @@
80143 #include <linux/i2c.h>
80144 #include <asm/pmac_low_i2c.h>
80145 #include <asm/prom.h>
80146 +#include <asm/local.h>
80147
80148 /* PCM3052 register definitions */
80149
80150 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
80151 index 08fde00..0bf641a 100644
80152 --- a/sound/core/oss/pcm_oss.c
80153 +++ b/sound/core/oss/pcm_oss.c
80154 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
80155 if (in_kernel) {
80156 mm_segment_t fs;
80157 fs = snd_enter_user();
80158 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
80159 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
80160 snd_leave_user(fs);
80161 } else {
80162 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
80163 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
80164 }
80165 if (ret != -EPIPE && ret != -ESTRPIPE)
80166 break;
80167 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
80168 if (in_kernel) {
80169 mm_segment_t fs;
80170 fs = snd_enter_user();
80171 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
80172 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
80173 snd_leave_user(fs);
80174 } else {
80175 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
80176 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
80177 }
80178 if (ret == -EPIPE) {
80179 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
80180 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
80181 struct snd_pcm_plugin_channel *channels;
80182 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
80183 if (!in_kernel) {
80184 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
80185 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
80186 return -EFAULT;
80187 buf = runtime->oss.buffer;
80188 }
80189 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
80190 }
80191 } else {
80192 tmp = snd_pcm_oss_write2(substream,
80193 - (const char __force *)buf,
80194 + (const char __force_kernel *)buf,
80195 runtime->oss.period_bytes, 0);
80196 if (tmp <= 0)
80197 goto err;
80198 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
80199 struct snd_pcm_runtime *runtime = substream->runtime;
80200 snd_pcm_sframes_t frames, frames1;
80201 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
80202 - char __user *final_dst = (char __force __user *)buf;
80203 + char __user *final_dst = (char __force_user *)buf;
80204 if (runtime->oss.plugin_first) {
80205 struct snd_pcm_plugin_channel *channels;
80206 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
80207 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
80208 xfer += tmp;
80209 runtime->oss.buffer_used -= tmp;
80210 } else {
80211 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
80212 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
80213 runtime->oss.period_bytes, 0);
80214 if (tmp <= 0)
80215 goto err;
80216 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
80217 size1);
80218 size1 /= runtime->channels; /* frames */
80219 fs = snd_enter_user();
80220 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
80221 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
80222 snd_leave_user(fs);
80223 }
80224 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
80225 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
80226 index 91cdf94..4085161 100644
80227 --- a/sound/core/pcm_compat.c
80228 +++ b/sound/core/pcm_compat.c
80229 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
80230 int err;
80231
80232 fs = snd_enter_user();
80233 - err = snd_pcm_delay(substream, &delay);
80234 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
80235 snd_leave_user(fs);
80236 if (err < 0)
80237 return err;
80238 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
80239 index 25ed9fe..24c46e9 100644
80240 --- a/sound/core/pcm_native.c
80241 +++ b/sound/core/pcm_native.c
80242 @@ -2765,11 +2765,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
80243 switch (substream->stream) {
80244 case SNDRV_PCM_STREAM_PLAYBACK:
80245 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
80246 - (void __user *)arg);
80247 + (void __force_user *)arg);
80248 break;
80249 case SNDRV_PCM_STREAM_CAPTURE:
80250 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
80251 - (void __user *)arg);
80252 + (void __force_user *)arg);
80253 break;
80254 default:
80255 result = -EINVAL;
80256 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
80257 index 5cf8d65..912a79c 100644
80258 --- a/sound/core/seq/seq_device.c
80259 +++ b/sound/core/seq/seq_device.c
80260 @@ -64,7 +64,7 @@ struct ops_list {
80261 int argsize; /* argument size */
80262
80263 /* operators */
80264 - struct snd_seq_dev_ops ops;
80265 + struct snd_seq_dev_ops *ops;
80266
80267 /* registred devices */
80268 struct list_head dev_list; /* list of devices */
80269 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
80270
80271 mutex_lock(&ops->reg_mutex);
80272 /* copy driver operators */
80273 - ops->ops = *entry;
80274 + ops->ops = entry;
80275 ops->driver |= DRIVER_LOADED;
80276 ops->argsize = argsize;
80277
80278 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
80279 dev->name, ops->id, ops->argsize, dev->argsize);
80280 return -EINVAL;
80281 }
80282 - if (ops->ops.init_device(dev) >= 0) {
80283 + if (ops->ops->init_device(dev) >= 0) {
80284 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
80285 ops->num_init_devices++;
80286 } else {
80287 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
80288 dev->name, ops->id, ops->argsize, dev->argsize);
80289 return -EINVAL;
80290 }
80291 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
80292 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
80293 dev->status = SNDRV_SEQ_DEVICE_FREE;
80294 dev->driver_data = NULL;
80295 ops->num_init_devices--;
80296 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
80297 index 621e60e..f4543f5 100644
80298 --- a/sound/drivers/mts64.c
80299 +++ b/sound/drivers/mts64.c
80300 @@ -29,6 +29,7 @@
80301 #include <sound/initval.h>
80302 #include <sound/rawmidi.h>
80303 #include <sound/control.h>
80304 +#include <asm/local.h>
80305
80306 #define CARD_NAME "Miditerminal 4140"
80307 #define DRIVER_NAME "MTS64"
80308 @@ -67,7 +68,7 @@ struct mts64 {
80309 struct pardevice *pardev;
80310 int pardev_claimed;
80311
80312 - int open_count;
80313 + local_t open_count;
80314 int current_midi_output_port;
80315 int current_midi_input_port;
80316 u8 mode[MTS64_NUM_INPUT_PORTS];
80317 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
80318 {
80319 struct mts64 *mts = substream->rmidi->private_data;
80320
80321 - if (mts->open_count == 0) {
80322 + if (local_read(&mts->open_count) == 0) {
80323 /* We don't need a spinlock here, because this is just called
80324 if the device has not been opened before.
80325 So there aren't any IRQs from the device */
80326 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
80327
80328 msleep(50);
80329 }
80330 - ++(mts->open_count);
80331 + local_inc(&mts->open_count);
80332
80333 return 0;
80334 }
80335 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
80336 struct mts64 *mts = substream->rmidi->private_data;
80337 unsigned long flags;
80338
80339 - --(mts->open_count);
80340 - if (mts->open_count == 0) {
80341 + if (local_dec_return(&mts->open_count) == 0) {
80342 /* We need the spinlock_irqsave here because we can still
80343 have IRQs at this point */
80344 spin_lock_irqsave(&mts->lock, flags);
80345 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
80346
80347 msleep(500);
80348
80349 - } else if (mts->open_count < 0)
80350 - mts->open_count = 0;
80351 + } else if (local_read(&mts->open_count) < 0)
80352 + local_set(&mts->open_count, 0);
80353
80354 return 0;
80355 }
80356 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
80357 index b953fb4..1999c01 100644
80358 --- a/sound/drivers/opl4/opl4_lib.c
80359 +++ b/sound/drivers/opl4/opl4_lib.c
80360 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
80361 MODULE_DESCRIPTION("OPL4 driver");
80362 MODULE_LICENSE("GPL");
80363
80364 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
80365 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
80366 {
80367 int timeout = 10;
80368 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
80369 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
80370 index 3e32bd3..46fc152 100644
80371 --- a/sound/drivers/portman2x4.c
80372 +++ b/sound/drivers/portman2x4.c
80373 @@ -48,6 +48,7 @@
80374 #include <sound/initval.h>
80375 #include <sound/rawmidi.h>
80376 #include <sound/control.h>
80377 +#include <asm/local.h>
80378
80379 #define CARD_NAME "Portman 2x4"
80380 #define DRIVER_NAME "portman"
80381 @@ -85,7 +86,7 @@ struct portman {
80382 struct pardevice *pardev;
80383 int pardev_claimed;
80384
80385 - int open_count;
80386 + local_t open_count;
80387 int mode[PORTMAN_NUM_INPUT_PORTS];
80388 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
80389 };
80390 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
80391 index 87657dd..a8268d4 100644
80392 --- a/sound/firewire/amdtp.c
80393 +++ b/sound/firewire/amdtp.c
80394 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
80395 ptr = s->pcm_buffer_pointer + data_blocks;
80396 if (ptr >= pcm->runtime->buffer_size)
80397 ptr -= pcm->runtime->buffer_size;
80398 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
80399 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
80400
80401 s->pcm_period_pointer += data_blocks;
80402 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
80403 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
80404 */
80405 void amdtp_out_stream_update(struct amdtp_out_stream *s)
80406 {
80407 - ACCESS_ONCE(s->source_node_id_field) =
80408 + ACCESS_ONCE_RW(s->source_node_id_field) =
80409 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
80410 }
80411 EXPORT_SYMBOL(amdtp_out_stream_update);
80412 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
80413 index 537a9cb..8e8c8e9 100644
80414 --- a/sound/firewire/amdtp.h
80415 +++ b/sound/firewire/amdtp.h
80416 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
80417 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
80418 struct snd_pcm_substream *pcm)
80419 {
80420 - ACCESS_ONCE(s->pcm) = pcm;
80421 + ACCESS_ONCE_RW(s->pcm) = pcm;
80422 }
80423
80424 /**
80425 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
80426 index cd094ec..eca1277 100644
80427 --- a/sound/firewire/isight.c
80428 +++ b/sound/firewire/isight.c
80429 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
80430 ptr += count;
80431 if (ptr >= runtime->buffer_size)
80432 ptr -= runtime->buffer_size;
80433 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
80434 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
80435
80436 isight->period_counter += count;
80437 if (isight->period_counter >= runtime->period_size) {
80438 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
80439 if (err < 0)
80440 return err;
80441
80442 - ACCESS_ONCE(isight->pcm_active) = true;
80443 + ACCESS_ONCE_RW(isight->pcm_active) = true;
80444
80445 return 0;
80446 }
80447 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
80448 {
80449 struct isight *isight = substream->private_data;
80450
80451 - ACCESS_ONCE(isight->pcm_active) = false;
80452 + ACCESS_ONCE_RW(isight->pcm_active) = false;
80453
80454 mutex_lock(&isight->mutex);
80455 isight_stop_streaming(isight);
80456 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
80457
80458 switch (cmd) {
80459 case SNDRV_PCM_TRIGGER_START:
80460 - ACCESS_ONCE(isight->pcm_running) = true;
80461 + ACCESS_ONCE_RW(isight->pcm_running) = true;
80462 break;
80463 case SNDRV_PCM_TRIGGER_STOP:
80464 - ACCESS_ONCE(isight->pcm_running) = false;
80465 + ACCESS_ONCE_RW(isight->pcm_running) = false;
80466 break;
80467 default:
80468 return -EINVAL;
80469 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
80470 index 7bd5e33..1fcab12 100644
80471 --- a/sound/isa/cmi8330.c
80472 +++ b/sound/isa/cmi8330.c
80473 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
80474
80475 struct snd_pcm *pcm;
80476 struct snd_cmi8330_stream {
80477 - struct snd_pcm_ops ops;
80478 + snd_pcm_ops_no_const ops;
80479 snd_pcm_open_callback_t open;
80480 void *private_data; /* sb or wss */
80481 } streams[2];
80482 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
80483 index 733b014..56ce96f 100644
80484 --- a/sound/oss/sb_audio.c
80485 +++ b/sound/oss/sb_audio.c
80486 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
80487 buf16 = (signed short *)(localbuf + localoffs);
80488 while (c)
80489 {
80490 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80491 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80492 if (copy_from_user(lbuf8,
80493 userbuf+useroffs + p,
80494 locallen))
80495 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
80496 index 09d4648..cf234c7 100644
80497 --- a/sound/oss/swarm_cs4297a.c
80498 +++ b/sound/oss/swarm_cs4297a.c
80499 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
80500 {
80501 struct cs4297a_state *s;
80502 u32 pwr, id;
80503 - mm_segment_t fs;
80504 int rval;
80505 #ifndef CONFIG_BCM_CS4297A_CSWARM
80506 u64 cfg;
80507 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
80508 if (!rval) {
80509 char *sb1250_duart_present;
80510
80511 +#if 0
80512 + mm_segment_t fs;
80513 fs = get_fs();
80514 set_fs(KERNEL_DS);
80515 -#if 0
80516 val = SOUND_MASK_LINE;
80517 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
80518 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
80519 val = initvol[i].vol;
80520 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
80521 }
80522 + set_fs(fs);
80523 // cs4297a_write_ac97(s, 0x18, 0x0808);
80524 #else
80525 // cs4297a_write_ac97(s, 0x5e, 0x180);
80526 cs4297a_write_ac97(s, 0x02, 0x0808);
80527 cs4297a_write_ac97(s, 0x18, 0x0808);
80528 #endif
80529 - set_fs(fs);
80530
80531 list_add(&s->list, &cs4297a_devs);
80532
80533 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
80534 index f0f1943..8e1f96c 100644
80535 --- a/sound/pci/hda/hda_codec.h
80536 +++ b/sound/pci/hda/hda_codec.h
80537 @@ -611,7 +611,7 @@ struct hda_bus_ops {
80538 /* notify power-up/down from codec to controller */
80539 void (*pm_notify)(struct hda_bus *bus);
80540 #endif
80541 -};
80542 +} __no_const;
80543
80544 /* template to pass to the bus constructor */
80545 struct hda_bus_template {
80546 @@ -713,6 +713,7 @@ struct hda_codec_ops {
80547 #endif
80548 void (*reboot_notify)(struct hda_codec *codec);
80549 };
80550 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
80551
80552 /* record for amp information cache */
80553 struct hda_cache_head {
80554 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
80555 struct snd_pcm_substream *substream);
80556 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
80557 struct snd_pcm_substream *substream);
80558 -};
80559 +} __no_const;
80560
80561 /* PCM information for each substream */
80562 struct hda_pcm_stream {
80563 @@ -801,7 +802,7 @@ struct hda_codec {
80564 const char *modelname; /* model name for preset */
80565
80566 /* set by patch */
80567 - struct hda_codec_ops patch_ops;
80568 + hda_codec_ops_no_const patch_ops;
80569
80570 /* PCM to create, set by patch_ops.build_pcms callback */
80571 unsigned int num_pcms;
80572 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
80573 index 0da778a..bc38b84 100644
80574 --- a/sound/pci/ice1712/ice1712.h
80575 +++ b/sound/pci/ice1712/ice1712.h
80576 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
80577 unsigned int mask_flags; /* total mask bits */
80578 struct snd_akm4xxx_ops {
80579 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
80580 - } ops;
80581 + } __no_const ops;
80582 };
80583
80584 struct snd_ice1712_spdif {
80585 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
80586 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80587 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80588 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80589 - } ops;
80590 + } __no_const ops;
80591 };
80592
80593
80594 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
80595 index 12a9a2b..2b6138f 100644
80596 --- a/sound/pci/ymfpci/ymfpci_main.c
80597 +++ b/sound/pci/ymfpci/ymfpci_main.c
80598 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
80599 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
80600 break;
80601 }
80602 - if (atomic_read(&chip->interrupt_sleep_count)) {
80603 - atomic_set(&chip->interrupt_sleep_count, 0);
80604 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80605 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80606 wake_up(&chip->interrupt_sleep);
80607 }
80608 __end:
80609 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
80610 continue;
80611 init_waitqueue_entry(&wait, current);
80612 add_wait_queue(&chip->interrupt_sleep, &wait);
80613 - atomic_inc(&chip->interrupt_sleep_count);
80614 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
80615 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
80616 remove_wait_queue(&chip->interrupt_sleep, &wait);
80617 }
80618 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
80619 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
80620 spin_unlock(&chip->reg_lock);
80621
80622 - if (atomic_read(&chip->interrupt_sleep_count)) {
80623 - atomic_set(&chip->interrupt_sleep_count, 0);
80624 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80625 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80626 wake_up(&chip->interrupt_sleep);
80627 }
80628 }
80629 @@ -2389,7 +2389,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
80630 spin_lock_init(&chip->reg_lock);
80631 spin_lock_init(&chip->voice_lock);
80632 init_waitqueue_head(&chip->interrupt_sleep);
80633 - atomic_set(&chip->interrupt_sleep_count, 0);
80634 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80635 chip->card = card;
80636 chip->pci = pci;
80637 chip->irq = -1;
80638 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
80639 index cdc860a..db34a93 100644
80640 --- a/sound/soc/soc-pcm.c
80641 +++ b/sound/soc/soc-pcm.c
80642 @@ -605,7 +605,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
80643 struct snd_soc_platform *platform = rtd->platform;
80644 struct snd_soc_dai *codec_dai = rtd->codec_dai;
80645 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
80646 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
80647 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
80648 struct snd_pcm *pcm;
80649 char new_name[64];
80650 int ret = 0, playback = 0, capture = 0;
80651 diff --git a/sound/usb/card.h b/sound/usb/card.h
80652 index da5fa1a..113cd02 100644
80653 --- a/sound/usb/card.h
80654 +++ b/sound/usb/card.h
80655 @@ -45,6 +45,7 @@ struct snd_urb_ops {
80656 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80657 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80658 };
80659 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
80660
80661 struct snd_usb_substream {
80662 struct snd_usb_stream *stream;
80663 @@ -94,7 +95,7 @@ struct snd_usb_substream {
80664 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
80665 spinlock_t lock;
80666
80667 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
80668 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
80669 int last_frame_number; /* stored frame number */
80670 int last_delay; /* stored delay */
80671 };
80672 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
80673 new file mode 100644
80674 index 0000000..ca64170
80675 --- /dev/null
80676 +++ b/tools/gcc/Makefile
80677 @@ -0,0 +1,26 @@
80678 +#CC := gcc
80679 +#PLUGIN_SOURCE_FILES := pax_plugin.c
80680 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
80681 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
80682 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
80683 +
80684 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
80685 +CFLAGS_size_overflow_plugin.o := -Wno-missing-initializer
80686 +
80687 +hostlibs-y := constify_plugin.so
80688 +hostlibs-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
80689 +hostlibs-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
80690 +hostlibs-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
80691 +hostlibs-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
80692 +hostlibs-y += colorize_plugin.so
80693 +hostlibs-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
80694 +
80695 +always := $(hostlibs-y)
80696 +
80697 +constify_plugin-objs := constify_plugin.o
80698 +stackleak_plugin-objs := stackleak_plugin.o
80699 +kallocstat_plugin-objs := kallocstat_plugin.o
80700 +kernexec_plugin-objs := kernexec_plugin.o
80701 +checker_plugin-objs := checker_plugin.o
80702 +colorize_plugin-objs := colorize_plugin.o
80703 +size_overflow_plugin-objs := size_overflow_plugin.o
80704 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
80705 new file mode 100644
80706 index 0000000..d41b5af
80707 --- /dev/null
80708 +++ b/tools/gcc/checker_plugin.c
80709 @@ -0,0 +1,171 @@
80710 +/*
80711 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80712 + * Licensed under the GPL v2
80713 + *
80714 + * Note: the choice of the license means that the compilation process is
80715 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80716 + * but for the kernel it doesn't matter since it doesn't link against
80717 + * any of the gcc libraries
80718 + *
80719 + * gcc plugin to implement various sparse (source code checker) features
80720 + *
80721 + * TODO:
80722 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
80723 + *
80724 + * BUGS:
80725 + * - none known
80726 + */
80727 +#include "gcc-plugin.h"
80728 +#include "config.h"
80729 +#include "system.h"
80730 +#include "coretypes.h"
80731 +#include "tree.h"
80732 +#include "tree-pass.h"
80733 +#include "flags.h"
80734 +#include "intl.h"
80735 +#include "toplev.h"
80736 +#include "plugin.h"
80737 +//#include "expr.h" where are you...
80738 +#include "diagnostic.h"
80739 +#include "plugin-version.h"
80740 +#include "tm.h"
80741 +#include "function.h"
80742 +#include "basic-block.h"
80743 +#include "gimple.h"
80744 +#include "rtl.h"
80745 +#include "emit-rtl.h"
80746 +#include "tree-flow.h"
80747 +#include "target.h"
80748 +
80749 +extern void c_register_addr_space (const char *str, addr_space_t as);
80750 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
80751 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
80752 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
80753 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
80754 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
80755 +
80756 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80757 +extern rtx emit_move_insn(rtx x, rtx y);
80758 +
80759 +int plugin_is_GPL_compatible;
80760 +
80761 +static struct plugin_info checker_plugin_info = {
80762 + .version = "201111150100",
80763 +};
80764 +
80765 +#define ADDR_SPACE_KERNEL 0
80766 +#define ADDR_SPACE_FORCE_KERNEL 1
80767 +#define ADDR_SPACE_USER 2
80768 +#define ADDR_SPACE_FORCE_USER 3
80769 +#define ADDR_SPACE_IOMEM 0
80770 +#define ADDR_SPACE_FORCE_IOMEM 0
80771 +#define ADDR_SPACE_PERCPU 0
80772 +#define ADDR_SPACE_FORCE_PERCPU 0
80773 +#define ADDR_SPACE_RCU 0
80774 +#define ADDR_SPACE_FORCE_RCU 0
80775 +
80776 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
80777 +{
80778 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
80779 +}
80780 +
80781 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
80782 +{
80783 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
80784 +}
80785 +
80786 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
80787 +{
80788 + return default_addr_space_valid_pointer_mode(mode, as);
80789 +}
80790 +
80791 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
80792 +{
80793 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
80794 +}
80795 +
80796 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
80797 +{
80798 + return default_addr_space_legitimize_address(x, oldx, mode, as);
80799 +}
80800 +
80801 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
80802 +{
80803 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
80804 + return true;
80805 +
80806 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
80807 + return true;
80808 +
80809 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
80810 + return true;
80811 +
80812 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
80813 + return true;
80814 +
80815 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
80816 + return true;
80817 +
80818 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
80819 + return true;
80820 +
80821 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
80822 + return true;
80823 +
80824 + return subset == superset;
80825 +}
80826 +
80827 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
80828 +{
80829 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80830 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80831 +
80832 + return op;
80833 +}
80834 +
80835 +static void register_checker_address_spaces(void *event_data, void *data)
80836 +{
80837 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80838 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80839 + c_register_addr_space("__user", ADDR_SPACE_USER);
80840 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80841 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80842 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80843 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80844 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80845 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80846 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80847 +
80848 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80849 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80850 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80851 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80852 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80853 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80854 + targetm.addr_space.convert = checker_addr_space_convert;
80855 +}
80856 +
80857 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80858 +{
80859 + const char * const plugin_name = plugin_info->base_name;
80860 + const int argc = plugin_info->argc;
80861 + const struct plugin_argument * const argv = plugin_info->argv;
80862 + int i;
80863 +
80864 + if (!plugin_default_version_check(version, &gcc_version)) {
80865 + error(G_("incompatible gcc/plugin versions"));
80866 + return 1;
80867 + }
80868 +
80869 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80870 +
80871 + for (i = 0; i < argc; ++i)
80872 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80873 +
80874 + if (TARGET_64BIT == 0)
80875 + return 0;
80876 +
80877 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80878 +
80879 + return 0;
80880 +}
80881 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
80882 new file mode 100644
80883 index 0000000..ee950d0
80884 --- /dev/null
80885 +++ b/tools/gcc/colorize_plugin.c
80886 @@ -0,0 +1,147 @@
80887 +/*
80888 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
80889 + * Licensed under the GPL v2
80890 + *
80891 + * Note: the choice of the license means that the compilation process is
80892 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80893 + * but for the kernel it doesn't matter since it doesn't link against
80894 + * any of the gcc libraries
80895 + *
80896 + * gcc plugin to colorize diagnostic output
80897 + *
80898 + */
80899 +
80900 +#include "gcc-plugin.h"
80901 +#include "config.h"
80902 +#include "system.h"
80903 +#include "coretypes.h"
80904 +#include "tree.h"
80905 +#include "tree-pass.h"
80906 +#include "flags.h"
80907 +#include "intl.h"
80908 +#include "toplev.h"
80909 +#include "plugin.h"
80910 +#include "diagnostic.h"
80911 +#include "plugin-version.h"
80912 +#include "tm.h"
80913 +
80914 +int plugin_is_GPL_compatible;
80915 +
80916 +static struct plugin_info colorize_plugin_info = {
80917 + .version = "201203092200",
80918 +};
80919 +
80920 +#define GREEN "\033[32m\033[2m"
80921 +#define LIGHTGREEN "\033[32m\033[1m"
80922 +#define YELLOW "\033[33m\033[2m"
80923 +#define LIGHTYELLOW "\033[33m\033[1m"
80924 +#define RED "\033[31m\033[2m"
80925 +#define LIGHTRED "\033[31m\033[1m"
80926 +#define BLUE "\033[34m\033[2m"
80927 +#define LIGHTBLUE "\033[34m\033[1m"
80928 +#define BRIGHT "\033[m\033[1m"
80929 +#define NORMAL "\033[m"
80930 +
80931 +static diagnostic_starter_fn old_starter;
80932 +static diagnostic_finalizer_fn old_finalizer;
80933 +
80934 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80935 +{
80936 + const char *color;
80937 + char *newprefix;
80938 +
80939 + switch (diagnostic->kind) {
80940 + case DK_NOTE:
80941 + color = LIGHTBLUE;
80942 + break;
80943 +
80944 + case DK_PEDWARN:
80945 + case DK_WARNING:
80946 + color = LIGHTYELLOW;
80947 + break;
80948 +
80949 + case DK_ERROR:
80950 + case DK_FATAL:
80951 + case DK_ICE:
80952 + case DK_PERMERROR:
80953 + case DK_SORRY:
80954 + color = LIGHTRED;
80955 + break;
80956 +
80957 + default:
80958 + color = NORMAL;
80959 + }
80960 +
80961 + old_starter(context, diagnostic);
80962 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
80963 + return;
80964 + pp_destroy_prefix(context->printer);
80965 + pp_set_prefix(context->printer, newprefix);
80966 +}
80967 +
80968 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80969 +{
80970 + old_finalizer(context, diagnostic);
80971 +}
80972 +
80973 +static void colorize_arm(void)
80974 +{
80975 + old_starter = diagnostic_starter(global_dc);
80976 + old_finalizer = diagnostic_finalizer(global_dc);
80977 +
80978 + diagnostic_starter(global_dc) = start_colorize;
80979 + diagnostic_finalizer(global_dc) = finalize_colorize;
80980 +}
80981 +
80982 +static unsigned int execute_colorize_rearm(void)
80983 +{
80984 + if (diagnostic_starter(global_dc) == start_colorize)
80985 + return 0;
80986 +
80987 + colorize_arm();
80988 + return 0;
80989 +}
80990 +
80991 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
80992 + .pass = {
80993 + .type = SIMPLE_IPA_PASS,
80994 + .name = "colorize_rearm",
80995 + .gate = NULL,
80996 + .execute = execute_colorize_rearm,
80997 + .sub = NULL,
80998 + .next = NULL,
80999 + .static_pass_number = 0,
81000 + .tv_id = TV_NONE,
81001 + .properties_required = 0,
81002 + .properties_provided = 0,
81003 + .properties_destroyed = 0,
81004 + .todo_flags_start = 0,
81005 + .todo_flags_finish = 0
81006 + }
81007 +};
81008 +
81009 +static void colorize_start_unit(void *gcc_data, void *user_data)
81010 +{
81011 + colorize_arm();
81012 +}
81013 +
81014 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81015 +{
81016 + const char * const plugin_name = plugin_info->base_name;
81017 + struct register_pass_info colorize_rearm_pass_info = {
81018 + .pass = &pass_ipa_colorize_rearm.pass,
81019 + .reference_pass_name = "*free_lang_data",
81020 + .ref_pass_instance_number = 0,
81021 + .pos_op = PASS_POS_INSERT_AFTER
81022 + };
81023 +
81024 + if (!plugin_default_version_check(version, &gcc_version)) {
81025 + error(G_("incompatible gcc/plugin versions"));
81026 + return 1;
81027 + }
81028 +
81029 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
81030 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
81031 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
81032 + return 0;
81033 +}
81034 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
81035 new file mode 100644
81036 index 0000000..88a7438
81037 --- /dev/null
81038 +++ b/tools/gcc/constify_plugin.c
81039 @@ -0,0 +1,303 @@
81040 +/*
81041 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
81042 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
81043 + * Licensed under the GPL v2, or (at your option) v3
81044 + *
81045 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
81046 + *
81047 + * Homepage:
81048 + * http://www.grsecurity.net/~ephox/const_plugin/
81049 + *
81050 + * Usage:
81051 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
81052 + * $ gcc -fplugin=constify_plugin.so test.c -O2
81053 + */
81054 +
81055 +#include "gcc-plugin.h"
81056 +#include "config.h"
81057 +#include "system.h"
81058 +#include "coretypes.h"
81059 +#include "tree.h"
81060 +#include "tree-pass.h"
81061 +#include "flags.h"
81062 +#include "intl.h"
81063 +#include "toplev.h"
81064 +#include "plugin.h"
81065 +#include "diagnostic.h"
81066 +#include "plugin-version.h"
81067 +#include "tm.h"
81068 +#include "function.h"
81069 +#include "basic-block.h"
81070 +#include "gimple.h"
81071 +#include "rtl.h"
81072 +#include "emit-rtl.h"
81073 +#include "tree-flow.h"
81074 +
81075 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
81076 +
81077 +int plugin_is_GPL_compatible;
81078 +
81079 +static struct plugin_info const_plugin_info = {
81080 + .version = "201111150100",
81081 + .help = "no-constify\tturn off constification\n",
81082 +};
81083 +
81084 +static void constify_type(tree type);
81085 +static bool walk_struct(tree node);
81086 +
81087 +static tree deconstify_type(tree old_type)
81088 +{
81089 + tree new_type, field;
81090 +
81091 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
81092 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
81093 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
81094 + DECL_FIELD_CONTEXT(field) = new_type;
81095 + TYPE_READONLY(new_type) = 0;
81096 + C_TYPE_FIELDS_READONLY(new_type) = 0;
81097 + return new_type;
81098 +}
81099 +
81100 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
81101 +{
81102 + tree type;
81103 +
81104 + *no_add_attrs = true;
81105 + if (TREE_CODE(*node) == FUNCTION_DECL) {
81106 + error("%qE attribute does not apply to functions", name);
81107 + return NULL_TREE;
81108 + }
81109 +
81110 + if (TREE_CODE(*node) == VAR_DECL) {
81111 + error("%qE attribute does not apply to variables", name);
81112 + return NULL_TREE;
81113 + }
81114 +
81115 + if (TYPE_P(*node)) {
81116 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
81117 + *no_add_attrs = false;
81118 + else
81119 + error("%qE attribute applies to struct and union types only", name);
81120 + return NULL_TREE;
81121 + }
81122 +
81123 + type = TREE_TYPE(*node);
81124 +
81125 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
81126 + error("%qE attribute applies to struct and union types only", name);
81127 + return NULL_TREE;
81128 + }
81129 +
81130 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
81131 + error("%qE attribute is already applied to the type", name);
81132 + return NULL_TREE;
81133 + }
81134 +
81135 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
81136 + error("%qE attribute used on type that is not constified", name);
81137 + return NULL_TREE;
81138 + }
81139 +
81140 + if (TREE_CODE(*node) == TYPE_DECL) {
81141 + TREE_TYPE(*node) = deconstify_type(type);
81142 + TREE_READONLY(*node) = 0;
81143 + return NULL_TREE;
81144 + }
81145 +
81146 + return NULL_TREE;
81147 +}
81148 +
81149 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
81150 +{
81151 + *no_add_attrs = true;
81152 + if (!TYPE_P(*node)) {
81153 + error("%qE attribute applies to types only", name);
81154 + return NULL_TREE;
81155 + }
81156 +
81157 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
81158 + error("%qE attribute applies to struct and union types only", name);
81159 + return NULL_TREE;
81160 + }
81161 +
81162 + *no_add_attrs = false;
81163 + constify_type(*node);
81164 + return NULL_TREE;
81165 +}
81166 +
81167 +static struct attribute_spec no_const_attr = {
81168 + .name = "no_const",
81169 + .min_length = 0,
81170 + .max_length = 0,
81171 + .decl_required = false,
81172 + .type_required = false,
81173 + .function_type_required = false,
81174 + .handler = handle_no_const_attribute,
81175 +#if BUILDING_GCC_VERSION >= 4007
81176 + .affects_type_identity = true
81177 +#endif
81178 +};
81179 +
81180 +static struct attribute_spec do_const_attr = {
81181 + .name = "do_const",
81182 + .min_length = 0,
81183 + .max_length = 0,
81184 + .decl_required = false,
81185 + .type_required = false,
81186 + .function_type_required = false,
81187 + .handler = handle_do_const_attribute,
81188 +#if BUILDING_GCC_VERSION >= 4007
81189 + .affects_type_identity = true
81190 +#endif
81191 +};
81192 +
81193 +static void register_attributes(void *event_data, void *data)
81194 +{
81195 + register_attribute(&no_const_attr);
81196 + register_attribute(&do_const_attr);
81197 +}
81198 +
81199 +static void constify_type(tree type)
81200 +{
81201 + TYPE_READONLY(type) = 1;
81202 + C_TYPE_FIELDS_READONLY(type) = 1;
81203 +}
81204 +
81205 +static bool is_fptr(tree field)
81206 +{
81207 + tree ptr = TREE_TYPE(field);
81208 +
81209 + if (TREE_CODE(ptr) != POINTER_TYPE)
81210 + return false;
81211 +
81212 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
81213 +}
81214 +
81215 +static bool walk_struct(tree node)
81216 +{
81217 + tree field;
81218 +
81219 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
81220 + return false;
81221 +
81222 + if (TYPE_FIELDS(node) == NULL_TREE)
81223 + return false;
81224 +
81225 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
81226 + tree type = TREE_TYPE(field);
81227 + enum tree_code code = TREE_CODE(type);
81228 + if (code == RECORD_TYPE || code == UNION_TYPE) {
81229 + if (!(walk_struct(type)))
81230 + return false;
81231 + } else if (!is_fptr(field) && !TREE_READONLY(field))
81232 + return false;
81233 + }
81234 + return true;
81235 +}
81236 +
81237 +static void finish_type(void *event_data, void *data)
81238 +{
81239 + tree type = (tree)event_data;
81240 +
81241 + if (type == NULL_TREE)
81242 + return;
81243 +
81244 + if (TYPE_READONLY(type))
81245 + return;
81246 +
81247 + if (walk_struct(type))
81248 + constify_type(type);
81249 +}
81250 +
81251 +static unsigned int check_local_variables(void);
81252 +
81253 +struct gimple_opt_pass pass_local_variable = {
81254 + {
81255 + .type = GIMPLE_PASS,
81256 + .name = "check_local_variables",
81257 + .gate = NULL,
81258 + .execute = check_local_variables,
81259 + .sub = NULL,
81260 + .next = NULL,
81261 + .static_pass_number = 0,
81262 + .tv_id = TV_NONE,
81263 + .properties_required = 0,
81264 + .properties_provided = 0,
81265 + .properties_destroyed = 0,
81266 + .todo_flags_start = 0,
81267 + .todo_flags_finish = 0
81268 + }
81269 +};
81270 +
81271 +static unsigned int check_local_variables(void)
81272 +{
81273 + tree var;
81274 + referenced_var_iterator rvi;
81275 +
81276 +#if BUILDING_GCC_VERSION == 4005
81277 + FOR_EACH_REFERENCED_VAR(var, rvi) {
81278 +#else
81279 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
81280 +#endif
81281 + tree type = TREE_TYPE(var);
81282 +
81283 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
81284 + continue;
81285 +
81286 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
81287 + continue;
81288 +
81289 + if (!TYPE_READONLY(type))
81290 + continue;
81291 +
81292 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
81293 +// continue;
81294 +
81295 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
81296 +// continue;
81297 +
81298 + if (walk_struct(type)) {
81299 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
81300 + return 1;
81301 + }
81302 + }
81303 + return 0;
81304 +}
81305 +
81306 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81307 +{
81308 + const char * const plugin_name = plugin_info->base_name;
81309 + const int argc = plugin_info->argc;
81310 + const struct plugin_argument * const argv = plugin_info->argv;
81311 + int i;
81312 + bool constify = true;
81313 +
81314 + struct register_pass_info local_variable_pass_info = {
81315 + .pass = &pass_local_variable.pass,
81316 + .reference_pass_name = "*referenced_vars",
81317 + .ref_pass_instance_number = 0,
81318 + .pos_op = PASS_POS_INSERT_AFTER
81319 + };
81320 +
81321 + if (!plugin_default_version_check(version, &gcc_version)) {
81322 + error(G_("incompatible gcc/plugin versions"));
81323 + return 1;
81324 + }
81325 +
81326 + for (i = 0; i < argc; ++i) {
81327 + if (!(strcmp(argv[i].key, "no-constify"))) {
81328 + constify = false;
81329 + continue;
81330 + }
81331 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81332 + }
81333 +
81334 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
81335 + if (constify) {
81336 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
81337 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
81338 + }
81339 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
81340 +
81341 + return 0;
81342 +}
81343 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
81344 new file mode 100644
81345 index 0000000..a5eabce
81346 --- /dev/null
81347 +++ b/tools/gcc/kallocstat_plugin.c
81348 @@ -0,0 +1,167 @@
81349 +/*
81350 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81351 + * Licensed under the GPL v2
81352 + *
81353 + * Note: the choice of the license means that the compilation process is
81354 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81355 + * but for the kernel it doesn't matter since it doesn't link against
81356 + * any of the gcc libraries
81357 + *
81358 + * gcc plugin to find the distribution of k*alloc sizes
81359 + *
81360 + * TODO:
81361 + *
81362 + * BUGS:
81363 + * - none known
81364 + */
81365 +#include "gcc-plugin.h"
81366 +#include "config.h"
81367 +#include "system.h"
81368 +#include "coretypes.h"
81369 +#include "tree.h"
81370 +#include "tree-pass.h"
81371 +#include "flags.h"
81372 +#include "intl.h"
81373 +#include "toplev.h"
81374 +#include "plugin.h"
81375 +//#include "expr.h" where are you...
81376 +#include "diagnostic.h"
81377 +#include "plugin-version.h"
81378 +#include "tm.h"
81379 +#include "function.h"
81380 +#include "basic-block.h"
81381 +#include "gimple.h"
81382 +#include "rtl.h"
81383 +#include "emit-rtl.h"
81384 +
81385 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81386 +
81387 +int plugin_is_GPL_compatible;
81388 +
81389 +static const char * const kalloc_functions[] = {
81390 + "__kmalloc",
81391 + "kmalloc",
81392 + "kmalloc_large",
81393 + "kmalloc_node",
81394 + "kmalloc_order",
81395 + "kmalloc_order_trace",
81396 + "kmalloc_slab",
81397 + "kzalloc",
81398 + "kzalloc_node",
81399 +};
81400 +
81401 +static struct plugin_info kallocstat_plugin_info = {
81402 + .version = "201111150100",
81403 +};
81404 +
81405 +static unsigned int execute_kallocstat(void);
81406 +
81407 +static struct gimple_opt_pass kallocstat_pass = {
81408 + .pass = {
81409 + .type = GIMPLE_PASS,
81410 + .name = "kallocstat",
81411 + .gate = NULL,
81412 + .execute = execute_kallocstat,
81413 + .sub = NULL,
81414 + .next = NULL,
81415 + .static_pass_number = 0,
81416 + .tv_id = TV_NONE,
81417 + .properties_required = 0,
81418 + .properties_provided = 0,
81419 + .properties_destroyed = 0,
81420 + .todo_flags_start = 0,
81421 + .todo_flags_finish = 0
81422 + }
81423 +};
81424 +
81425 +static bool is_kalloc(const char *fnname)
81426 +{
81427 + size_t i;
81428 +
81429 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
81430 + if (!strcmp(fnname, kalloc_functions[i]))
81431 + return true;
81432 + return false;
81433 +}
81434 +
81435 +static unsigned int execute_kallocstat(void)
81436 +{
81437 + basic_block bb;
81438 +
81439 + // 1. loop through BBs and GIMPLE statements
81440 + FOR_EACH_BB(bb) {
81441 + gimple_stmt_iterator gsi;
81442 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81443 + // gimple match:
81444 + tree fndecl, size;
81445 + gimple call_stmt;
81446 + const char *fnname;
81447 +
81448 + // is it a call
81449 + call_stmt = gsi_stmt(gsi);
81450 + if (!is_gimple_call(call_stmt))
81451 + continue;
81452 + fndecl = gimple_call_fndecl(call_stmt);
81453 + if (fndecl == NULL_TREE)
81454 + continue;
81455 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
81456 + continue;
81457 +
81458 + // is it a call to k*alloc
81459 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
81460 + if (!is_kalloc(fnname))
81461 + continue;
81462 +
81463 + // is the size arg the result of a simple const assignment
81464 + size = gimple_call_arg(call_stmt, 0);
81465 + while (true) {
81466 + gimple def_stmt;
81467 + expanded_location xloc;
81468 + size_t size_val;
81469 +
81470 + if (TREE_CODE(size) != SSA_NAME)
81471 + break;
81472 + def_stmt = SSA_NAME_DEF_STMT(size);
81473 + if (!def_stmt || !is_gimple_assign(def_stmt))
81474 + break;
81475 + if (gimple_num_ops(def_stmt) != 2)
81476 + break;
81477 + size = gimple_assign_rhs1(def_stmt);
81478 + if (!TREE_CONSTANT(size))
81479 + continue;
81480 + xloc = expand_location(gimple_location(def_stmt));
81481 + if (!xloc.file)
81482 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
81483 + size_val = TREE_INT_CST_LOW(size);
81484 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
81485 + break;
81486 + }
81487 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81488 +//debug_tree(gimple_call_fn(call_stmt));
81489 +//print_node(stderr, "pax", fndecl, 4);
81490 + }
81491 + }
81492 +
81493 + return 0;
81494 +}
81495 +
81496 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81497 +{
81498 + const char * const plugin_name = plugin_info->base_name;
81499 + struct register_pass_info kallocstat_pass_info = {
81500 + .pass = &kallocstat_pass.pass,
81501 + .reference_pass_name = "ssa",
81502 + .ref_pass_instance_number = 0,
81503 + .pos_op = PASS_POS_INSERT_AFTER
81504 + };
81505 +
81506 + if (!plugin_default_version_check(version, &gcc_version)) {
81507 + error(G_("incompatible gcc/plugin versions"));
81508 + return 1;
81509 + }
81510 +
81511 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
81512 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
81513 +
81514 + return 0;
81515 +}
81516 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
81517 new file mode 100644
81518 index 0000000..d8a8da2
81519 --- /dev/null
81520 +++ b/tools/gcc/kernexec_plugin.c
81521 @@ -0,0 +1,427 @@
81522 +/*
81523 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81524 + * Licensed under the GPL v2
81525 + *
81526 + * Note: the choice of the license means that the compilation process is
81527 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81528 + * but for the kernel it doesn't matter since it doesn't link against
81529 + * any of the gcc libraries
81530 + *
81531 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
81532 + *
81533 + * TODO:
81534 + *
81535 + * BUGS:
81536 + * - none known
81537 + */
81538 +#include "gcc-plugin.h"
81539 +#include "config.h"
81540 +#include "system.h"
81541 +#include "coretypes.h"
81542 +#include "tree.h"
81543 +#include "tree-pass.h"
81544 +#include "flags.h"
81545 +#include "intl.h"
81546 +#include "toplev.h"
81547 +#include "plugin.h"
81548 +//#include "expr.h" where are you...
81549 +#include "diagnostic.h"
81550 +#include "plugin-version.h"
81551 +#include "tm.h"
81552 +#include "function.h"
81553 +#include "basic-block.h"
81554 +#include "gimple.h"
81555 +#include "rtl.h"
81556 +#include "emit-rtl.h"
81557 +#include "tree-flow.h"
81558 +
81559 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81560 +extern rtx emit_move_insn(rtx x, rtx y);
81561 +
81562 +int plugin_is_GPL_compatible;
81563 +
81564 +static struct plugin_info kernexec_plugin_info = {
81565 + .version = "201111291120",
81566 + .help = "method=[bts|or]\tinstrumentation method\n"
81567 +};
81568 +
81569 +static unsigned int execute_kernexec_reload(void);
81570 +static unsigned int execute_kernexec_fptr(void);
81571 +static unsigned int execute_kernexec_retaddr(void);
81572 +static bool kernexec_cmodel_check(void);
81573 +
81574 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
81575 +static void (*kernexec_instrument_retaddr)(rtx);
81576 +
81577 +static struct gimple_opt_pass kernexec_reload_pass = {
81578 + .pass = {
81579 + .type = GIMPLE_PASS,
81580 + .name = "kernexec_reload",
81581 + .gate = kernexec_cmodel_check,
81582 + .execute = execute_kernexec_reload,
81583 + .sub = NULL,
81584 + .next = NULL,
81585 + .static_pass_number = 0,
81586 + .tv_id = TV_NONE,
81587 + .properties_required = 0,
81588 + .properties_provided = 0,
81589 + .properties_destroyed = 0,
81590 + .todo_flags_start = 0,
81591 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81592 + }
81593 +};
81594 +
81595 +static struct gimple_opt_pass kernexec_fptr_pass = {
81596 + .pass = {
81597 + .type = GIMPLE_PASS,
81598 + .name = "kernexec_fptr",
81599 + .gate = kernexec_cmodel_check,
81600 + .execute = execute_kernexec_fptr,
81601 + .sub = NULL,
81602 + .next = NULL,
81603 + .static_pass_number = 0,
81604 + .tv_id = TV_NONE,
81605 + .properties_required = 0,
81606 + .properties_provided = 0,
81607 + .properties_destroyed = 0,
81608 + .todo_flags_start = 0,
81609 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81610 + }
81611 +};
81612 +
81613 +static struct rtl_opt_pass kernexec_retaddr_pass = {
81614 + .pass = {
81615 + .type = RTL_PASS,
81616 + .name = "kernexec_retaddr",
81617 + .gate = kernexec_cmodel_check,
81618 + .execute = execute_kernexec_retaddr,
81619 + .sub = NULL,
81620 + .next = NULL,
81621 + .static_pass_number = 0,
81622 + .tv_id = TV_NONE,
81623 + .properties_required = 0,
81624 + .properties_provided = 0,
81625 + .properties_destroyed = 0,
81626 + .todo_flags_start = 0,
81627 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
81628 + }
81629 +};
81630 +
81631 +static bool kernexec_cmodel_check(void)
81632 +{
81633 + tree section;
81634 +
81635 + if (ix86_cmodel != CM_KERNEL)
81636 + return false;
81637 +
81638 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
81639 + if (!section || !TREE_VALUE(section))
81640 + return true;
81641 +
81642 + section = TREE_VALUE(TREE_VALUE(section));
81643 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
81644 + return true;
81645 +
81646 + return false;
81647 +}
81648 +
81649 +/*
81650 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
81651 + */
81652 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
81653 +{
81654 + gimple asm_movabs_stmt;
81655 +
81656 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
81657 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
81658 + gimple_asm_set_volatile(asm_movabs_stmt, true);
81659 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
81660 + update_stmt(asm_movabs_stmt);
81661 +}
81662 +
81663 +/*
81664 + * find all asm() stmts that clobber r10 and add a reload of r10
81665 + */
81666 +static unsigned int execute_kernexec_reload(void)
81667 +{
81668 + basic_block bb;
81669 +
81670 + // 1. loop through BBs and GIMPLE statements
81671 + FOR_EACH_BB(bb) {
81672 + gimple_stmt_iterator gsi;
81673 +
81674 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81675 + // gimple match: __asm__ ("" : : : "r10");
81676 + gimple asm_stmt;
81677 + size_t nclobbers;
81678 +
81679 + // is it an asm ...
81680 + asm_stmt = gsi_stmt(gsi);
81681 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
81682 + continue;
81683 +
81684 + // ... clobbering r10
81685 + nclobbers = gimple_asm_nclobbers(asm_stmt);
81686 + while (nclobbers--) {
81687 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
81688 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
81689 + continue;
81690 + kernexec_reload_fptr_mask(&gsi);
81691 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
81692 + break;
81693 + }
81694 + }
81695 + }
81696 +
81697 + return 0;
81698 +}
81699 +
81700 +/*
81701 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
81702 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
81703 + */
81704 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
81705 +{
81706 + gimple assign_intptr, assign_new_fptr, call_stmt;
81707 + tree intptr, old_fptr, new_fptr, kernexec_mask;
81708 +
81709 + call_stmt = gsi_stmt(*gsi);
81710 + old_fptr = gimple_call_fn(call_stmt);
81711 +
81712 + // create temporary unsigned long variable used for bitops and cast fptr to it
81713 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
81714 + add_referenced_var(intptr);
81715 + mark_sym_for_renaming(intptr);
81716 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
81717 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81718 + update_stmt(assign_intptr);
81719 +
81720 + // apply logical or to temporary unsigned long and bitmask
81721 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
81722 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
81723 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
81724 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81725 + update_stmt(assign_intptr);
81726 +
81727 + // cast temporary unsigned long back to a temporary fptr variable
81728 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
81729 + add_referenced_var(new_fptr);
81730 + mark_sym_for_renaming(new_fptr);
81731 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
81732 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
81733 + update_stmt(assign_new_fptr);
81734 +
81735 + // replace call stmt fn with the new fptr
81736 + gimple_call_set_fn(call_stmt, new_fptr);
81737 + update_stmt(call_stmt);
81738 +}
81739 +
81740 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
81741 +{
81742 + gimple asm_or_stmt, call_stmt;
81743 + tree old_fptr, new_fptr, input, output;
81744 + VEC(tree, gc) *inputs = NULL;
81745 + VEC(tree, gc) *outputs = NULL;
81746 +
81747 + call_stmt = gsi_stmt(*gsi);
81748 + old_fptr = gimple_call_fn(call_stmt);
81749 +
81750 + // create temporary fptr variable
81751 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
81752 + add_referenced_var(new_fptr);
81753 + mark_sym_for_renaming(new_fptr);
81754 +
81755 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
81756 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
81757 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
81758 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
81759 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
81760 + VEC_safe_push(tree, gc, inputs, input);
81761 + VEC_safe_push(tree, gc, outputs, output);
81762 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
81763 + gimple_asm_set_volatile(asm_or_stmt, true);
81764 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
81765 + update_stmt(asm_or_stmt);
81766 +
81767 + // replace call stmt fn with the new fptr
81768 + gimple_call_set_fn(call_stmt, new_fptr);
81769 + update_stmt(call_stmt);
81770 +}
81771 +
81772 +/*
81773 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
81774 + */
81775 +static unsigned int execute_kernexec_fptr(void)
81776 +{
81777 + basic_block bb;
81778 +
81779 + // 1. loop through BBs and GIMPLE statements
81780 + FOR_EACH_BB(bb) {
81781 + gimple_stmt_iterator gsi;
81782 +
81783 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81784 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
81785 + tree fn;
81786 + gimple call_stmt;
81787 +
81788 + // is it a call ...
81789 + call_stmt = gsi_stmt(gsi);
81790 + if (!is_gimple_call(call_stmt))
81791 + continue;
81792 + fn = gimple_call_fn(call_stmt);
81793 + if (TREE_CODE(fn) == ADDR_EXPR)
81794 + continue;
81795 + if (TREE_CODE(fn) != SSA_NAME)
81796 + gcc_unreachable();
81797 +
81798 + // ... through a function pointer
81799 + fn = SSA_NAME_VAR(fn);
81800 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
81801 + continue;
81802 + fn = TREE_TYPE(fn);
81803 + if (TREE_CODE(fn) != POINTER_TYPE)
81804 + continue;
81805 + fn = TREE_TYPE(fn);
81806 + if (TREE_CODE(fn) != FUNCTION_TYPE)
81807 + continue;
81808 +
81809 + kernexec_instrument_fptr(&gsi);
81810 +
81811 +//debug_tree(gimple_call_fn(call_stmt));
81812 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81813 + }
81814 + }
81815 +
81816 + return 0;
81817 +}
81818 +
81819 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
81820 +static void kernexec_instrument_retaddr_bts(rtx insn)
81821 +{
81822 + rtx btsq;
81823 + rtvec argvec, constraintvec, labelvec;
81824 + int line;
81825 +
81826 + // create asm volatile("btsq $63,(%%rsp)":::)
81827 + argvec = rtvec_alloc(0);
81828 + constraintvec = rtvec_alloc(0);
81829 + labelvec = rtvec_alloc(0);
81830 + line = expand_location(RTL_LOCATION(insn)).line;
81831 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81832 + MEM_VOLATILE_P(btsq) = 1;
81833 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
81834 + emit_insn_before(btsq, insn);
81835 +}
81836 +
81837 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
81838 +static void kernexec_instrument_retaddr_or(rtx insn)
81839 +{
81840 + rtx orq;
81841 + rtvec argvec, constraintvec, labelvec;
81842 + int line;
81843 +
81844 + // create asm volatile("orq %%r10,(%%rsp)":::)
81845 + argvec = rtvec_alloc(0);
81846 + constraintvec = rtvec_alloc(0);
81847 + labelvec = rtvec_alloc(0);
81848 + line = expand_location(RTL_LOCATION(insn)).line;
81849 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81850 + MEM_VOLATILE_P(orq) = 1;
81851 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
81852 + emit_insn_before(orq, insn);
81853 +}
81854 +
81855 +/*
81856 + * find all asm level function returns and forcibly set the highest bit of the return address
81857 + */
81858 +static unsigned int execute_kernexec_retaddr(void)
81859 +{
81860 + rtx insn;
81861 +
81862 + // 1. find function returns
81863 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81864 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
81865 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
81866 + rtx body;
81867 +
81868 + // is it a retn
81869 + if (!JUMP_P(insn))
81870 + continue;
81871 + body = PATTERN(insn);
81872 + if (GET_CODE(body) == PARALLEL)
81873 + body = XVECEXP(body, 0, 0);
81874 + if (GET_CODE(body) != RETURN)
81875 + continue;
81876 + kernexec_instrument_retaddr(insn);
81877 + }
81878 +
81879 +// print_simple_rtl(stderr, get_insns());
81880 +// print_rtl(stderr, get_insns());
81881 +
81882 + return 0;
81883 +}
81884 +
81885 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81886 +{
81887 + const char * const plugin_name = plugin_info->base_name;
81888 + const int argc = plugin_info->argc;
81889 + const struct plugin_argument * const argv = plugin_info->argv;
81890 + int i;
81891 + struct register_pass_info kernexec_reload_pass_info = {
81892 + .pass = &kernexec_reload_pass.pass,
81893 + .reference_pass_name = "ssa",
81894 + .ref_pass_instance_number = 0,
81895 + .pos_op = PASS_POS_INSERT_AFTER
81896 + };
81897 + struct register_pass_info kernexec_fptr_pass_info = {
81898 + .pass = &kernexec_fptr_pass.pass,
81899 + .reference_pass_name = "ssa",
81900 + .ref_pass_instance_number = 0,
81901 + .pos_op = PASS_POS_INSERT_AFTER
81902 + };
81903 + struct register_pass_info kernexec_retaddr_pass_info = {
81904 + .pass = &kernexec_retaddr_pass.pass,
81905 + .reference_pass_name = "pro_and_epilogue",
81906 + .ref_pass_instance_number = 0,
81907 + .pos_op = PASS_POS_INSERT_AFTER
81908 + };
81909 +
81910 + if (!plugin_default_version_check(version, &gcc_version)) {
81911 + error(G_("incompatible gcc/plugin versions"));
81912 + return 1;
81913 + }
81914 +
81915 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
81916 +
81917 + if (TARGET_64BIT == 0)
81918 + return 0;
81919 +
81920 + for (i = 0; i < argc; ++i) {
81921 + if (!strcmp(argv[i].key, "method")) {
81922 + if (!argv[i].value) {
81923 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81924 + continue;
81925 + }
81926 + if (!strcmp(argv[i].value, "bts")) {
81927 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
81928 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
81929 + } else if (!strcmp(argv[i].value, "or")) {
81930 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
81931 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
81932 + fix_register("r10", 1, 1);
81933 + } else
81934 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81935 + continue;
81936 + }
81937 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81938 + }
81939 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
81940 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
81941 +
81942 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
81943 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
81944 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
81945 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
81946 +
81947 + return 0;
81948 +}
81949 diff --git a/tools/gcc/size_overflow_hash1.h b/tools/gcc/size_overflow_hash1.h
81950 new file mode 100644
81951 index 0000000..16ccac1
81952 --- /dev/null
81953 +++ b/tools/gcc/size_overflow_hash1.h
81954 @@ -0,0 +1,3047 @@
81955 +struct size_overflow_hash size_overflow_hash1[65536] = {
81956 + [10013].file = "security/smack/smackfs.c",
81957 + [10013].name = "smk_write_direct",
81958 + [10013].param3 = 1,
81959 + [10167].file = "sound/core/oss/pcm_plugin.c",
81960 + [10167].name = "snd_pcm_plugin_build",
81961 + [10167].param5 = 1,
81962 + [1020].file = "drivers/usb/misc/usbtest.c",
81963 + [1020].name = "test_unaligned_bulk",
81964 + [1020].param3 = 1,
81965 + [1022].file = "sound/pci/rme9652/rme9652.c",
81966 + [1022].name = "snd_rme9652_playback_copy",
81967 + [1022].param5 = 1,
81968 + [10321].file = "drivers/platform/x86/thinkpad_acpi.c",
81969 + [10321].name = "create_attr_set",
81970 + [10321].param1 = 1,
81971 + [10341].file = "fs/nfsd/nfs4xdr.c",
81972 + [10341].name = "read_buf",
81973 + [10341].param2 = 1,
81974 + [10357].file = "net/sunrpc/cache.c",
81975 + [10357].name = "cache_read",
81976 + [10357].param3 = 1,
81977 + [10397].file = "drivers/gpu/drm/i915/i915_debugfs.c",
81978 + [10397].name = "i915_wedged_write",
81979 + [10397].param3 = 1,
81980 + [10399].file = "kernel/trace/trace.c",
81981 + [10399].name = "trace_seq_to_user",
81982 + [10399].param3 = 1,
81983 + [10414].file = "drivers/tty/vt/vt.c",
81984 + [10414].name = "vc_do_resize",
81985 + [10414].param3 = 1,
81986 + [10414].param4 = 1,
81987 + [10565].file = "drivers/input/touchscreen/ad7879-spi.c",
81988 + [10565].name = "ad7879_spi_multi_read",
81989 + [10565].param3 = 1,
81990 + [10623].file = "drivers/infiniband/core/user_mad.c",
81991 + [10623].name = "ib_umad_write",
81992 + [10623].param3 = 1,
81993 + [10707].file = "fs/nfs/idmap.c",
81994 + [10707].name = "nfs_idmap_request_key",
81995 + [10707].param2 = 1,
81996 + [1073].file = "drivers/block/aoe/aoecmd.c",
81997 + [1073].name = "addtgt",
81998 + [1073].param3 = 1,
81999 + [10745].file = "fs/cifs/connect.c",
82000 + [10745].name = "get_server_iovec",
82001 + [10745].param2 = 1,
82002 + [10750].file = "drivers/net/wireless/iwmc3200wifi/rx.c",
82003 + [10750].name = "iwm_ntf_calib_res",
82004 + [10750].param3 = 1,
82005 + [10773].file = "drivers/input/mousedev.c",
82006 + [10773].name = "mousedev_read",
82007 + [10773].param3 = 1,
82008 + [10777].file = "fs/ntfs/file.c",
82009 + [10777].name = "ntfs_file_buffered_write",
82010 + [10777].param6 = 1,
82011 + [10893].file = "drivers/misc/sgi-gru/gruprocfs.c",
82012 + [10893].name = "options_write",
82013 + [10893].param3 = 1,
82014 + [10919].file = "net/ipv4/netfilter/arp_tables.c",
82015 + [10919].name = "do_arpt_set_ctl",
82016 + [10919].param4 = 1,
82017 + [1107].file = "mm/process_vm_access.c",
82018 + [1107].name = "process_vm_rw_single_vec",
82019 + [1107].param1 = 1,
82020 + [1107].param2 = 1,
82021 + [11230].file = "net/core/neighbour.c",
82022 + [11230].name = "neigh_hash_grow",
82023 + [11230].param2 = 1,
82024 + [11364].file = "fs/ext4/super.c",
82025 + [11364].name = "ext4_kvzalloc",
82026 + [11364].param1 = 1,
82027 + [114].file = "security/selinux/selinuxfs.c",
82028 + [114].name = "sel_write_relabel",
82029 + [114].param3 = 1,
82030 + [11549].file = "drivers/media/rc/redrat3.c",
82031 + [11549].name = "redrat3_transmit_ir",
82032 + [11549].param3 = 1,
82033 + [11568].file = "drivers/gpu/drm/drm_scatter.c",
82034 + [11568].name = "drm_vmalloc_dma",
82035 + [11568].param1 = 1,
82036 + [11582].file = "drivers/scsi/lpfc/lpfc_sli.c",
82037 + [11582].name = "lpfc_sli4_queue_alloc",
82038 + [11582].param3 = 1,
82039 + [11616].file = "security/selinux/selinuxfs.c",
82040 + [11616].name = "sel_write_enforce",
82041 + [11616].param3 = 1,
82042 + [11699].file = "drivers/net/ethernet/neterion/vxge/vxge-config.h",
82043 + [11699].name = "vxge_os_dma_malloc",
82044 + [11699].param2 = 1,
82045 + [11766].file = "drivers/block/paride/pt.c",
82046 + [11766].name = "pt_read",
82047 + [11766].param3 = 1,
82048 + [11784].file = "fs/bio.c",
82049 + [11784].name = "bio_kmalloc",
82050 + [11784].param2 = 1,
82051 + [11919].file = "drivers/lguest/core.c",
82052 + [11919].name = "__lgread",
82053 + [11919].param4 = 1,
82054 + [11925].file = "drivers/media/video/cx18/cx18-fileops.c",
82055 + [11925].name = "cx18_copy_mdl_to_user",
82056 + [11925].param4 = 1,
82057 + [11985].file = "drivers/block/floppy.c",
82058 + [11985].name = "fd_copyin",
82059 + [11985].param3 = 1,
82060 + [11986].file = "drivers/net/usb/asix.c",
82061 + [11986].name = "asix_read_cmd",
82062 + [11986].param5 = 1,
82063 + [12018].file = "sound/core/oss/pcm_oss.c",
82064 + [12018].name = "snd_pcm_oss_read1",
82065 + [12018].param3 = 1,
82066 + [12059].file = "drivers/net/wireless/libertas/debugfs.c",
82067 + [12059].name = "lbs_debugfs_write",
82068 + [12059].param3 = 1,
82069 + [12151].file = "fs/compat.c",
82070 + [12151].name = "compat_rw_copy_check_uvector",
82071 + [12151].param3 = 1,
82072 + [12205].file = "fs/reiserfs/journal.c",
82073 + [12205].name = "reiserfs_allocate_list_bitmaps",
82074 + [12205].param3 = 1,
82075 + [12234].file = "include/acpi/platform/aclinux.h",
82076 + [12234].name = "acpi_os_allocate",
82077 + [12234].param1 = 1,
82078 + [1227].file = "lib/cpu_rmap.c",
82079 + [1227].name = "alloc_cpu_rmap",
82080 + [1227].param1 = 1,
82081 + [12395].file = "drivers/char/hw_random/core.c",
82082 + [12395].name = "rng_dev_read",
82083 + [12395].param3 = 1,
82084 + [12602].file = "net/sunrpc/cache.c",
82085 + [12602].name = "cache_downcall",
82086 + [12602].param3 = 1,
82087 + [12712].file = "drivers/net/wimax/i2400m/fw.c",
82088 + [12712].name = "i2400m_zrealloc_2x",
82089 + [12712].param3 = 1,
82090 + [12755].file = "sound/drivers/opl4/opl4_proc.c",
82091 + [12755].name = "snd_opl4_mem_proc_read",
82092 + [12755].param5 = 1,
82093 + [12833].file = "net/sctp/auth.c",
82094 + [12833].name = "sctp_auth_create_key",
82095 + [12833].param1 = 1,
82096 + [12840].file = "net/sctp/tsnmap.c",
82097 + [12840].name = "sctp_tsnmap_mark",
82098 + [12840].param2 = 1,
82099 + [12931].file = "drivers/hid/hid-roccat.c",
82100 + [12931].name = "roccat_read",
82101 + [12931].param3 = 1,
82102 + [12954].file = "fs/proc/base.c",
82103 + [12954].name = "oom_adjust_write",
82104 + [12954].param3 = 1,
82105 + [13103].file = "drivers/acpi/acpica/utobject.c",
82106 + [13103].name = "acpi_ut_create_string_object",
82107 + [13103].param1 = 1,
82108 + [13121].file = "net/ipv4/ip_sockglue.c",
82109 + [13121].name = "do_ip_setsockopt",
82110 + [13121].param5 = 1,
82111 + [1327].file = "net/netfilter/nfnetlink_log.c",
82112 + [1327].name = "nfulnl_alloc_skb",
82113 + [1327].param2 = 1,
82114 + [13337].file = "net/core/iovec.c",
82115 + [13337].name = "csum_partial_copy_fromiovecend",
82116 + [13337].param4 = 1,
82117 + [13339].file = "security/smack/smackfs.c",
82118 + [13339].name = "smk_write_netlbladdr",
82119 + [13339].param3 = 1,
82120 + [13342].file = "fs/jbd2/journal.c",
82121 + [13342].name = "jbd2_alloc",
82122 + [13342].param1 = 1,
82123 + [13384].file = "drivers/char/virtio_console.c",
82124 + [13384].name = "alloc_buf",
82125 + [13384].param1 = 1,
82126 + [13412].file = "fs/proc/base.c",
82127 + [13412].name = "oom_score_adj_write",
82128 + [13412].param3 = 1,
82129 + [13559].file = "drivers/media/video/ivtv/ivtv-fileops.c",
82130 + [13559].name = "ivtv_read",
82131 + [13559].param3 = 1,
82132 + [13618].file = "drivers/net/team/team.c",
82133 + [13618].name = "team_options_register",
82134 + [13618].param3 = 1,
82135 + [13659].file = "drivers/net/wan/hdlc.c",
82136 + [13659].name = "attach_hdlc_protocol",
82137 + [13659].param3 = 1,
82138 + [13708].file = "drivers/usb/misc/usbtest.c",
82139 + [13708].name = "simple_alloc_urb",
82140 + [13708].param3 = 1,
82141 + [13805].file = "drivers/misc/altera-stapl/altera-jtag.c",
82142 + [13805].name = "altera_swap_dr",
82143 + [13805].param2 = 1,
82144 + [13868].file = "fs/lockd/mon.c",
82145 + [13868].name = "nsm_create_handle",
82146 + [13868].param4 = 1,
82147 + [13924].file = "net/ipv4/netfilter/ip_tables.c",
82148 + [13924].name = "do_ipt_set_ctl",
82149 + [13924].param4 = 1,
82150 + [14019].file = "net/dns_resolver/dns_key.c",
82151 + [14019].name = "dns_resolver_instantiate",
82152 + [14019].param2 = 1,
82153 + [14019].param3 = 1,
82154 + [14025].file = "net/ax25/af_ax25.c",
82155 + [14025].name = "ax25_setsockopt",
82156 + [14025].param5 = 1,
82157 + [14029].file = "drivers/spi/spidev.c",
82158 + [14029].name = "spidev_compat_ioctl",
82159 + [14029].param2 = 1,
82160 + [14090].file = "drivers/bluetooth/btmrvl_debugfs.c",
82161 + [14090].name = "btmrvl_hsmode_write",
82162 + [14090].param3 = 1,
82163 + [14149].file = "drivers/hid/hidraw.c",
82164 + [14149].name = "hidraw_ioctl",
82165 + [14149].param2 = 1,
82166 + [14153].file = "drivers/staging/bcm/led_control.c",
82167 + [14153].name = "ValidateDSDParamsChecksum",
82168 + [14153].param3 = 1,
82169 + [14174].file = "sound/pci/es1938.c",
82170 + [14174].name = "snd_es1938_capture_copy",
82171 + [14174].param5 = 1,
82172 + [14207].file = "drivers/media/video/v4l2-event.c",
82173 + [14207].name = "v4l2_event_subscribe",
82174 + [14207].param3 = 1,
82175 + [14241].file = "drivers/platform/x86/asus_acpi.c",
82176 + [14241].name = "brn_proc_write",
82177 + [14241].param3 = 1,
82178 + [14345].file = "fs/cachefiles/daemon.c",
82179 + [14345].name = "cachefiles_daemon_write",
82180 + [14345].param3 = 1,
82181 + [14347].file = "drivers/media/dvb/dvb-core/dvb_ca_en50221.c",
82182 + [14347].name = "dvb_ca_en50221_io_write",
82183 + [14347].param3 = 1,
82184 + [14566].file = "drivers/pci/hotplug/ibmphp_ebda.c",
82185 + [14566].name = "alloc_ebda_hpc",
82186 + [14566].param1 = 1,
82187 + [14566].param2 = 1,
82188 + [1458].file = "drivers/misc/lkdtm.c",
82189 + [1458].name = "direct_entry",
82190 + [1458].param3 = 1,
82191 + [14646].file = "fs/compat.c",
82192 + [14646].name = "compat_writev",
82193 + [14646].param3 = 1,
82194 + [14684].file = "drivers/media/video/stk-webcam.c",
82195 + [14684].name = "stk_allocate_buffers",
82196 + [14684].param2 = 1,
82197 + [14736].file = "drivers/usb/misc/usbtest.c",
82198 + [14736].name = "unlink_queued",
82199 + [14736].param3 = 1,
82200 + [1482].file = "drivers/scsi/scsi_netlink.c",
82201 + [1482].name = "scsi_nl_send_vendor_msg",
82202 + [1482].param5 = 1,
82203 + [15017].file = "drivers/edac/edac_device.c",
82204 + [15017].name = "edac_device_alloc_ctl_info",
82205 + [15017].param1 = 1,
82206 + [15044].file = "drivers/uio/uio.c",
82207 + [15044].name = "uio_write",
82208 + [15044].param3 = 1,
82209 + [15087].file = "fs/bio.c",
82210 + [15087].name = "bio_map_kern",
82211 + [15087].param2 = 1,
82212 + [15087].param3 = 1,
82213 + [15112].file = "drivers/xen/evtchn.c",
82214 + [15112].name = "evtchn_write",
82215 + [15112].param3 = 1,
82216 + [15130].file = "net/bluetooth/hci_core.c",
82217 + [15130].name = "hci_send_cmd",
82218 + [15130].param3 = 1,
82219 + [15202].file = "net/bluetooth/rfcomm/tty.c",
82220 + [15202].name = "rfcomm_wmalloc",
82221 + [15202].param2 = 1,
82222 + [15274].file = "crypto/shash.c",
82223 + [15274].name = "crypto_shash_setkey",
82224 + [15274].param3 = 1,
82225 + [15354].file = "drivers/isdn/mISDN/socket.c",
82226 + [15354].name = "mISDN_sock_sendmsg",
82227 + [15354].param4 = 1,
82228 + [15361].file = "drivers/char/agp/generic.c",
82229 + [15361].name = "agp_allocate_memory",
82230 + [15361].param2 = 1,
82231 + [15497].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
82232 + [15497].name = "ts_read",
82233 + [15497].param3 = 1,
82234 + [15551].file = "net/ipv4/netfilter/ipt_CLUSTERIP.c",
82235 + [15551].name = "clusterip_proc_write",
82236 + [15551].param3 = 1,
82237 + [15701].file = "drivers/hid/hid-roccat-common.c",
82238 + [15701].name = "roccat_common_receive",
82239 + [15701].param4 = 1,
82240 + [1572].file = "net/ceph/pagevec.c",
82241 + [1572].name = "ceph_copy_page_vector_to_user",
82242 + [1572].param4 = 1,
82243 + [15814].file = "net/mac80211/debugfs_netdev.c",
82244 + [15814].name = "ieee80211_if_write",
82245 + [15814].param3 = 1,
82246 + [15883].file = "security/keys/keyctl.c",
82247 + [15883].name = "sys_add_key",
82248 + [15883].param4 = 1,
82249 + [15884].file = "fs/exofs/super.c",
82250 + [15884].name = "exofs_read_lookup_dev_table",
82251 + [15884].param3 = 1,
82252 + [16037].file = "drivers/staging/media/easycap/easycap_sound.c",
82253 + [16037].name = "easycap_alsa_vmalloc",
82254 + [16037].param2 = 1,
82255 + [16073].file = "net/sctp/socket.c",
82256 + [16073].name = "sctp_setsockopt",
82257 + [16073].param5 = 1,
82258 + [16132].file = "drivers/staging/vme/devices/vme_user.c",
82259 + [16132].name = "buffer_from_user",
82260 + [16132].param3 = 1,
82261 + [16138].file = "security/selinux/ss/services.c",
82262 + [16138].name = "security_context_to_sid_force",
82263 + [16138].param2 = 1,
82264 + [16166].file = "drivers/platform/x86/thinkpad_acpi.c",
82265 + [16166].name = "dispatch_proc_write",
82266 + [16166].param3 = 1,
82267 + [16229].file = "drivers/scsi/scsi_transport_iscsi.c",
82268 + [16229].name = "iscsi_offload_mesg",
82269 + [16229].param5 = 1,
82270 + [16353].file = "drivers/base/regmap/regmap.c",
82271 + [16353].name = "regmap_raw_write",
82272 + [16353].param4 = 1,
82273 + [16383].file = "fs/proc/base.c",
82274 + [16383].name = "comm_write",
82275 + [16383].param3 = 1,
82276 + [16396].file = "drivers/misc/altera-stapl/altera-jtag.c",
82277 + [16396].name = "altera_irscan",
82278 + [16396].param2 = 1,
82279 + [16447].file = "drivers/hid/usbhid/hiddev.c",
82280 + [16447].name = "hiddev_ioctl",
82281 + [16447].param2 = 1,
82282 + [16453].file = "include/linux/slab.h",
82283 + [16453].name = "kzalloc",
82284 + [16453].param1 = 1,
82285 + [16605].file = "fs/ecryptfs/miscdev.c",
82286 + [16605].name = "ecryptfs_send_miscdev",
82287 + [16605].param2 = 1,
82288 + [16606].file = "drivers/ide/ide-tape.c",
82289 + [16606].name = "idetape_chrdev_write",
82290 + [16606].param3 = 1,
82291 + [16637].file = "security/keys/encrypted-keys/encrypted.c",
82292 + [16637].name = "datablob_hmac_verify",
82293 + [16637].param4 = 1,
82294 + [16828].file = "net/batman-adv/hash.c",
82295 + [16828].name = "hash_new",
82296 + [16828].param1 = 1,
82297 + [16853].file = "drivers/net/ethernet/chelsio/cxgb4vf/sge.c",
82298 + [16853].name = "t4vf_pktgl_to_skb",
82299 + [16853].param2 = 1,
82300 + [16911].file = "drivers/media/dvb/ttpci/av7110_hw.c",
82301 + [16911].name = "LoadBitmap",
82302 + [16911].param2 = 1,
82303 + [169].file = "drivers/net/ethernet/amd/pcnet32.c",
82304 + [169].name = "pcnet32_realloc_rx_ring",
82305 + [169].param3 = 1,
82306 + [17075].file = "sound/isa/gus/gus_dram.c",
82307 + [17075].name = "snd_gus_dram_write",
82308 + [17075].param4 = 1,
82309 + [17133].file = "drivers/usb/misc/iowarrior.c",
82310 + [17133].name = "iowarrior_read",
82311 + [17133].param3 = 1,
82312 + [17185].file = "net/wireless/scan.c",
82313 + [17185].name = "cfg80211_inform_bss",
82314 + [17185].param8 = 1,
82315 + [17349].file = "net/tipc/link.c",
82316 + [17349].name = "tipc_link_send_sections_fast",
82317 + [17349].param4 = 1,
82318 + [17377].file = "drivers/usb/class/cdc-wdm.c",
82319 + [17377].name = "wdm_write",
82320 + [17377].param3 = 1,
82321 + [17459].file = "drivers/usb/misc/rio500.c",
82322 + [17459].name = "write_rio",
82323 + [17459].param3 = 1,
82324 + [17460].file = "fs/nfsd/nfscache.c",
82325 + [17460].name = "nfsd_cache_update",
82326 + [17460].param3 = 1,
82327 + [17492].file = "net/dccp/proto.c",
82328 + [17492].name = "do_dccp_setsockopt",
82329 + [17492].param5 = 1,
82330 + [1754].file = "sound/core/oss/pcm_oss.c",
82331 + [1754].name = "snd_pcm_oss_write",
82332 + [1754].param3 = 1,
82333 + [17604].file = "fs/proc/generic.c",
82334 + [17604].name = "__proc_file_read",
82335 + [17604].param3 = 1,
82336 + [17718].file = "net/caif/caif_socket.c",
82337 + [17718].name = "setsockopt",
82338 + [17718].param5 = 1,
82339 + [17828].file = "kernel/sched/core.c",
82340 + [17828].name = "sched_feat_write",
82341 + [17828].param3 = 1,
82342 + [17841].file = "drivers/misc/tifm_core.c",
82343 + [17841].name = "tifm_alloc_adapter",
82344 + [17841].param1 = 1,
82345 + [17946].file = "drivers/net/wireless/libertas/if_spi.c",
82346 + [17946].name = "if_spi_host_to_card",
82347 + [17946].param4 = 1,
82348 + [1800].file = "drivers/media/dvb/dvb-core/dmxdev.c",
82349 + [1800].name = "dvb_dvr_do_ioctl",
82350 + [1800].param3 = 1,
82351 + [18119].file = "drivers/misc/iwmc3200top/fw-download.c",
82352 + [18119].name = "iwmct_fw_parser_init",
82353 + [18119].param4 = 1,
82354 + [18140].file = "drivers/scsi/pm8001/pm8001_ctl.c",
82355 + [18140].name = "pm8001_store_update_fw",
82356 + [18140].param4 = 1,
82357 + [18191].file = "sound/pci/hda/patch_realtek.c",
82358 + [18191].name = "new_bind_ctl",
82359 + [18191].param2 = 1,
82360 + [18224].file = "drivers/xen/grant-table.c",
82361 + [18224].name = "gnttab_map",
82362 + [18224].param2 = 1,
82363 + [18232].file = "fs/nfs/write.c",
82364 + [18232].name = "nfs_writedata_alloc",
82365 + [18232].param1 = 1,
82366 + [18247].file = "drivers/char/agp/generic.c",
82367 + [18247].name = "agp_create_user_memory",
82368 + [18247].param1 = 1,
82369 + [18303].file = "fs/xattr.c",
82370 + [18303].name = "getxattr",
82371 + [18303].param4 = 1,
82372 + [18353].file = "net/rfkill/core.c",
82373 + [18353].name = "rfkill_fop_read",
82374 + [18353].param3 = 1,
82375 + [18386].file = "fs/read_write.c",
82376 + [18386].name = "vfs_readv",
82377 + [18386].param3 = 1,
82378 + [18391].file = "fs/ocfs2/stack_user.c",
82379 + [18391].name = "ocfs2_control_write",
82380 + [18391].param3 = 1,
82381 + [183].file = "crypto/ahash.c",
82382 + [183].name = "crypto_ahash_setkey",
82383 + [183].param3 = 1,
82384 + [18406].file = "drivers/media/video/tm6000/tm6000-core.c",
82385 + [18406].name = "tm6000_read_write_usb",
82386 + [18406].param7 = 1,
82387 + [1845].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
82388 + [1845].name = "rt2x00debug_write_rf",
82389 + [1845].param3 = 1,
82390 + [18465].file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
82391 + [18465].name = "cxgb_alloc_mem",
82392 + [18465].param1 = 1,
82393 + [184].file = "drivers/firewire/nosy.c",
82394 + [184].name = "packet_buffer_init",
82395 + [184].param2 = 1,
82396 + [1858].file = "net/ipv6/netfilter/ip6_tables.c",
82397 + [1858].name = "do_ip6t_set_ctl",
82398 + [1858].param4 = 1,
82399 + [18659].file = "drivers/media/dvb/dvb-core/dvbdev.c",
82400 + [18659].name = "dvb_usercopy",
82401 + [18659].param2 = 1,
82402 + [18722].file = "security/tomoyo/condition.c",
82403 + [18722].name = "tomoyo_scan_bprm",
82404 + [18722].param2 = 1,
82405 + [18722].param4 = 1,
82406 + [18775].file = "include/linux/textsearch.h",
82407 + [18775].name = "alloc_ts_config",
82408 + [18775].param1 = 1,
82409 + [18940].file = "drivers/usb/host/hwa-hc.c",
82410 + [18940].name = "__hwahc_op_set_gtk",
82411 + [18940].param4 = 1,
82412 + [19012].file = "drivers/acpi/event.c",
82413 + [19012].name = "acpi_system_read_event",
82414 + [19012].param3 = 1,
82415 + [19028].file = "mm/filemap.c",
82416 + [19028].name = "iov_iter_copy_from_user_atomic",
82417 + [19028].param4 = 1,
82418 + [19107].file = "security/smack/smackfs.c",
82419 + [19107].name = "smk_write_load_list",
82420 + [19107].param3 = 1,
82421 + [19240].file = "net/sctp/socket.c",
82422 + [19240].name = "sctp_setsockopt_delayed_ack",
82423 + [19240].param3 = 1,
82424 + [19274].file = "net/core/pktgen.c",
82425 + [19274].name = "pktgen_if_write",
82426 + [19274].param3 = 1,
82427 + [19286].file = "drivers/base/regmap/regmap.c",
82428 + [19286].name = "_regmap_raw_write",
82429 + [19286].param4 = 1,
82430 + [19308].file = "drivers/char/mem.c",
82431 + [19308].name = "read_oldmem",
82432 + [19308].param3 = 1,
82433 + [19343].file = "security/keys/encrypted-keys/encrypted.c",
82434 + [19343].name = "datablob_hmac_append",
82435 + [19343].param3 = 1,
82436 + [19349].file = "drivers/acpi/acpica/utobject.c",
82437 + [19349].name = "acpi_ut_create_package_object",
82438 + [19349].param1 = 1,
82439 + [19453].file = "drivers/net/ethernet/chelsio/cxgb/sge.c",
82440 + [19453].name = "sge_rx",
82441 + [19453].param3 = 1,
82442 + [19504].file = "drivers/usb/serial/garmin_gps.c",
82443 + [19504].name = "pkt_add",
82444 + [19504].param3 = 1,
82445 + [19522].file = "mm/percpu.c",
82446 + [19522].name = "pcpu_mem_zalloc",
82447 + [19522].param1 = 1,
82448 + [19548].file = "drivers/scsi/qla2xxx/qla_init.c",
82449 + [19548].name = "qla2x00_get_ctx_sp",
82450 + [19548].param3 = 1,
82451 + [19592].file = "net/dccp/proto.c",
82452 + [19592].name = "dccp_setsockopt_service",
82453 + [19592].param4 = 1,
82454 + [19726].file = "kernel/trace/trace.c",
82455 + [19726].name = "tracing_set_trace_write",
82456 + [19726].param3 = 1,
82457 + [19738].file = "fs/sysfs/file.c",
82458 + [19738].name = "sysfs_write_file",
82459 + [19738].param3 = 1,
82460 + [19833].file = "drivers/xen/privcmd.c",
82461 + [19833].name = "gather_array",
82462 + [19833].param3 = 1,
82463 + [19910].file = "drivers/media/video/saa7164/saa7164-buffer.c",
82464 + [19910].name = "saa7164_buffer_alloc_user",
82465 + [19910].param2 = 1,
82466 + [19920].file = "drivers/input/joydev.c",
82467 + [19920].name = "joydev_ioctl",
82468 + [19920].param2 = 1,
82469 + [19931].file = "drivers/usb/misc/ftdi-elan.c",
82470 + [19931].name = "ftdi_elan_write",
82471 + [19931].param3 = 1,
82472 + [19960].file = "drivers/usb/class/usblp.c",
82473 + [19960].name = "usblp_read",
82474 + [19960].param3 = 1,
82475 + [1996].file = "drivers/scsi/libsrp.c",
82476 + [1996].name = "srp_target_alloc",
82477 + [1996].param3 = 1,
82478 + [20023].file = "drivers/media/video/gspca/gspca.c",
82479 + [20023].name = "dev_read",
82480 + [20023].param3 = 1,
82481 + [20207].file = "net/core/sock.c",
82482 + [20207].name = "sock_alloc_send_pskb",
82483 + [20207].param2 = 1,
82484 + [20263].file = "kernel/trace/trace_events.c",
82485 + [20263].name = "event_filter_write",
82486 + [20263].param3 = 1,
82487 + [20314].file = "drivers/gpu/drm/drm_hashtab.c",
82488 + [20314].name = "drm_ht_create",
82489 + [20314].param2 = 1,
82490 + [20320].file = "drivers/mfd/sm501.c",
82491 + [20320].name = "sm501_create_subdev",
82492 + [20320].param3 = 1,
82493 + [20320].param4 = 1,
82494 + [20376].file = "mm/nobootmem.c",
82495 + [20376].name = "__alloc_bootmem_nopanic",
82496 + [20376].param1 = 1,
82497 + [20409].file = "drivers/media/dvb/dvb-usb/opera1.c",
82498 + [20409].name = "opera1_usb_i2c_msgxfer",
82499 + [20409].param4 = 1,
82500 + [20473].file = "drivers/mtd/mtdchar.c",
82501 + [20473].name = "mtdchar_write",
82502 + [20473].param3 = 1,
82503 + [20611].file = "net/netfilter/x_tables.c",
82504 + [20611].name = "xt_alloc_table_info",
82505 + [20611].param1 = 1,
82506 + [20618].file = "drivers/staging/crystalhd/crystalhd_lnx.c",
82507 + [20618].name = "chd_dec_fetch_cdata",
82508 + [20618].param3 = 1,
82509 + [20713].file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
82510 + [20713].name = "ttm_bo_io",
82511 + [20713].param5 = 1,
82512 + [20801].file = "drivers/vhost/vhost.c",
82513 + [20801].name = "vhost_add_used_n",
82514 + [20801].param3 = 1,
82515 + [20835].file = "drivers/isdn/i4l/isdn_common.c",
82516 + [20835].name = "isdn_read",
82517 + [20835].param3 = 1,
82518 + [20951].file = "crypto/rng.c",
82519 + [20951].name = "rngapi_reset",
82520 + [20951].param3 = 1,
82521 + [21125].file = "fs/gfs2/dir.c",
82522 + [21125].name = "gfs2_alloc_sort_buffer",
82523 + [21125].param1 = 1,
82524 + [21132].file = "kernel/cgroup.c",
82525 + [21132].name = "cgroup_write_X64",
82526 + [21132].param5 = 1,
82527 + [21138].file = "drivers/uio/uio.c",
82528 + [21138].name = "uio_read",
82529 + [21138].param3 = 1,
82530 + [21193].file = "net/wireless/sme.c",
82531 + [21193].name = "cfg80211_disconnected",
82532 + [21193].param4 = 1,
82533 + [21312].file = "lib/ts_kmp.c",
82534 + [21312].name = "kmp_init",
82535 + [21312].param2 = 1,
82536 + [21335].file = "net/econet/af_econet.c",
82537 + [21335].name = "econet_sendmsg",
82538 + [21335].param4 = 1,
82539 + [21406].file = "fs/libfs.c",
82540 + [21406].name = "simple_write_to_buffer",
82541 + [21406].param2 = 1,
82542 + [21406].param5 = 1,
82543 + [21451].file = "net/netfilter/ipvs/ip_vs_ctl.c",
82544 + [21451].name = "do_ip_vs_set_ctl",
82545 + [21451].param4 = 1,
82546 + [21459].file = "security/smack/smackfs.c",
82547 + [21459].name = "smk_write_doi",
82548 + [21459].param3 = 1,
82549 + [21508].file = "include/linux/usb/wusb.h",
82550 + [21508].name = "wusb_prf_64",
82551 + [21508].param7 = 1,
82552 + [21511].file = "drivers/input/ff-core.c",
82553 + [21511].name = "input_ff_create",
82554 + [21511].param2 = 1,
82555 + [21538].file = "net/bluetooth/l2cap_sock.c",
82556 + [21538].name = "l2cap_sock_setsockopt",
82557 + [21538].param5 = 1,
82558 + [21543].file = "drivers/media/video/gspca/gspca.c",
82559 + [21543].name = "frame_alloc",
82560 + [21543].param4 = 1,
82561 + [21608].file = "drivers/char/tpm/tpm.c",
82562 + [21608].name = "tpm_write",
82563 + [21608].param3 = 1,
82564 + [2160].file = "drivers/net/wireless/ray_cs.c",
82565 + [2160].name = "int_proc_write",
82566 + [2160].param3 = 1,
82567 + [21632].file = "fs/afs/cell.c",
82568 + [21632].name = "afs_cell_create",
82569 + [21632].param2 = 1,
82570 + [21679].file = "drivers/net/wireless/ath/carl9170/debug.c",
82571 + [21679].name = "carl9170_debugfs_write",
82572 + [21679].param3 = 1,
82573 + [21784].file = "crypto/ahash.c",
82574 + [21784].name = "ahash_setkey_unaligned",
82575 + [21784].param3 = 1,
82576 + [2180].file = "drivers/char/ppdev.c",
82577 + [2180].name = "pp_write",
82578 + [2180].param3 = 1,
82579 + [21810].file = "net/core/netprio_cgroup.c",
82580 + [21810].name = "extend_netdev_table",
82581 + [21810].param2 = 1,
82582 + [21906].file = "net/atm/mpc.c",
82583 + [21906].name = "copy_macs",
82584 + [21906].param4 = 1,
82585 + [21946].file = "fs/nfs/idmap.c",
82586 + [21946].name = "nfs_map_name_to_uid",
82587 + [21946].param3 = 1,
82588 + [22052].file = "drivers/net/ethernet/chelsio/cxgb3/sge.c",
82589 + [22052].name = "get_packet_pg",
82590 + [22052].param4 = 1,
82591 + [22085].file = "drivers/staging/sep/sep_driver.c",
82592 + [22085].name = "sep_lock_user_pages",
82593 + [22085].param2 = 1,
82594 + [22085].param3 = 1,
82595 + [22190].file = "drivers/char/tpm/tpm.c",
82596 + [22190].name = "tpm_read",
82597 + [22190].param3 = 1,
82598 + [22291].file = "net/core/pktgen.c",
82599 + [22291].name = "pgctrl_write",
82600 + [22291].param3 = 1,
82601 + [22439].file = "fs/afs/rxrpc.c",
82602 + [22439].name = "afs_alloc_flat_call",
82603 + [22439].param2 = 1,
82604 + [22439].param3 = 1,
82605 + [2243].file = "drivers/scsi/scsi_tgt_lib.c",
82606 + [2243].name = "scsi_tgt_kspace_exec",
82607 + [2243].param8 = 1,
82608 + [22440].file = "drivers/uwb/neh.c",
82609 + [22440].name = "uwb_rc_neh_grok_event",
82610 + [22440].param3 = 1,
82611 + [22611].file = "drivers/staging/android/logger.c",
82612 + [22611].name = "do_write_log_from_user",
82613 + [22611].param3 = 1,
82614 + [22614].file = "drivers/media/video/cx18/cx18-fileops.c",
82615 + [22614].name = "cx18_copy_buf_to_user",
82616 + [22614].param4 = 1,
82617 + [22667].file = "drivers/misc/altera-stapl/altera-jtag.c",
82618 + [22667].name = "altera_set_ir_post",
82619 + [22667].param2 = 1,
82620 + [22772].file = "drivers/target/iscsi/iscsi_target_erl1.c",
82621 + [22772].name = "iscsit_dump_data_payload",
82622 + [22772].param2 = 1,
82623 + [22777].file = "drivers/infiniband/ulp/srp/ib_srp.c",
82624 + [22777].name = "srp_alloc_iu",
82625 + [22777].param2 = 1,
82626 + [22811].file = "drivers/usb/dwc3/debugfs.c",
82627 + [22811].name = "dwc3_mode_write",
82628 + [22811].param3 = 1,
82629 + [22817].file = "drivers/media/video/usbvision/usbvision-core.c",
82630 + [22817].name = "usbvision_rvmalloc",
82631 + [22817].param1 = 1,
82632 + [22864].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
82633 + [22864].name = "ath6kl_add_bss_if_needed",
82634 + [22864].param6 = 1,
82635 + [2286].file = "drivers/scsi/mvumi.c",
82636 + [2286].name = "mvumi_alloc_mem_resource",
82637 + [2286].param3 = 1,
82638 + [22904].file = "security/selinux/ss/services.c",
82639 + [22904].name = "security_context_to_sid_default",
82640 + [22904].param2 = 1,
82641 + [22932].file = "fs/compat.c",
82642 + [22932].name = "compat_sys_writev",
82643 + [22932].param3 = 1,
82644 + [2302].file = "drivers/media/video/stk-webcam.c",
82645 + [2302].name = "v4l_stk_read",
82646 + [2302].param3 = 1,
82647 + [2307].file = "drivers/pcmcia/cistpl.c",
82648 + [2307].name = "pcmcia_replace_cis",
82649 + [2307].param3 = 1,
82650 + [23117].file = "drivers/media/dvb/ttpci/av7110_av.c",
82651 + [23117].name = "dvb_audio_write",
82652 + [23117].param3 = 1,
82653 + [23220].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
82654 + [23220].name = "do_dmabuf_dirty_sou",
82655 + [23220].param7 = 1,
82656 + [23232].file = "drivers/md/persistent-data/dm-space-map-checker.c",
82657 + [23232].name = "sm_checker_extend",
82658 + [23232].param2 = 1,
82659 + [2324].file = "net/ieee802154/wpan-class.c",
82660 + [2324].name = "wpan_phy_alloc",
82661 + [2324].param1 = 1,
82662 + [2328].file = "kernel/trace/ftrace.c",
82663 + [2328].name = "ftrace_pid_write",
82664 + [2328].param3 = 1,
82665 + [23290].file = "fs/proc/base.c",
82666 + [23290].name = "mem_rw",
82667 + [23290].param3 = 1,
82668 + [23449].file = "crypto/blkcipher.c",
82669 + [23449].name = "blkcipher_next_slow",
82670 + [23449].param3 = 1,
82671 + [23449].param4 = 1,
82672 + [23535].file = "ipc/sem.c",
82673 + [23535].name = "sys_semtimedop",
82674 + [23535].param3 = 1,
82675 + [2357].file = "drivers/usb/serial/garmin_gps.c",
82676 + [2357].name = "garmin_read_process",
82677 + [2357].param3 = 1,
82678 + [23589].file = "kernel/relay.c",
82679 + [23589].name = "subbuf_read_actor",
82680 + [23589].param3 = 1,
82681 + [23848].file = "crypto/blkcipher.c",
82682 + [23848].name = "async_setkey",
82683 + [23848].param3 = 1,
82684 + [2386].file = "drivers/acpi/acpica/exnames.c",
82685 + [2386].name = "acpi_ex_allocate_name_string",
82686 + [2386].param2 = 1,
82687 + [2389].file = "net/core/sock.c",
82688 + [2389].name = "sock_rmalloc",
82689 + [2389].param2 = 1,
82690 + [23994].file = "net/bluetooth/mgmt.c",
82691 + [23994].name = "set_powered",
82692 + [23994].param4 = 1,
82693 + [23999].file = "sound/pci/rme9652/hdsp.c",
82694 + [23999].name = "snd_hdsp_capture_copy",
82695 + [23999].param5 = 1,
82696 + [24233].file = "drivers/pci/pcie/aer/aer_inject.c",
82697 + [24233].name = "aer_inject_write",
82698 + [24233].param3 = 1,
82699 + [24359].file = "kernel/power/qos.c",
82700 + [24359].name = "pm_qos_power_write",
82701 + [24359].param3 = 1,
82702 + [24457].file = "fs/btrfs/backref.c",
82703 + [24457].name = "init_data_container",
82704 + [24457].param1 = 1,
82705 + [24719].file = "drivers/input/evdev.c",
82706 + [24719].name = "bits_to_user",
82707 + [24719].param3 = 1,
82708 + [2472].file = "net/ipv4/netfilter/ip_tables.c",
82709 + [2472].name = "compat_do_ipt_set_ctl",
82710 + [2472].param4 = 1,
82711 + [24755].file = "drivers/infiniband/hw/qib/qib_diag.c",
82712 + [24755].name = "qib_diag_write",
82713 + [24755].param3 = 1,
82714 + [24805].file = "security/keys/user_defined.c",
82715 + [24805].name = "user_update",
82716 + [24805].param3 = 1,
82717 + [25036].file = "fs/pipe.c",
82718 + [25036].name = "pipe_iov_copy_from_user",
82719 + [25036].param3 = 1,
82720 + [25078].file = "drivers/net/wireless/p54/fwio.c",
82721 + [25078].name = "p54_download_eeprom",
82722 + [25078].param4 = 1,
82723 + [25127].file = "drivers/scsi/device_handler/scsi_dh_alua.c",
82724 + [25127].name = "realloc_buffer",
82725 + [25127].param2 = 1,
82726 + [25145].file = "net/tipc/link.c",
82727 + [25145].name = "link_send_sections_long",
82728 + [25145].param4 = 1,
82729 + [25157].file = "security/keys/request_key_auth.c",
82730 + [25157].name = "request_key_auth_new",
82731 + [25157].param3 = 1,
82732 + [25158].file = "drivers/net/ethernet/mellanox/mlx4/en_rx.c",
82733 + [25158].name = "mlx4_en_create_rx_ring",
82734 + [25158].param3 = 1,
82735 + [25267].file = "fs/configfs/file.c",
82736 + [25267].name = "configfs_write_file",
82737 + [25267].param3 = 1,
82738 + [25495].file = "drivers/scsi/bfa/bfad_debugfs.c",
82739 + [25495].name = "bfad_debugfs_write_regwr",
82740 + [25495].param3 = 1,
82741 + [25558].file = "fs/proc/task_mmu.c",
82742 + [25558].name = "clear_refs_write",
82743 + [25558].param3 = 1,
82744 + [25692].file = "drivers/net/wireless/ath/ath6kl/wmi.c",
82745 + [25692].name = "ath6kl_wmi_send_action_cmd",
82746 + [25692].param7 = 1,
82747 + [25765].file = "drivers/media/dvb/b2c2/flexcop.c",
82748 + [25765].name = "flexcop_device_kmalloc",
82749 + [25765].param1 = 1,
82750 + [26100].file = "sound/core/info.c",
82751 + [26100].name = "snd_info_entry_write",
82752 + [26100].param3 = 1,
82753 + [26256].file = "fs/hpfs/name.c",
82754 + [26256].name = "hpfs_translate_name",
82755 + [26256].param3 = 1,
82756 + [26394].file = "drivers/hid/hidraw.c",
82757 + [26394].name = "hidraw_get_report",
82758 + [26394].param3 = 1,
82759 + [26494].file = "kernel/signal.c",
82760 + [26494].name = "sys_rt_sigpending",
82761 + [26494].param2 = 1,
82762 + [26497].file = "security/keys/keyctl.c",
82763 + [26497].name = "sys_keyctl",
82764 + [26497].param4 = 1,
82765 + [26533].file = "drivers/block/aoe/aoechr.c",
82766 + [26533].name = "aoechr_write",
82767 + [26533].param3 = 1,
82768 + [26560].file = "crypto/algapi.c",
82769 + [26560].name = "crypto_alloc_instance2",
82770 + [26560].param3 = 1,
82771 + [26605].file = "security/selinux/selinuxfs.c",
82772 + [26605].name = "sel_write_user",
82773 + [26605].param3 = 1,
82774 + [26620].file = "net/bluetooth/mgmt.c",
82775 + [26620].name = "mgmt_control",
82776 + [26620].param3 = 1,
82777 + [26701].file = "drivers/mtd/chips/cfi_util.c",
82778 + [26701].name = "cfi_read_pri",
82779 + [26701].param3 = 1,
82780 + [26757].file = "fs/xattr.c",
82781 + [26757].name = "sys_fgetxattr",
82782 + [26757].param4 = 1,
82783 + [2678].file = "drivers/platform/x86/asus_acpi.c",
82784 + [2678].name = "disp_proc_write",
82785 + [2678].param3 = 1,
82786 + [26834].file = "drivers/gpu/drm/drm_drv.c",
82787 + [26834].name = "drm_ioctl",
82788 + [26834].param2 = 1,
82789 + [26843].file = "drivers/firewire/core-cdev.c",
82790 + [26843].name = "fw_device_op_compat_ioctl",
82791 + [26843].param2 = 1,
82792 + [26845].file = "drivers/scsi/qla2xxx/qla_bsg.c",
82793 + [26845].name = "qla2x00_get_ctx_bsg_sp",
82794 + [26845].param3 = 1,
82795 + [26888].file = "net/bridge/br_ioctl.c",
82796 + [26888].name = "get_fdb_entries",
82797 + [26888].param3 = 1,
82798 + [26962].file = "drivers/usb/class/usbtmc.c",
82799 + [26962].name = "usbtmc_write",
82800 + [26962].param3 = 1,
82801 + [26966].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
82802 + [26966].name = "ts_write",
82803 + [26966].param3 = 1,
82804 + [27004].file = "drivers/misc/hpilo.c",
82805 + [27004].name = "ilo_write",
82806 + [27004].param3 = 1,
82807 + [27025].file = "fs/ntfs/file.c",
82808 + [27025].name = "__ntfs_copy_from_user_iovec_inatomic",
82809 + [27025].param3 = 1,
82810 + [27025].param4 = 1,
82811 + [27061].file = "drivers/firewire/core-cdev.c",
82812 + [27061].name = "iso_callback",
82813 + [27061].param3 = 1,
82814 + [2711].file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
82815 + [2711].name = "dvb_ringbuffer_read_user",
82816 + [2711].param3 = 1,
82817 + [27129].file = "fs/lockd/mon.c",
82818 + [27129].name = "nsm_get_handle",
82819 + [27129].param4 = 1,
82820 + [27142].file = "fs/proc/kcore.c",
82821 + [27142].name = "read_kcore",
82822 + [27142].param3 = 1,
82823 + [27164].file = "include/drm/drm_mem_util.h",
82824 + [27164].name = "drm_calloc_large",
82825 + [27164].param1 = 1,
82826 + [27164].param2 = 1,
82827 + [27176].file = "drivers/mtd/devices/mtd_dataflash.c",
82828 + [27176].name = "otp_read",
82829 + [27176].param2 = 1,
82830 + [27176].param5 = 1,
82831 + [27232].file = "security/apparmor/lib.c",
82832 + [27232].name = "kvmalloc",
82833 + [27232].param1 = 1,
82834 + [27275].file = "drivers/scsi/cxgbi/libcxgbi.c",
82835 + [27275].name = "cxgbi_ddp_reserve",
82836 + [27275].param4 = 1,
82837 + [27280].file = "drivers/net/ethernet/mellanox/mlx4/en_tx.c",
82838 + [27280].name = "mlx4_en_create_tx_ring",
82839 + [27280].param4 = 1,
82840 + [27290].file = "security/selinux/ss/services.c",
82841 + [27290].name = "security_context_to_sid_core",
82842 + [27290].param2 = 1,
82843 + [27302].file = "fs/proc/base.c",
82844 + [27302].name = "proc_loginuid_write",
82845 + [27302].param3 = 1,
82846 + [2730].file = "drivers/target/iscsi/iscsi_target_parameters.c",
82847 + [2730].name = "iscsi_decode_text_input",
82848 + [2730].param4 = 1,
82849 + [27314].file = "net/bluetooth/mgmt.c",
82850 + [27314].name = "cmd_complete",
82851 + [27314].param5 = 1,
82852 + [27472].file = "security/selinux/selinuxfs.c",
82853 + [27472].name = "sel_write_load",
82854 + [27472].param3 = 1,
82855 + [27491].file = "fs/proc/base.c",
82856 + [27491].name = "proc_pid_attr_write",
82857 + [27491].param3 = 1,
82858 + [27568].file = "drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c",
82859 + [27568].name = "t4_alloc_mem",
82860 + [27568].param1 = 1,
82861 + [27582].file = "drivers/platform/x86/asus_acpi.c",
82862 + [27582].name = "ledd_proc_write",
82863 + [27582].param3 = 1,
82864 + [27595].file = "net/core/sock.c",
82865 + [27595].name = "sock_alloc_send_skb",
82866 + [27595].param2 = 1,
82867 + [27648].file = "net/bluetooth/l2cap_core.c",
82868 + [27648].name = "l2cap_bredr_sig_cmd",
82869 + [27648].param3 = 1,
82870 + [27697].file = "drivers/staging/mei/iorw.c",
82871 + [27697].name = "amthi_read",
82872 + [27697].param4 = 1,
82873 + [27911].file = "fs/ext4/resize.c",
82874 + [27911].name = "alloc_flex_gd",
82875 + [27911].param1 = 1,
82876 + [27927].file = "drivers/tty/tty_io.c",
82877 + [27927].name = "redirected_tty_write",
82878 + [27927].param3 = 1,
82879 + [28040].file = "kernel/kfifo.c",
82880 + [28040].name = "__kfifo_alloc",
82881 + [28040].param2 = 1,
82882 + [28040].param3 = 1,
82883 + [28151].file = "mm/filemap_xip.c",
82884 + [28151].name = "do_xip_mapping_read",
82885 + [28151].param5 = 1,
82886 + [28247].file = "net/sctp/tsnmap.c",
82887 + [28247].name = "sctp_tsnmap_init",
82888 + [28247].param2 = 1,
82889 + [28253].file = "include/linux/fb.h",
82890 + [28253].name = "alloc_apertures",
82891 + [28253].param1 = 1,
82892 + [28265].file = "fs/notify/fanotify/fanotify_user.c",
82893 + [28265].name = "fanotify_write",
82894 + [28265].param3 = 1,
82895 + [28316].file = "drivers/input/joydev.c",
82896 + [28316].name = "joydev_ioctl_common",
82897 + [28316].param2 = 1,
82898 + [28359].file = "drivers/spi/spidev.c",
82899 + [28359].name = "spidev_message",
82900 + [28359].param3 = 1,
82901 + [28360].file = "drivers/hid/usbhid/hiddev.c",
82902 + [28360].name = "hiddev_compat_ioctl",
82903 + [28360].param2 = 1,
82904 + [28407].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
82905 + [28407].name = "rt2x00debug_write_csr",
82906 + [28407].param3 = 1,
82907 + [2847].file = "fs/ntfs/file.c",
82908 + [2847].name = "ntfs_copy_from_user",
82909 + [2847].param3 = 1,
82910 + [2847].param5 = 1,
82911 + [28584].file = "drivers/memstick/core/memstick.c",
82912 + [28584].name = "memstick_alloc_host",
82913 + [28584].param1 = 1,
82914 + [28783].file = "drivers/gpu/drm/i915/i915_debugfs.c",
82915 + [28783].name = "i915_cache_sharing_write",
82916 + [28783].param3 = 1,
82917 + [28787].file = "drivers/media/video/videobuf2-core.c",
82918 + [28787].name = "vb2_write",
82919 + [28787].param3 = 1,
82920 + [28879].file = "drivers/base/map.c",
82921 + [28879].name = "kobj_map",
82922 + [28879].param2 = 1,
82923 + [28879].param3 = 1,
82924 + [28889].file = "drivers/char/pcmcia/cm4040_cs.c",
82925 + [28889].name = "cm4040_write",
82926 + [28889].param3 = 1,
82927 + [29073].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
82928 + [29073].name = "vmw_kms_readback",
82929 + [29073].param6 = 1,
82930 + [29085].file = "security/apparmor/apparmorfs.c",
82931 + [29085].name = "profile_load",
82932 + [29085].param3 = 1,
82933 + [29092].file = "lib/lru_cache.c",
82934 + [29092].name = "lc_create",
82935 + [29092].param3 = 1,
82936 + [29257].file = "drivers/vhost/vhost.c",
82937 + [29257].name = "vhost_add_used_and_signal_n",
82938 + [29257].param4 = 1,
82939 + [29267].file = "net/ipv4/fib_trie.c",
82940 + [29267].name = "tnode_alloc",
82941 + [29267].param1 = 1,
82942 + [29338].file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
82943 + [29338].name = "bnad_debugfs_write_regwr",
82944 + [29338].param3 = 1,
82945 + [29353].file = "net/sctp/socket.c",
82946 + [29353].name = "sctp_setsockopt_del_key",
82947 + [29353].param3 = 1,
82948 + [29405].file = "drivers/media/dvb/dvb-usb/dw2102.c",
82949 + [29405].name = "dw210x_op_rw",
82950 + [29405].param6 = 1,
82951 + [29542].file = "net/nfc/nci/core.c",
82952 + [29542].name = "nci_send_cmd",
82953 + [29542].param3 = 1,
82954 + [29714].file = "drivers/scsi/cxgbi/libcxgbi.c",
82955 + [29714].name = "cxgbi_device_register",
82956 + [29714].param1 = 1,
82957 + [29714].param2 = 1,
82958 + [2972].file = "drivers/staging/crystalhd/crystalhd_misc.c",
82959 + [2972].name = "crystalhd_create_dio_pool",
82960 + [2972].param2 = 1,
82961 + [29769].file = "drivers/misc/iwmc3200top/log.c",
82962 + [29769].name = "store_iwmct_log_level",
82963 + [29769].param4 = 1,
82964 + [29792].file = "drivers/staging/bcm/nvm.c",
82965 + [29792].name = "BcmCopySection",
82966 + [29792].param5 = 1,
82967 + [29859].file = "net/rds/page.c",
82968 + [29859].name = "rds_page_copy_user",
82969 + [29859].param4 = 1,
82970 + [29905].file = "mm/nobootmem.c",
82971 + [29905].name = "___alloc_bootmem",
82972 + [29905].param1 = 1,
82973 + [2995].file = "mm/page_alloc.c",
82974 + [2995].name = "alloc_large_system_hash",
82975 + [2995].param2 = 1,
82976 + [30000].file = "drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c",
82977 + [30000].name = "wlc_phy_loadsampletable_nphy",
82978 + [30000].param3 = 1,
82979 + [30242].file = "fs/cifs/cifssmb.c",
82980 + [30242].name = "cifs_readdata_alloc",
82981 + [30242].param1 = 1,
82982 + [30494].file = "net/ceph/buffer.c",
82983 + [30494].name = "ceph_buffer_new",
82984 + [30494].param1 = 1,
82985 + [30590].file = "security/tomoyo/memory.c",
82986 + [30590].name = "tomoyo_commit_ok",
82987 + [30590].param2 = 1,
82988 + [3060].file = "lib/mpi/mpiutil.c",
82989 + [3060].name = "mpi_alloc_limb_space",
82990 + [3060].param1 = 1,
82991 + [30687].file = "drivers/uwb/uwb-debug.c",
82992 + [30687].name = "command_write",
82993 + [30687].param3 = 1,
82994 + [30726].file = "drivers/bluetooth/hci_vhci.c",
82995 + [30726].name = "vhci_get_user",
82996 + [30726].param3 = 1,
82997 + [30873].file = "net/packet/af_packet.c",
82998 + [30873].name = "alloc_one_pg_vec_page",
82999 + [30873].param1 = 1,
83000 + [30970].file = "drivers/staging/hv/storvsc_drv.c",
83001 + [30970].name = "create_bounce_buffer",
83002 + [30970].param3 = 1,
83003 + [310].file = "drivers/block/drbd/drbd_bitmap.c",
83004 + [310].name = "bm_realloc_pages",
83005 + [310].param2 = 1,
83006 + [3119].file = "drivers/misc/ibmasm/command.c",
83007 + [3119].name = "ibmasm_new_command",
83008 + [3119].param2 = 1,
83009 + [31207].file = "drivers/platform/x86/asus_acpi.c",
83010 + [31207].name = "parse_arg",
83011 + [31207].param2 = 1,
83012 + [31287].file = "drivers/scsi/libsrp.c",
83013 + [31287].name = "srp_iu_pool_alloc",
83014 + [31287].param2 = 1,
83015 + [31291].file = "sound/pci/rme9652/rme9652.c",
83016 + [31291].name = "snd_rme9652_capture_copy",
83017 + [31291].param5 = 1,
83018 + [31348].file = "kernel/sched/core.c",
83019 + [31348].name = "sys_sched_getaffinity",
83020 + [31348].param2 = 1,
83021 + [31492].file = "drivers/hid/hidraw.c",
83022 + [31492].name = "hidraw_read",
83023 + [31492].param3 = 1,
83024 + [3170].file = "security/integrity/ima/ima_fs.c",
83025 + [3170].name = "ima_write_policy",
83026 + [3170].param3 = 1,
83027 + [31782].file = "drivers/misc/pti.c",
83028 + [31782].name = "pti_char_write",
83029 + [31782].param3 = 1,
83030 + [31789].file = "fs/file.c",
83031 + [31789].name = "alloc_fdmem",
83032 + [31789].param1 = 1,
83033 + [31957].file = "fs/afs/proc.c",
83034 + [31957].name = "afs_proc_cells_write",
83035 + [31957].param3 = 1,
83036 + [32002].file = "net/sctp/socket.c",
83037 + [32002].name = "sctp_setsockopt_active_key",
83038 + [32002].param3 = 1,
83039 + [32182].file = "net/sunrpc/cache.c",
83040 + [32182].name = "cache_write",
83041 + [32182].param3 = 1,
83042 + [32278].file = "kernel/time/timer_stats.c",
83043 + [32278].name = "tstats_write",
83044 + [32278].param3 = 1,
83045 + [32326].file = "drivers/tty/n_r3964.c",
83046 + [32326].name = "r3964_write",
83047 + [32326].param4 = 1,
83048 + [32399].file = "drivers/net/phy/mdio_bus.c",
83049 + [32399].name = "mdiobus_alloc_size",
83050 + [32399].param1 = 1,
83051 + [32402].file = "net/ceph/pagevec.c",
83052 + [32402].name = "ceph_copy_user_to_page_vector",
83053 + [32402].param4 = 1,
83054 + [3241].file = "drivers/usb/wusbcore/crypto.c",
83055 + [3241].name = "wusb_prf",
83056 + [3241].param7 = 1,
83057 + [32459].file = "drivers/media/radio/radio-wl1273.c",
83058 + [32459].name = "wl1273_fm_fops_write",
83059 + [32459].param3 = 1,
83060 + [32531].file = "fs/bio.c",
83061 + [32531].name = "__bio_map_kern",
83062 + [32531].param2 = 1,
83063 + [32531].param3 = 1,
83064 + [32537].file = "drivers/staging/vme/devices/vme_user.c",
83065 + [32537].name = "buffer_to_user",
83066 + [32537].param3 = 1,
83067 + [32560].file = "drivers/input/input-mt.c",
83068 + [32560].name = "input_mt_init_slots",
83069 + [32560].param2 = 1,
83070 + [32600].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83071 + [32600].name = "ath6kl_set_assoc_req_ies",
83072 + [32600].param3 = 1,
83073 + [32608].file = "security/selinux/selinuxfs.c",
83074 + [32608].name = "sel_write_checkreqprot",
83075 + [32608].param3 = 1,
83076 + [32812].file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
83077 + [32812].name = "__vxge_hw_channel_allocate",
83078 + [32812].param3 = 1,
83079 + [32950].file = "fs/reiserfs/resize.c",
83080 + [32950].name = "reiserfs_resize",
83081 + [32950].param2 = 1,
83082 + [33010].file = "drivers/media/dvb/dvb-core/dvb_ringbuffer.c",
83083 + [33010].name = "dvb_ringbuffer_pkt_read_user",
83084 + [33010].param5 = 1,
83085 + [33130].file = "net/llc/llc_sap.c",
83086 + [33130].name = "llc_alloc_frame",
83087 + [33130].param4 = 1,
83088 + [33221].file = "crypto/ablkcipher.c",
83089 + [33221].name = "ablkcipher_copy_iv",
83090 + [33221].param3 = 1,
83091 + [33268].file = "mm/maccess.c",
83092 + [33268].name = "__probe_kernel_write",
83093 + [33268].param3 = 1,
83094 + [33280].file = "fs/xfs/kmem.c",
83095 + [33280].name = "kmem_realloc",
83096 + [33280].param2 = 1,
83097 + [33375].file = "drivers/staging/rtl8712/osdep_service.h",
83098 + [33375].name = "_malloc",
83099 + [33375].param1 = 1,
83100 + [33420].file = "drivers/net/team/team.c",
83101 + [33420].name = "__team_options_register",
83102 + [33420].param3 = 1,
83103 + [33489].file = "fs/binfmt_misc.c",
83104 + [33489].name = "create_entry",
83105 + [33489].param2 = 1,
83106 + [33637].file = "net/9p/client.c",
83107 + [33637].name = "p9_client_read",
83108 + [33637].param5 = 1,
83109 + [33669].file = "fs/gfs2/glock.c",
83110 + [33669].name = "gfs2_glock_nq_m",
83111 + [33669].param1 = 1,
83112 + [33704].file = "drivers/gpu/drm/ttm/ttm_page_alloc_dma.c",
83113 + [33704].name = "ttm_dma_page_pool_free",
83114 + [33704].param2 = 1,
83115 + [33779].file = "drivers/staging/vme/devices/vme_user.c",
83116 + [33779].name = "resource_from_user",
83117 + [33779].param3 = 1,
83118 + [33810].file = "net/mac80211/util.c",
83119 + [33810].name = "ieee80211_send_probe_req",
83120 + [33810].param6 = 1,
83121 + [3384].file = "drivers/block/paride/pg.c",
83122 + [3384].name = "pg_write",
83123 + [3384].param3 = 1,
83124 + [34105].file = "fs/libfs.c",
83125 + [34105].name = "simple_read_from_buffer",
83126 + [34105].param2 = 1,
83127 + [34105].param5 = 1,
83128 + [34120].file = "drivers/media/video/pvrusb2/pvrusb2-io.c",
83129 + [34120].name = "pvr2_stream_buffer_count",
83130 + [34120].param2 = 1,
83131 + [34226].file = "mm/shmem.c",
83132 + [34226].name = "shmem_xattr_set",
83133 + [34226].param4 = 1,
83134 + [34251].file = "drivers/staging/cxt1e1/sbecom_inline_linux.h",
83135 + [34251].name = "OS_kmalloc",
83136 + [34251].param1 = 1,
83137 + [34276].file = "drivers/media/video/videobuf2-core.c",
83138 + [34276].name = "__vb2_perform_fileio",
83139 + [34276].param3 = 1,
83140 + [34278].file = "fs/ubifs/debug.c",
83141 + [34278].name = "dfs_global_file_write",
83142 + [34278].param3 = 1,
83143 + [34432].file = "drivers/edac/edac_pci.c",
83144 + [34432].name = "edac_pci_alloc_ctl_info",
83145 + [34432].param1 = 1,
83146 + [34532].file = "drivers/virtio/virtio_ring.c",
83147 + [34532].name = "vring_add_indirect",
83148 + [34532].param3 = 1,
83149 + [34532].param4 = 1,
83150 + [34543].file = "net/sctp/tsnmap.c",
83151 + [34543].name = "sctp_tsnmap_grow",
83152 + [34543].param2 = 1,
83153 + [34551].file = "fs/ocfs2/stack_user.c",
83154 + [34551].name = "ocfs2_control_cfu",
83155 + [34551].param2 = 1,
83156 + [34634].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83157 + [34634].name = "ath6kl_send_go_probe_resp",
83158 + [34634].param3 = 1,
83159 + [34666].file = "fs/cifs/cifs_debug.c",
83160 + [34666].name = "cifs_security_flags_proc_write",
83161 + [34666].param3 = 1,
83162 + [3466].file = "drivers/misc/altera-stapl/altera-jtag.c",
83163 + [3466].name = "altera_drscan",
83164 + [3466].param2 = 1,
83165 + [34672].file = "drivers/tty/tty_io.c",
83166 + [34672].name = "tty_write",
83167 + [34672].param3 = 1,
83168 + [34679].file = "drivers/media/video/ivtv/ivtv-fileops.c",
83169 + [34679].name = "ivtv_copy_buf_to_user",
83170 + [34679].param4 = 1,
83171 + [34721].file = "drivers/usb/host/hwa-hc.c",
83172 + [34721].name = "__hwahc_dev_set_key",
83173 + [34721].param5 = 1,
83174 + [34749].file = "mm/nobootmem.c",
83175 + [34749].name = "__alloc_bootmem_low_node",
83176 + [34749].param2 = 1,
83177 + [34760].file = "include/acpi/platform/aclinux.h",
83178 + [34760].name = "acpi_os_allocate_zeroed",
83179 + [34760].param1 = 1,
83180 + [34802].file = "drivers/scsi/cxgbi/libcxgbi.h",
83181 + [34802].name = "cxgbi_alloc_big_mem",
83182 + [34802].param1 = 1,
83183 + [34863].file = "drivers/video/fbsysfs.c",
83184 + [34863].name = "framebuffer_alloc",
83185 + [34863].param1 = 1,
83186 + [34868].file = "drivers/net/ethernet/brocade/bna/bnad_debugfs.c",
83187 + [34868].name = "bnad_debugfs_write_regrd",
83188 + [34868].param3 = 1,
83189 + [34882].file = "drivers/platform/x86/toshiba_acpi.c",
83190 + [34882].name = "video_proc_write",
83191 + [34882].param3 = 1,
83192 + [35050].file = "fs/ocfs2/dlmfs/dlmfs.c",
83193 + [35050].name = "dlmfs_file_write",
83194 + [35050].param3 = 1,
83195 + [35119].file = "fs/xattr.c",
83196 + [35119].name = "sys_llistxattr",
83197 + [35119].param3 = 1,
83198 + [35129].file = "mm/nobootmem.c",
83199 + [35129].name = "___alloc_bootmem_nopanic",
83200 + [35129].param1 = 1,
83201 + [35159].file = "drivers/net/wimax/i2400m/usb.c",
83202 + [35159].name = "__i2400mu_send_barker",
83203 + [35159].param3 = 1,
83204 + [35232].file = "drivers/media/video/cx18/cx18-fileops.c",
83205 + [35232].name = "cx18_read",
83206 + [35232].param3 = 1,
83207 + [35234].file = "net/irda/irnet/irnet_ppp.c",
83208 + [35234].name = "irnet_ctrl_write",
83209 + [35234].param3 = 1,
83210 + [35256].file = "sound/core/memory.c",
83211 + [35256].name = "copy_from_user_toio",
83212 + [35256].param3 = 1,
83213 + [35268].file = "security/keys/request_key_auth.c",
83214 + [35268].name = "request_key_auth_read",
83215 + [35268].param3 = 1,
83216 + [3538].file = "net/bluetooth/mgmt.c",
83217 + [3538].name = "disconnect",
83218 + [3538].param4 = 1,
83219 + [35443].file = "sound/core/pcm_memory.c",
83220 + [35443].name = "_snd_pcm_lib_alloc_vmalloc_buffer",
83221 + [35443].param2 = 1,
83222 + [35468].file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
83223 + [35468].name = "xenbus_file_write",
83224 + [35468].param3 = 1,
83225 + [35536].file = "kernel/sysctl_binary.c",
83226 + [35536].name = "bin_uuid",
83227 + [35536].param3 = 1,
83228 + [35551].file = "drivers/media/video/ivtv/ivtv-fileops.c",
83229 + [35551].name = "ivtv_read_pos",
83230 + [35551].param3 = 1,
83231 + [35556].file = "fs/read_write.c",
83232 + [35556].name = "sys_readv",
83233 + [35556].param3 = 1,
83234 + [35693].file = "drivers/staging/mei/main.c",
83235 + [35693].name = "mei_read",
83236 + [35693].param3 = 1,
83237 + [35703].file = "crypto/ablkcipher.c",
83238 + [35703].name = "ablkcipher_next_slow",
83239 + [35703].param3 = 1,
83240 + [35703].param4 = 1,
83241 + [35729].file = "include/linux/skbuff.h",
83242 + [35729].name = "__dev_alloc_skb",
83243 + [35729].param1 = 1,
83244 + [35731].file = "drivers/usb/class/cdc-wdm.c",
83245 + [35731].name = "wdm_read",
83246 + [35731].param3 = 1,
83247 + [35796].file = "drivers/mtd/nand/nand_bch.c",
83248 + [35796].name = "nand_bch_init",
83249 + [35796].param2 = 1,
83250 + [35796].param3 = 1,
83251 + [35880].file = "fs/ecryptfs/crypto.c",
83252 + [35880].name = "ecryptfs_encrypt_and_encode_filename",
83253 + [35880].param6 = 1,
83254 + [36076].file = "drivers/net/ethernet/sfc/tx.c",
83255 + [36076].name = "efx_tsoh_heap_alloc",
83256 + [36076].param2 = 1,
83257 + [36080].file = "drivers/media/video/v4l2-ioctl.c",
83258 + [36080].name = "video_usercopy",
83259 + [36080].param2 = 1,
83260 + [36149].file = "fs/udf/inode.c",
83261 + [36149].name = "udf_alloc_i_data",
83262 + [36149].param2 = 1,
83263 + [36183].file = "drivers/tty/vt/vc_screen.c",
83264 + [36183].name = "vcs_read",
83265 + [36183].param3 = 1,
83266 + [36199].file = "net/sunrpc/auth_gss/auth_gss.c",
83267 + [36199].name = "gss_pipe_downcall",
83268 + [36199].param3 = 1,
83269 + [36206].file = "net/ipv4/tcp_input.c",
83270 + [36206].name = "tcp_collapse",
83271 + [36206].param5 = 1,
83272 + [36206].param6 = 1,
83273 + [36230].file = "drivers/net/wan/hdlc_ppp.c",
83274 + [36230].name = "ppp_cp_parse_cr",
83275 + [36230].param4 = 1,
83276 + [36284].file = "drivers/spi/spi.c",
83277 + [36284].name = "spi_register_board_info",
83278 + [36284].param2 = 1,
83279 + [36490].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83280 + [36490].name = "ath6kl_cfg80211_connect_event",
83281 + [36490].param7 = 1,
83282 + [36522].file = "drivers/hid/hidraw.c",
83283 + [36522].name = "hidraw_send_report",
83284 + [36522].param3 = 1,
83285 + [36560].file = "net/sunrpc/cache.c",
83286 + [36560].name = "write_flush",
83287 + [36560].param3 = 1,
83288 + [36807].file = "drivers/usb/mon/mon_bin.c",
83289 + [36807].name = "mon_bin_get_event",
83290 + [36807].param4 = 1,
83291 + [37034].file = "fs/cifs/cifssmb.c",
83292 + [37034].name = "cifs_writedata_alloc",
83293 + [37034].param1 = 1,
83294 + [37044].file = "sound/firewire/packets-buffer.c",
83295 + [37044].name = "iso_packets_buffer_init",
83296 + [37044].param3 = 1,
83297 + [37108].file = "drivers/media/dvb/ttpci/av7110_av.c",
83298 + [37108].name = "dvb_video_write",
83299 + [37108].param3 = 1,
83300 + [37154].file = "net/nfc/llcp/commands.c",
83301 + [37154].name = "nfc_llcp_build_tlv",
83302 + [37154].param3 = 1,
83303 + [37163].file = "net/core/skbuff.c",
83304 + [37163].name = "__netdev_alloc_skb",
83305 + [37163].param2 = 1,
83306 + [37233].file = "fs/ocfs2/cluster/tcp.c",
83307 + [37233].name = "o2net_send_message_vec",
83308 + [37233].param4 = 1,
83309 + [37241].file = "net/atm/lec.c",
83310 + [37241].name = "lane2_associate_req",
83311 + [37241].param4 = 1,
83312 + [37384].file = "drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c",
83313 + [37384].name = "vmw_fifo_reserve",
83314 + [37384].param2 = 1,
83315 + [37497].file = "net/mac80211/util.c",
83316 + [37497].name = "ieee80211_build_probe_req",
83317 + [37497].param7 = 1,
83318 + [37535].file = "kernel/trace/trace.c",
83319 + [37535].name = "tracing_trace_options_write",
83320 + [37535].param3 = 1,
83321 + [37611].file = "drivers/xen/xenbus/xenbus_xs.c",
83322 + [37611].name = "split",
83323 + [37611].param2 = 1,
83324 + [37661].file = "mm/filemap.c",
83325 + [37661].name = "file_read_actor",
83326 + [37661].param4 = 1,
83327 + [37852].file = "drivers/staging/android/logger.c",
83328 + [37852].name = "do_read_log_to_user",
83329 + [37852].param4 = 1,
83330 + [37921].file = "drivers/net/wireless/wl12xx/rx.c",
83331 + [37921].name = "wl1271_rx_handle_data",
83332 + [37921].param3 = 1,
83333 + [37976].file = "drivers/platform/x86/asus_acpi.c",
83334 + [37976].name = "bluetooth_proc_write",
83335 + [37976].param3 = 1,
83336 + [3797].file = "sound/pci/asihpi/hpicmn.c",
83337 + [3797].name = "hpi_alloc_control_cache",
83338 + [3797].param1 = 1,
83339 + [3801].file = "drivers/block/paride/pt.c",
83340 + [3801].name = "pt_write",
83341 + [3801].param3 = 1,
83342 + [38052].file = "kernel/kexec.c",
83343 + [38052].name = "kimage_normal_alloc",
83344 + [38052].param3 = 1,
83345 + [38057].file = "fs/coda/psdev.c",
83346 + [38057].name = "coda_psdev_write",
83347 + [38057].param3 = 1,
83348 + [38186].file = "kernel/signal.c",
83349 + [38186].name = "do_sigpending",
83350 + [38186].param2 = 1,
83351 + [38314].file = "fs/nfs/read.c",
83352 + [38314].name = "nfs_readdata_alloc",
83353 + [38314].param1 = 1,
83354 + [38401].file = "drivers/xen/xenbus/xenbus_dev_frontend.c",
83355 + [38401].name = "queue_reply",
83356 + [38401].param3 = 1,
83357 + [3841].file = "drivers/platform/x86/asus_acpi.c",
83358 + [3841].name = "write_led",
83359 + [3841].param2 = 1,
83360 + [38532].file = "fs/afs/cell.c",
83361 + [38532].name = "afs_cell_lookup",
83362 + [38532].param2 = 1,
83363 + [38564].file = "fs/nfs/nfs4proc.c",
83364 + [38564].name = "nfs4_realloc_slot_table",
83365 + [38564].param2 = 1,
83366 + [38576].file = "drivers/i2c/i2c-dev.c",
83367 + [38576].name = "i2cdev_read",
83368 + [38576].param3 = 1,
83369 + [38704].file = "drivers/media/video/uvc/uvc_driver.c",
83370 + [38704].name = "uvc_alloc_entity",
83371 + [38704].param3 = 1,
83372 + [38704].param4 = 1,
83373 + [38747].file = "fs/xattr.c",
83374 + [38747].name = "sys_lgetxattr",
83375 + [38747].param4 = 1,
83376 + [38867].file = "drivers/scsi/scsi_transport_fc.c",
83377 + [38867].name = "fc_host_post_vendor_event",
83378 + [38867].param3 = 1,
83379 + [38931].file = "drivers/isdn/hardware/eicon/capimain.c",
83380 + [38931].name = "diva_os_alloc_message_buffer",
83381 + [38931].param1 = 1,
83382 + [38972].file = "security/smack/smackfs.c",
83383 + [38972].name = "smk_write_logging",
83384 + [38972].param3 = 1,
83385 + [39001].file = "net/xfrm/xfrm_hash.c",
83386 + [39001].name = "xfrm_hash_alloc",
83387 + [39001].param1 = 1,
83388 + [39052].file = "drivers/input/evdev.c",
83389 + [39052].name = "evdev_ioctl",
83390 + [39052].param2 = 1,
83391 + [39066].file = "drivers/media/dvb/frontends/tda10048.c",
83392 + [39066].name = "tda10048_writeregbulk",
83393 + [39066].param4 = 1,
83394 + [39118].file = "drivers/misc/iwmc3200top/log.c",
83395 + [39118].name = "store_iwmct_log_level_fw",
83396 + [39118].param4 = 1,
83397 + [39254].file = "drivers/char/pcmcia/cm4000_cs.c",
83398 + [39254].name = "cmm_write",
83399 + [39254].param3 = 1,
83400 + [39392].file = "drivers/atm/solos-pci.c",
83401 + [39392].name = "send_command",
83402 + [39392].param4 = 1,
83403 + [39415].file = "fs/pstore/inode.c",
83404 + [39415].name = "pstore_mkfile",
83405 + [39415].param5 = 1,
83406 + [39417].file = "drivers/block/DAC960.c",
83407 + [39417].name = "dac960_user_command_proc_write",
83408 + [39417].param3 = 1,
83409 + [39460].file = "fs/btrfs/volumes.c",
83410 + [39460].name = "btrfs_map_block",
83411 + [39460].param3 = 1,
83412 + [39479].file = "drivers/ide/ide-tape.c",
83413 + [39479].name = "idetape_chrdev_read",
83414 + [39479].param3 = 1,
83415 + [39586].file = "drivers/hv/channel.c",
83416 + [39586].name = "create_gpadl_header",
83417 + [39586].param2 = 1,
83418 + [39638].file = "security/selinux/selinuxfs.c",
83419 + [39638].name = "sel_write_avc_cache_threshold",
83420 + [39638].param3 = 1,
83421 + [39645].file = "drivers/media/dvb/dvb-core/dvbdev.c",
83422 + [39645].name = "dvb_generic_ioctl",
83423 + [39645].param2 = 1,
83424 + [39770].file = "include/linux/mISDNif.h",
83425 + [39770].name = "mI_alloc_skb",
83426 + [39770].param1 = 1,
83427 + [39813].file = "fs/ocfs2/stack_user.c",
83428 + [39813].name = "ocfs2_control_message",
83429 + [39813].param3 = 1,
83430 + [39888].file = "net/core/skbuff.c",
83431 + [39888].name = "__alloc_skb",
83432 + [39888].param1 = 1,
83433 + [39980].file = "net/bluetooth/mgmt.c",
83434 + [39980].name = "pair_device",
83435 + [39980].param4 = 1,
83436 + [40043].file = "drivers/media/video/v4l2-ioctl.c",
83437 + [40043].name = "video_ioctl2",
83438 + [40043].param2 = 1,
83439 + [40049].file = "drivers/bluetooth/btmrvl_debugfs.c",
83440 + [40049].name = "btmrvl_psmode_write",
83441 + [40049].param3 = 1,
83442 + [40075].file = "drivers/media/video/c-qcam.c",
83443 + [40075].name = "qc_capture",
83444 + [40075].param3 = 1,
83445 + [40163].file = "fs/ncpfs/file.c",
83446 + [40163].name = "ncp_file_write",
83447 + [40163].param3 = 1,
83448 + [40240].file = "drivers/char/nvram.c",
83449 + [40240].name = "nvram_write",
83450 + [40240].param3 = 1,
83451 + [40256].file = "drivers/tty/vt/vc_screen.c",
83452 + [40256].name = "vcs_write",
83453 + [40256].param3 = 1,
83454 + [40302].file = "sound/isa/gus/gus_dram.c",
83455 + [40302].name = "snd_gus_dram_poke",
83456 + [40302].param4 = 1,
83457 + [40339].file = "drivers/acpi/apei/hest.c",
83458 + [40339].name = "hest_ghes_dev_register",
83459 + [40339].param1 = 1,
83460 + [40355].file = "drivers/staging/mei/main.c",
83461 + [40355].name = "mei_write",
83462 + [40355].param3 = 1,
83463 + [40373].file = "fs/cifs/cifs_spnego.c",
83464 + [40373].name = "cifs_spnego_key_instantiate",
83465 + [40373].param3 = 1,
83466 + [40519].file = "net/sctp/socket.c",
83467 + [40519].name = "sctp_setsockopt_events",
83468 + [40519].param3 = 1,
83469 + [40694].file = "mm/page_cgroup.c",
83470 + [40694].name = "alloc_page_cgroup",
83471 + [40694].param1 = 1,
83472 + [40731].file = "drivers/tty/tty_io.c",
83473 + [40731].name = "do_tty_write",
83474 + [40731].param5 = 1,
83475 + [40754].file = "fs/btrfs/delayed-inode.c",
83476 + [40754].name = "btrfs_alloc_delayed_item",
83477 + [40754].param1 = 1,
83478 + [40786].file = "net/ipv4/netfilter/nf_nat_snmp_basic.c",
83479 + [40786].name = "asn1_octets_decode",
83480 + [40786].param2 = 1,
83481 + [40901].file = "drivers/block/drbd/drbd_bitmap.c",
83482 + [40901].name = "drbd_bm_resize",
83483 + [40901].param2 = 1,
83484 + [40951].file = "drivers/xen/evtchn.c",
83485 + [40951].name = "evtchn_read",
83486 + [40951].param3 = 1,
83487 + [40952].file = "drivers/misc/sgi-xp/xpc_partition.c",
83488 + [40952].name = "xpc_kmalloc_cacheline_aligned",
83489 + [40952].param1 = 1,
83490 + [41000].file = "sound/core/pcm_native.c",
83491 + [41000].name = "snd_pcm_aio_read",
83492 + [41000].param3 = 1,
83493 + [41005].file = "net/bridge/netfilter/ebtables.c",
83494 + [41005].name = "copy_counters_to_user",
83495 + [41005].param5 = 1,
83496 + [41041].file = "net/core/sock.c",
83497 + [41041].name = "sock_wmalloc",
83498 + [41041].param2 = 1,
83499 + [41122].file = "fs/binfmt_misc.c",
83500 + [41122].name = "bm_status_write",
83501 + [41122].param3 = 1,
83502 + [41176].file = "kernel/trace/trace_events.c",
83503 + [41176].name = "subsystem_filter_write",
83504 + [41176].param3 = 1,
83505 + [41249].file = "drivers/media/video/zr364xx.c",
83506 + [41249].name = "send_control_msg",
83507 + [41249].param6 = 1,
83508 + [41287].file = "drivers/net/ethernet/neterion/vxge/vxge-config.c",
83509 + [41287].name = "vxge_os_dma_malloc_async",
83510 + [41287].param3 = 1,
83511 + [41302].file = "net/dns_resolver/dns_query.c",
83512 + [41302].name = "dns_query",
83513 + [41302].param3 = 1,
83514 + [41408].file = "mm/filemap_xip.c",
83515 + [41408].name = "__xip_file_write",
83516 + [41408].param3 = 1,
83517 + [41547].file = "net/bluetooth/smp.c",
83518 + [41547].name = "smp_build_cmd",
83519 + [41547].param3 = 1,
83520 + [4155].file = "kernel/kexec.c",
83521 + [4155].name = "do_kimage_alloc",
83522 + [4155].param3 = 1,
83523 + [41676].file = "fs/compat.c",
83524 + [41676].name = "compat_sys_preadv",
83525 + [41676].param3 = 1,
83526 + [4167].file = "drivers/media/dvb/frontends/cx24116.c",
83527 + [4167].name = "cx24116_writeregN",
83528 + [4167].param4 = 1,
83529 + [41793].file = "drivers/net/wireless/ath/ath6kl/wmi.c",
83530 + [41793].name = "ath6kl_wmi_send_mgmt_cmd",
83531 + [41793].param7 = 1,
83532 + [41924].file = "security/keys/keyctl.c",
83533 + [41924].name = "keyctl_get_security",
83534 + [41924].param3 = 1,
83535 + [41968].file = "fs/btrfs/volumes.c",
83536 + [41968].name = "__btrfs_map_block",
83537 + [41968].param3 = 1,
83538 + [4202].file = "drivers/edac/edac_mc.c",
83539 + [4202].name = "edac_mc_alloc",
83540 + [4202].param1 = 1,
83541 + [42081].file = "net/econet/af_econet.c",
83542 + [42081].name = "aun_incoming",
83543 + [42081].param3 = 1,
83544 + [42143].file = "drivers/media/video/c-qcam.c",
83545 + [42143].name = "qcam_read",
83546 + [42143].param3 = 1,
83547 + [42206].file = "fs/quota/quota_tree.c",
83548 + [42206].name = "getdqbuf",
83549 + [42206].param1 = 1,
83550 + [42270].file = "net/wireless/scan.c",
83551 + [42270].name = "cfg80211_inform_bss_frame",
83552 + [42270].param4 = 1,
83553 + [42281].file = "include/linux/mISDNif.h",
83554 + [42281].name = "_queue_data",
83555 + [42281].param4 = 1,
83556 + [42420].file = "drivers/net/wireless/hostap/hostap_ioctl.c",
83557 + [42420].name = "prism2_set_genericelement",
83558 + [42420].param3 = 1,
83559 + [42472].file = "fs/compat.c",
83560 + [42472].name = "compat_readv",
83561 + [42472].param3 = 1,
83562 + [42473].file = "net/tipc/name_table.c",
83563 + [42473].name = "tipc_subseq_alloc",
83564 + [42473].param1 = 1,
83565 + [42562].file = "kernel/kfifo.c",
83566 + [42562].name = "__kfifo_to_user_r",
83567 + [42562].param3 = 1,
83568 + [42666].file = "drivers/pcmcia/cistpl.c",
83569 + [42666].name = "read_cis_cache",
83570 + [42666].param4 = 1,
83571 + [42714].file = "drivers/scsi/scsi_tgt_lib.c",
83572 + [42714].name = "scsi_tgt_copy_sense",
83573 + [42714].param3 = 1,
83574 + [42833].file = "kernel/trace/blktrace.c",
83575 + [42833].name = "blk_msg_write",
83576 + [42833].param3 = 1,
83577 + [42857].file = "security/selinux/selinuxfs.c",
83578 + [42857].name = "sel_write_member",
83579 + [42857].param3 = 1,
83580 + [42882].file = "security/keys/user_defined.c",
83581 + [42882].name = "user_instantiate",
83582 + [42882].param3 = 1,
83583 + [42930].file = "net/caif/cfpkt_skbuff.c",
83584 + [42930].name = "cfpkt_create_pfx",
83585 + [42930].param1 = 1,
83586 + [42930].param2 = 1,
83587 + [43023].file = "drivers/usb/misc/usblcd.c",
83588 + [43023].name = "lcd_write",
83589 + [43023].param3 = 1,
83590 + [43104].file = "drivers/mtd/devices/mtd_dataflash.c",
83591 + [43104].name = "dataflash_read_user_otp",
83592 + [43104].param3 = 1,
83593 + [43133].file = "lib/mpi/mpiutil.c",
83594 + [43133].name = "mpi_resize",
83595 + [43133].param2 = 1,
83596 + [4324].file = "drivers/video/fbmem.c",
83597 + [4324].name = "fb_read",
83598 + [4324].param3 = 1,
83599 + [43266].file = "fs/afs/cell.c",
83600 + [43266].name = "afs_cell_alloc",
83601 + [43266].param2 = 1,
83602 + [4328].file = "drivers/usb/musb/musb_debugfs.c",
83603 + [4328].name = "musb_test_mode_write",
83604 + [4328].param3 = 1,
83605 + [43380].file = "drivers/scsi/bfa/bfad_debugfs.c",
83606 + [43380].name = "bfad_debugfs_write_regrd",
83607 + [43380].param3 = 1,
83608 + [43510].file = "kernel/kexec.c",
83609 + [43510].name = "compat_sys_kexec_load",
83610 + [43510].param2 = 1,
83611 + [43540].file = "include/rdma/ib_verbs.h",
83612 + [43540].name = "ib_copy_to_udata",
83613 + [43540].param3 = 1,
83614 + [4357].file = "security/tomoyo/securityfs_if.c",
83615 + [4357].name = "tomoyo_read_self",
83616 + [4357].param3 = 1,
83617 + [43590].file = "security/smack/smackfs.c",
83618 + [43590].name = "smk_write_onlycap",
83619 + [43590].param3 = 1,
83620 + [43596].file = "drivers/usb/core/buffer.c",
83621 + [43596].name = "hcd_buffer_alloc",
83622 + [43596].param2 = 1,
83623 + [43632].file = "drivers/media/video/videobuf2-core.c",
83624 + [43632].name = "vb2_read",
83625 + [43632].param3 = 1,
83626 + [43659].file = "drivers/firmware/efivars.c",
83627 + [43659].name = "efivar_create_sysfs_entry",
83628 + [43659].param2 = 1,
83629 + [43731].file = "drivers/hid/hid-picolcd.c",
83630 + [43731].name = "picolcd_debug_eeprom_read",
83631 + [43731].param3 = 1,
83632 + [43777].file = "drivers/acpi/acpica/utobject.c",
83633 + [43777].name = "acpi_ut_create_buffer_object",
83634 + [43777].param1 = 1,
83635 + [43798].file = "net/bluetooth/mgmt.c",
83636 + [43798].name = "set_local_name",
83637 + [43798].param4 = 1,
83638 + [4380].file = "drivers/mtd/devices/mtd_dataflash.c",
83639 + [4380].name = "dataflash_read_fact_otp",
83640 + [4380].param3 = 1,
83641 + [43834].file = "security/apparmor/apparmorfs.c",
83642 + [43834].name = "profile_replace",
83643 + [43834].param3 = 1,
83644 + [43895].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
83645 + [43895].name = "ddb_output_write",
83646 + [43895].param3 = 1,
83647 + [43899].file = "drivers/media/rc/imon.c",
83648 + [43899].name = "vfd_write",
83649 + [43899].param3 = 1,
83650 + [43900].file = "drivers/scsi/cxgbi/libcxgbi.c",
83651 + [43900].name = "cxgbi_device_portmap_create",
83652 + [43900].param3 = 1,
83653 + [43922].file = "drivers/mmc/card/mmc_test.c",
83654 + [43922].name = "mmc_test_alloc_mem",
83655 + [43922].param3 = 1,
83656 + [43946].file = "drivers/net/wireless/ath/ath6kl/txrx.c",
83657 + [43946].name = "aggr_recv_addba_req_evt",
83658 + [43946].param4 = 1,
83659 + [44006].file = "mm/process_vm_access.c",
83660 + [44006].name = "process_vm_rw_pages",
83661 + [44006].param5 = 1,
83662 + [44006].param6 = 1,
83663 + [44050].file = "fs/nfs/idmap.c",
83664 + [44050].name = "nfs_map_group_to_gid",
83665 + [44050].param3 = 1,
83666 + [44125].file = "fs/ext4/super.c",
83667 + [44125].name = "ext4_kvmalloc",
83668 + [44125].param1 = 1,
83669 + [44266].file = "kernel/cgroup.c",
83670 + [44266].name = "cgroup_write_string",
83671 + [44266].param5 = 1,
83672 + [44290].file = "drivers/net/usb/dm9601.c",
83673 + [44290].name = "dm_read",
83674 + [44290].param3 = 1,
83675 + [44308].file = "crypto/af_alg.c",
83676 + [44308].name = "alg_setkey",
83677 + [44308].param3 = 1,
83678 + [44510].file = "drivers/net/ethernet/broadcom/bnx2.c",
83679 + [44510].name = "bnx2_nvram_write",
83680 + [44510].param2 = 1,
83681 + [44625].file = "net/bluetooth/mgmt.c",
83682 + [44625].name = "set_connectable",
83683 + [44625].param4 = 1,
83684 + [44642].file = "drivers/net/wireless/iwmc3200wifi/commands.c",
83685 + [44642].name = "iwm_umac_set_config_var",
83686 + [44642].param4 = 1,
83687 + [44698].file = "net/sctp/socket.c",
83688 + [44698].name = "sctp_setsockopt_context",
83689 + [44698].param3 = 1,
83690 + [4471].file = "fs/ntfs/malloc.h",
83691 + [4471].name = "__ntfs_malloc",
83692 + [4471].param1 = 1,
83693 + [44773].file = "drivers/staging/vme/devices/vme_user.c",
83694 + [44773].name = "vme_user_write",
83695 + [44773].param3 = 1,
83696 + [44825].file = "drivers/scsi/osd/osd_initiator.c",
83697 + [44825].name = "_osd_realloc_seg",
83698 + [44825].param3 = 1,
83699 + [44852].file = "net/sctp/socket.c",
83700 + [44852].name = "sctp_setsockopt_rtoinfo",
83701 + [44852].param3 = 1,
83702 + [44936].file = "drivers/md/dm-raid.c",
83703 + [44936].name = "context_alloc",
83704 + [44936].param3 = 1,
83705 + [44943].file = "mm/util.c",
83706 + [44943].name = "kmemdup",
83707 + [44943].param2 = 1,
83708 + [44946].file = "net/sctp/socket.c",
83709 + [44946].name = "sctp_setsockopt_auth_chunk",
83710 + [44946].param3 = 1,
83711 + [44990].file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
83712 + [44990].name = "pvr2_ioread_set_sync_key",
83713 + [44990].param3 = 1,
83714 + [45000].file = "fs/afs/proc.c",
83715 + [45000].name = "afs_proc_rootcell_write",
83716 + [45000].param3 = 1,
83717 + [45117].file = "drivers/staging/winbond/wb35reg.c",
83718 + [45117].name = "Wb35Reg_BurstWrite",
83719 + [45117].param4 = 1,
83720 + [45200].file = "drivers/scsi/scsi_proc.c",
83721 + [45200].name = "proc_scsi_write_proc",
83722 + [45200].param3 = 1,
83723 + [45217].file = "drivers/net/wireless/iwlwifi/iwl-debugfs.c",
83724 + [45217].name = "iwl_dbgfs_debug_level_write",
83725 + [45217].param3 = 1,
83726 + [45233].file = "net/rds/info.c",
83727 + [45233].name = "rds_info_getsockopt",
83728 + [45233].param3 = 1,
83729 + [45326].file = "drivers/mtd/ubi/cdev.c",
83730 + [45326].name = "vol_cdev_read",
83731 + [45326].param3 = 1,
83732 + [45335].file = "fs/read_write.c",
83733 + [45335].name = "vfs_writev",
83734 + [45335].param3 = 1,
83735 + [45366].file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
83736 + [45366].name = "init_tid_tabs",
83737 + [45366].param2 = 1,
83738 + [45366].param3 = 1,
83739 + [45366].param4 = 1,
83740 + [45534].file = "drivers/net/wireless/ath/carl9170/cmd.c",
83741 + [45534].name = "carl9170_cmd_buf",
83742 + [45534].param3 = 1,
83743 + [45576].file = "net/netfilter/xt_recent.c",
83744 + [45576].name = "recent_mt_proc_write",
83745 + [45576].param3 = 1,
83746 + [45583].file = "fs/gfs2/dir.c",
83747 + [45583].name = "leaf_dealloc",
83748 + [45583].param3 = 1,
83749 + [45586].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
83750 + [45586].name = "rt2x00debug_write_bbp",
83751 + [45586].param3 = 1,
83752 + [45629].file = "lib/bch.c",
83753 + [45629].name = "bch_alloc",
83754 + [45629].param1 = 1,
83755 + [45633].file = "drivers/input/evdev.c",
83756 + [45633].name = "evdev_do_ioctl",
83757 + [45633].param2 = 1,
83758 + [45743].file = "drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c",
83759 + [45743].name = "qlcnic_alloc_msix_entries",
83760 + [45743].param2 = 1,
83761 + [45864].file = "drivers/atm/ambassador.c",
83762 + [45864].name = "create_queues",
83763 + [45864].param2 = 1,
83764 + [45864].param3 = 1,
83765 + [45930].file = "security/apparmor/apparmorfs.c",
83766 + [45930].name = "profile_remove",
83767 + [45930].param3 = 1,
83768 + [45954].file = "drivers/usb/misc/legousbtower.c",
83769 + [45954].name = "tower_write",
83770 + [45954].param3 = 1,
83771 + [46140].file = "sound/core/memalloc.c",
83772 + [46140].name = "snd_mem_proc_write",
83773 + [46140].param3 = 1,
83774 + [4616].file = "net/sunrpc/cache.c",
83775 + [4616].name = "cache_do_downcall",
83776 + [4616].param3 = 1,
83777 + [46243].file = "fs/binfmt_misc.c",
83778 + [46243].name = "bm_register_write",
83779 + [46243].param3 = 1,
83780 + [46250].file = "fs/xattr.c",
83781 + [46250].name = "sys_getxattr",
83782 + [46250].param4 = 1,
83783 + [46343].file = "fs/compat.c",
83784 + [46343].name = "compat_do_readv_writev",
83785 + [46343].param4 = 1,
83786 + [46400].file = "drivers/staging/sep/sep_driver.c",
83787 + [46400].name = "sep_prepare_input_output_dma_table",
83788 + [46400].param2 = 1,
83789 + [46400].param3 = 1,
83790 + [46400].param4 = 1,
83791 + [4644].file = "drivers/net/usb/mcs7830.c",
83792 + [4644].name = "mcs7830_get_reg",
83793 + [4644].param3 = 1,
83794 + [46605].file = "sound/core/oss/pcm_oss.c",
83795 + [46605].name = "snd_pcm_oss_sync1",
83796 + [46605].param2 = 1,
83797 + [46630].file = "net/decnet/af_decnet.c",
83798 + [46630].name = "__dn_setsockopt",
83799 + [46630].param5 = 1,
83800 + [46655].file = "drivers/media/video/hdpvr/hdpvr-video.c",
83801 + [46655].name = "hdpvr_read",
83802 + [46655].param3 = 1,
83803 + [46685].file = "drivers/gpu/drm/ttm/ttm_bo_vm.c",
83804 + [46685].name = "ttm_bo_fbdev_io",
83805 + [46685].param4 = 1,
83806 + [46742].file = "drivers/scsi/st.c",
83807 + [46742].name = "sgl_map_user_pages",
83808 + [46742].param2 = 1,
83809 + [46881].file = "drivers/char/lp.c",
83810 + [46881].name = "lp_write",
83811 + [46881].param3 = 1,
83812 + [47130].file = "kernel/kfifo.c",
83813 + [47130].name = "kfifo_copy_to_user",
83814 + [47130].param3 = 1,
83815 + [47265].file = "drivers/scsi/bnx2fc/bnx2fc_io.c",
83816 + [47265].name = "bnx2fc_cmd_mgr_alloc",
83817 + [47265].param2 = 1,
83818 + [47265].param3 = 1,
83819 + [47309].file = "drivers/scsi/aic94xx/aic94xx_init.c",
83820 + [47309].name = "asd_store_update_bios",
83821 + [47309].param4 = 1,
83822 + [47342].file = "fs/proc/base.c",
83823 + [47342].name = "sched_autogroup_write",
83824 + [47342].param3 = 1,
83825 + [47363].file = "drivers/input/evdev.c",
83826 + [47363].name = "evdev_ioctl_handler",
83827 + [47363].param2 = 1,
83828 + [47385].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
83829 + [47385].name = "zd_usb_iowrite16v",
83830 + [47385].param3 = 1,
83831 + [4738].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
83832 + [4738].name = "ath6kl_set_ap_probe_resp_ies",
83833 + [4738].param3 = 1,
83834 + [47393].file = "drivers/net/wireless/ath/main.c",
83835 + [47393].name = "ath_rxbuf_alloc",
83836 + [47393].param2 = 1,
83837 + [47463].file = "fs/xfs/kmem.c",
83838 + [47463].name = "kmem_zalloc",
83839 + [47463].param1 = 1,
83840 + [47474].file = "kernel/trace/trace.c",
83841 + [47474].name = "tracing_buffers_read",
83842 + [47474].param3 = 1,
83843 + [47636].file = "drivers/usb/class/usblp.c",
83844 + [47636].name = "usblp_ioctl",
83845 + [47636].param2 = 1,
83846 + [47637].file = "drivers/block/cciss.c",
83847 + [47637].name = "cciss_proc_write",
83848 + [47637].param3 = 1,
83849 + [47712].file = "net/sctp/socket.c",
83850 + [47712].name = "sctp_setsockopt_maxburst",
83851 + [47712].param3 = 1,
83852 + [47728].file = "drivers/char/agp/isoch.c",
83853 + [47728].name = "agp_3_5_isochronous_node_enable",
83854 + [47728].param3 = 1,
83855 + [4779].file = "fs/pipe.c",
83856 + [4779].name = "pipe_set_size",
83857 + [4779].param2 = 1,
83858 + [47881].file = "security/selinux/selinuxfs.c",
83859 + [47881].name = "sel_write_disable",
83860 + [47881].param3 = 1,
83861 + [48111].file = "net/wireless/sme.c",
83862 + [48111].name = "cfg80211_roamed_bss",
83863 + [48111].param4 = 1,
83864 + [48111].param6 = 1,
83865 + [48124].file = "drivers/net/wireless/iwmc3200wifi/main.c",
83866 + [48124].name = "iwm_notif_send",
83867 + [48124].param6 = 1,
83868 + [48155].file = "net/sctp/sm_make_chunk.c",
83869 + [48155].name = "sctp_make_abort_user",
83870 + [48155].param3 = 1,
83871 + [48182].file = "crypto/cryptd.c",
83872 + [48182].name = "cryptd_alloc_instance",
83873 + [48182].param2 = 1,
83874 + [48182].param3 = 1,
83875 + [48248].file = "security/keys/keyctl.c",
83876 + [48248].name = "keyctl_instantiate_key",
83877 + [48248].param3 = 1,
83878 + [4829].file = "drivers/block/floppy.c",
83879 + [4829].name = "fd_copyout",
83880 + [4829].param3 = 1,
83881 + [48632].file = "net/bluetooth/l2cap_core.c",
83882 + [48632].name = "l2cap_build_cmd",
83883 + [48632].param4 = 1,
83884 + [48642].file = "fs/hugetlbfs/inode.c",
83885 + [48642].name = "hugetlbfs_read",
83886 + [48642].param3 = 1,
83887 + [48720].file = "drivers/gpu/drm/i915/i915_debugfs.c",
83888 + [48720].name = "i915_max_freq_write",
83889 + [48720].param3 = 1,
83890 + [48768].file = "net/irda/irnet/irnet_ppp.c",
83891 + [48768].name = "dev_irnet_write",
83892 + [48768].param3 = 1,
83893 + [48818].file = "net/sunrpc/svc.c",
83894 + [48818].name = "svc_pool_map_alloc_arrays",
83895 + [48818].param2 = 1,
83896 + [48856].file = "drivers/acpi/acpica/utalloc.c",
83897 + [48856].name = "acpi_ut_initialize_buffer",
83898 + [48856].param2 = 1,
83899 + [48862].file = "net/sctp/socket.c",
83900 + [48862].name = "sctp_setsockopt_adaptation_layer",
83901 + [48862].param3 = 1,
83902 + [49126].file = "lib/prio_heap.c",
83903 + [49126].name = "heap_init",
83904 + [49126].param2 = 1,
83905 + [49143].file = "sound/core/oss/pcm_oss.c",
83906 + [49143].name = "snd_pcm_oss_write2",
83907 + [49143].param3 = 1,
83908 + [49216].file = "fs/read_write.c",
83909 + [49216].name = "do_readv_writev",
83910 + [49216].param4 = 1,
83911 + [49426].file = "net/bluetooth/l2cap_sock.c",
83912 + [49426].name = "l2cap_sock_setsockopt_old",
83913 + [49426].param4 = 1,
83914 + [49448].file = "drivers/isdn/gigaset/common.c",
83915 + [49448].name = "gigaset_initdriver",
83916 + [49448].param2 = 1,
83917 + [49494].file = "drivers/virtio/virtio_ring.c",
83918 + [49494].name = "vring_new_virtqueue",
83919 + [49494].param1 = 1,
83920 + [49499].file = "drivers/block/nvme.c",
83921 + [49499].name = "nvme_alloc_iod",
83922 + [49499].param1 = 1,
83923 + [49510].file = "net/sctp/socket.c",
83924 + [49510].name = "sctp_setsockopt_autoclose",
83925 + [49510].param3 = 1,
83926 + [4958].file = "drivers/net/wireless/p54/fwio.c",
83927 + [4958].name = "p54_alloc_skb",
83928 + [4958].param3 = 1,
83929 + [49604].file = "crypto/af_alg.c",
83930 + [49604].name = "alg_setsockopt",
83931 + [49604].param5 = 1,
83932 + [49646].file = "drivers/tty/vt/vt.c",
83933 + [49646].name = "vc_resize",
83934 + [49646].param2 = 1,
83935 + [49646].param3 = 1,
83936 + [49658].file = "drivers/net/wireless/brcm80211/brcmsmac/dma.c",
83937 + [49658].name = "dma_attach",
83938 + [49658].param6 = 1,
83939 + [49658].param7 = 1,
83940 + [49663].file = "drivers/media/video/uvc/uvc_driver.c",
83941 + [49663].name = "uvc_simplify_fraction",
83942 + [49663].param3 = 1,
83943 + [49746].file = "net/ipv4/netfilter/arp_tables.c",
83944 + [49746].name = "compat_do_arpt_set_ctl",
83945 + [49746].param4 = 1,
83946 + [49780].file = "net/mac80211/key.c",
83947 + [49780].name = "ieee80211_key_alloc",
83948 + [49780].param3 = 1,
83949 + [49805].file = "drivers/pci/pci.c",
83950 + [49805].name = "pci_add_cap_save_buffer",
83951 + [49805].param3 = 1,
83952 + [49845].file = "mm/vmalloc.c",
83953 + [49845].name = "__vmalloc_node",
83954 + [49845].param1 = 1,
83955 + [49929].file = "drivers/mtd/ubi/cdev.c",
83956 + [49929].name = "vol_cdev_direct_write",
83957 + [49929].param3 = 1,
83958 + [49935].file = "fs/xfs/kmem.c",
83959 + [49935].name = "kmem_zalloc_greedy",
83960 + [49935].param2 = 1,
83961 + [49935].param3 = 1,
83962 + [49].file = "net/atm/svc.c",
83963 + [49].name = "svc_setsockopt",
83964 + [49].param5 = 1,
83965 + [50518].file = "drivers/gpu/drm/nouveau/nouveau_gem.c",
83966 + [50518].name = "u_memcpya",
83967 + [50518].param2 = 1,
83968 + [50518].param3 = 1,
83969 + [5052].file = "drivers/char/ppdev.c",
83970 + [5052].name = "pp_read",
83971 + [5052].param3 = 1,
83972 + [50562].file = "drivers/media/video/zoran/zoran_procfs.c",
83973 + [50562].name = "zoran_write",
83974 + [50562].param3 = 1,
83975 + [50617].file = "fs/hugetlbfs/inode.c",
83976 + [50617].name = "hugetlbfs_read_actor",
83977 + [50617].param2 = 1,
83978 + [50617].param4 = 1,
83979 + [50617].param5 = 1,
83980 + [50692].file = "lib/ts_bm.c",
83981 + [50692].name = "bm_init",
83982 + [50692].param2 = 1,
83983 + [50813].file = "mm/vmalloc.c",
83984 + [50813].name = "__vmalloc_node_flags",
83985 + [50813].param1 = 1,
83986 + [5087].file = "drivers/atm/solos-pci.c",
83987 + [5087].name = "console_store",
83988 + [5087].param4 = 1,
83989 + [5102].file = "drivers/usb/misc/usbtest.c",
83990 + [5102].name = "usbtest_alloc_urb",
83991 + [5102].param3 = 1,
83992 + [5102].param5 = 1,
83993 + [51061].file = "net/bluetooth/mgmt.c",
83994 + [51061].name = "pin_code_reply",
83995 + [51061].param4 = 1,
83996 + [51139].file = "fs/pipe.c",
83997 + [51139].name = "pipe_iov_copy_to_user",
83998 + [51139].param3 = 1,
83999 + [51177].file = "net/sunrpc/xprtrdma/transport.c",
84000 + [51177].name = "xprt_rdma_allocate",
84001 + [51177].param2 = 1,
84002 + [51182].file = "drivers/misc/sgi-xp/xpc_main.c",
84003 + [51182].name = "xpc_kzalloc_cacheline_aligned",
84004 + [51182].param1 = 1,
84005 + [51250].file = "fs/read_write.c",
84006 + [51250].name = "rw_copy_check_uvector",
84007 + [51250].param3 = 1,
84008 + [51253].file = "drivers/net/wireless/rt2x00/rt2x00debug.c",
84009 + [51253].name = "rt2x00debug_write_eeprom",
84010 + [51253].param3 = 1,
84011 + [51323].file = "sound/pci/ac97/ac97_pcm.c",
84012 + [51323].name = "snd_ac97_pcm_assign",
84013 + [51323].param2 = 1,
84014 + [51340].file = "drivers/usb/class/usblp.c",
84015 + [51340].name = "usblp_write",
84016 + [51340].param3 = 1,
84017 + [51499].file = "net/802/garp.c",
84018 + [51499].name = "garp_attr_create",
84019 + [51499].param3 = 1,
84020 + [51842].file = "drivers/hid/hid-core.c",
84021 + [51842].name = "hid_register_field",
84022 + [51842].param2 = 1,
84023 + [51842].param3 = 1,
84024 + [5197].file = "net/core/dev.c",
84025 + [5197].name = "dev_set_alias",
84026 + [5197].param3 = 1,
84027 + [5204].file = "drivers/media/video/usbvision/usbvision-video.c",
84028 + [5204].name = "usbvision_v4l2_read",
84029 + [5204].param3 = 1,
84030 + [5206].file = "drivers/media/dvb/ttpci/av7110_v4l.c",
84031 + [5206].name = "av7110_vbi_write",
84032 + [5206].param3 = 1,
84033 + [52086].file = "drivers/usb/image/mdc800.c",
84034 + [52086].name = "mdc800_device_read",
84035 + [52086].param3 = 1,
84036 + [52099].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
84037 + [52099].name = "do_surface_dirty_sou",
84038 + [52099].param7 = 1,
84039 + [52172].file = "drivers/pcmcia/cistpl.c",
84040 + [52172].name = "pccard_store_cis",
84041 + [52172].param6 = 1,
84042 + [52173].file = "drivers/misc/ibmasm/ibmasmfs.c",
84043 + [52173].name = "remote_settings_file_write",
84044 + [52173].param3 = 1,
84045 + [52199].file = "mm/nobootmem.c",
84046 + [52199].name = "__alloc_bootmem",
84047 + [52199].param1 = 1,
84048 + [52343].file = "drivers/usb/misc/adutux.c",
84049 + [52343].name = "adu_read",
84050 + [52343].param3 = 1,
84051 + [52401].file = "drivers/staging/rtl8712/rtl871x_ioctl_linux.c",
84052 + [52401].name = "r871x_set_wpa_ie",
84053 + [52401].param3 = 1,
84054 + [52699].file = "lib/ts_fsm.c",
84055 + [52699].name = "fsm_init",
84056 + [52699].param2 = 1,
84057 + [52721].file = "security/keys/encrypted-keys/encrypted.c",
84058 + [52721].name = "encrypted_instantiate",
84059 + [52721].param3 = 1,
84060 + [52902].file = "fs/xfs/kmem.h",
84061 + [52902].name = "kmem_zalloc_large",
84062 + [52902].param1 = 1,
84063 + [52950].file = "net/bluetooth/mgmt.c",
84064 + [52950].name = "set_discoverable",
84065 + [52950].param4 = 1,
84066 + [53041].file = "fs/libfs.c",
84067 + [53041].name = "simple_transaction_get",
84068 + [53041].param3 = 1,
84069 + [5313].file = "fs/gfs2/quota.c",
84070 + [5313].name = "do_sync",
84071 + [5313].param1 = 1,
84072 + [53209].file = "drivers/usb/host/ehci-sched.c",
84073 + [53209].name = "iso_sched_alloc",
84074 + [53209].param1 = 1,
84075 + [53302].file = "drivers/firewire/core-cdev.c",
84076 + [53302].name = "dispatch_ioctl",
84077 + [53302].param2 = 1,
84078 + [53355].file = "fs/ceph/dir.c",
84079 + [53355].name = "ceph_read_dir",
84080 + [53355].param3 = 1,
84081 + [53405].file = "drivers/media/video/videobuf-core.c",
84082 + [53405].name = "__videobuf_copy_to_user",
84083 + [53405].param4 = 1,
84084 + [53407].file = "net/wireless/sme.c",
84085 + [53407].name = "cfg80211_connect_result",
84086 + [53407].param4 = 1,
84087 + [53407].param6 = 1,
84088 + [53426].file = "fs/libfs.c",
84089 + [53426].name = "simple_transaction_read",
84090 + [53426].param3 = 1,
84091 + [5344].file = "security/selinux/ss/hashtab.c",
84092 + [5344].name = "hashtab_create",
84093 + [5344].param3 = 1,
84094 + [53513].file = "drivers/mmc/core/mmc_ops.c",
84095 + [53513].name = "mmc_send_bus_test",
84096 + [53513].param4 = 1,
84097 + [53626].file = "drivers/block/paride/pg.c",
84098 + [53626].name = "pg_read",
84099 + [53626].param3 = 1,
84100 + [53631].file = "mm/util.c",
84101 + [53631].name = "memdup_user",
84102 + [53631].param2 = 1,
84103 + [53674].file = "drivers/media/dvb/ttpci/av7110_ca.c",
84104 + [53674].name = "ci_ll_write",
84105 + [53674].param4 = 1,
84106 + [5389].file = "drivers/infiniband/core/uverbs_cmd.c",
84107 + [5389].name = "ib_uverbs_unmarshall_recv",
84108 + [5389].param5 = 1,
84109 + [53901].file = "net/rds/message.c",
84110 + [53901].name = "rds_message_alloc",
84111 + [53901].param1 = 1,
84112 + [53902].file = "net/sctp/socket.c",
84113 + [53902].name = "sctp_setsockopt_initmsg",
84114 + [53902].param3 = 1,
84115 + [5410].file = "kernel/kexec.c",
84116 + [5410].name = "sys_kexec_load",
84117 + [5410].param2 = 1,
84118 + [54172].file = "net/nfc/core.c",
84119 + [54172].name = "nfc_alloc_recv_skb",
84120 + [54172].param1 = 1,
84121 + [54182].file = "drivers/block/rbd.c",
84122 + [54182].name = "rbd_snap_add",
84123 + [54182].param4 = 1,
84124 + [54201].file = "drivers/platform/x86/asus_acpi.c",
84125 + [54201].name = "mled_proc_write",
84126 + [54201].param3 = 1,
84127 + [54263].file = "security/keys/trusted.c",
84128 + [54263].name = "trusted_instantiate",
84129 + [54263].param3 = 1,
84130 + [54296].file = "include/linux/mISDNif.h",
84131 + [54296].name = "_alloc_mISDN_skb",
84132 + [54296].param3 = 1,
84133 + [54298].file = "drivers/usb/wusbcore/crypto.c",
84134 + [54298].name = "wusb_ccm_mac",
84135 + [54298].param7 = 1,
84136 + [54318].file = "include/drm/drm_mem_util.h",
84137 + [54318].name = "drm_malloc_ab",
84138 + [54318].param1 = 1,
84139 + [54318].param2 = 1,
84140 + [54335].file = "drivers/md/dm-table.c",
84141 + [54335].name = "dm_vcalloc",
84142 + [54335].param1 = 1,
84143 + [54335].param2 = 1,
84144 + [54338].file = "fs/ntfs/malloc.h",
84145 + [54338].name = "ntfs_malloc_nofs",
84146 + [54338].param1 = 1,
84147 + [54339].file = "security/smack/smackfs.c",
84148 + [54339].name = "smk_write_cipso",
84149 + [54339].param3 = 1,
84150 + [54369].file = "drivers/usb/storage/realtek_cr.c",
84151 + [54369].name = "rts51x_read_mem",
84152 + [54369].param4 = 1,
84153 + [5438].file = "sound/core/memory.c",
84154 + [5438].name = "copy_to_user_fromio",
84155 + [5438].param3 = 1,
84156 + [54401].file = "lib/dynamic_debug.c",
84157 + [54401].name = "ddebug_proc_write",
84158 + [54401].param3 = 1,
84159 + [54467].file = "net/packet/af_packet.c",
84160 + [54467].name = "packet_setsockopt",
84161 + [54467].param5 = 1,
84162 + [54573].file = "ipc/sem.c",
84163 + [54573].name = "sys_semop",
84164 + [54573].param3 = 1,
84165 + [54583].file = "net/sctp/socket.c",
84166 + [54583].name = "sctp_setsockopt_peer_addr_params",
84167 + [54583].param3 = 1,
84168 + [54643].file = "drivers/isdn/hardware/eicon/divasi.c",
84169 + [54643].name = "um_idi_write",
84170 + [54643].param3 = 1,
84171 + [54657].file = "mm/migrate.c",
84172 + [54657].name = "do_pages_stat",
84173 + [54657].param2 = 1,
84174 + [54663].file = "drivers/isdn/hardware/eicon/platform.h",
84175 + [54663].name = "diva_os_malloc",
84176 + [54663].param2 = 1,
84177 + [54701].file = "drivers/misc/altera-stapl/altera-jtag.c",
84178 + [54701].name = "altera_swap_ir",
84179 + [54701].param2 = 1,
84180 + [54751].file = "drivers/infiniband/core/device.c",
84181 + [54751].name = "ib_alloc_device",
84182 + [54751].param1 = 1,
84183 + [54771].file = "drivers/isdn/mISDN/socket.c",
84184 + [54771].name = "_l2_alloc_skb",
84185 + [54771].param1 = 1,
84186 + [54777].file = "drivers/net/wireless/ath/ath6kl/debug.c",
84187 + [54777].name = "ath6kl_debug_roam_tbl_event",
84188 + [54777].param3 = 1,
84189 + [54806].file = "drivers/scsi/lpfc/lpfc_debugfs.c",
84190 + [54806].name = "lpfc_debugfs_dif_err_write",
84191 + [54806].param3 = 1,
84192 + [5494].file = "fs/cifs/cifsacl.c",
84193 + [5494].name = "cifs_idmap_key_instantiate",
84194 + [5494].param3 = 1,
84195 + [55066].file = "net/ipv6/ipv6_sockglue.c",
84196 + [55066].name = "do_ipv6_setsockopt",
84197 + [55066].param5 = 1,
84198 + [55105].file = "drivers/base/devres.c",
84199 + [55105].name = "devres_alloc",
84200 + [55105].param2 = 1,
84201 + [55115].file = "net/sctp/probe.c",
84202 + [55115].name = "sctpprobe_read",
84203 + [55115].param3 = 1,
84204 + [55155].file = "net/bluetooth/rfcomm/sock.c",
84205 + [55155].name = "rfcomm_sock_setsockopt",
84206 + [55155].param5 = 1,
84207 + [55187].file = "security/keys/keyctl.c",
84208 + [55187].name = "keyctl_describe_key",
84209 + [55187].param3 = 1,
84210 + [55253].file = "drivers/net/wireless/ray_cs.c",
84211 + [55253].name = "ray_cs_essid_proc_write",
84212 + [55253].param3 = 1,
84213 + [55341].file = "drivers/staging/sep/sep_driver.c",
84214 + [55341].name = "sep_prepare_input_output_dma_table_in_dcb",
84215 + [55341].param4 = 1,
84216 + [55341].param5 = 1,
84217 + [55417].file = "drivers/hv/channel.c",
84218 + [55417].name = "vmbus_open",
84219 + [55417].param2 = 1,
84220 + [55417].param3 = 1,
84221 + [5548].file = "drivers/media/media-entity.c",
84222 + [5548].name = "media_entity_init",
84223 + [5548].param2 = 1,
84224 + [5548].param4 = 1,
84225 + [55546].file = "drivers/spi/spi.c",
84226 + [55546].name = "spi_alloc_master",
84227 + [55546].param2 = 1,
84228 + [55580].file = "drivers/usb/mon/mon_bin.c",
84229 + [55580].name = "copy_from_buf",
84230 + [55580].param2 = 1,
84231 + [55584].file = "drivers/tty/tty_buffer.c",
84232 + [55584].name = "tty_buffer_alloc",
84233 + [55584].param2 = 1,
84234 + [55712].file = "drivers/char/mem.c",
84235 + [55712].name = "read_zero",
84236 + [55712].param3 = 1,
84237 + [55727].file = "drivers/media/video/stk-webcam.c",
84238 + [55727].name = "stk_prepare_sio_buffers",
84239 + [55727].param2 = 1,
84240 + [55816].file = "drivers/misc/altera-stapl/altera-jtag.c",
84241 + [55816].name = "altera_set_ir_pre",
84242 + [55816].param2 = 1,
84243 + [55826].file = "drivers/infiniband/hw/ipath/ipath_file_ops.c",
84244 + [55826].name = "ipath_get_base_info",
84245 + [55826].param3 = 1,
84246 + [5586].file = "net/atm/common.c",
84247 + [5586].name = "alloc_tx",
84248 + [5586].param2 = 1,
84249 + [55978].file = "drivers/usb/misc/iowarrior.c",
84250 + [55978].name = "iowarrior_write",
84251 + [55978].param3 = 1,
84252 + [56170].file = "drivers/usb/wusbcore/wa-xfer.c",
84253 + [56170].name = "__wa_xfer_setup_segs",
84254 + [56170].param2 = 1,
84255 + [56199].file = "fs/binfmt_misc.c",
84256 + [56199].name = "parse_command",
84257 + [56199].param2 = 1,
84258 + [56218].file = "drivers/mmc/card/mmc_test.c",
84259 + [56218].name = "mtf_test_write",
84260 + [56218].param3 = 1,
84261 + [56239].file = "fs/sysfs/file.c",
84262 + [56239].name = "fill_write_buffer",
84263 + [56239].param3 = 1,
84264 + [5624].file = "drivers/net/wireless/ath/ath9k/wmi.c",
84265 + [5624].name = "ath9k_wmi_cmd",
84266 + [5624].param4 = 1,
84267 + [56416].file = "drivers/misc/lkdtm.c",
84268 + [56416].name = "do_register_entry",
84269 + [56416].param4 = 1,
84270 + [56458].file = "drivers/usb/host/hwa-hc.c",
84271 + [56458].name = "__hwahc_op_set_ptk",
84272 + [56458].param5 = 1,
84273 + [56471].file = "include/linux/slab.h",
84274 + [56471].name = "kcalloc",
84275 + [56471].param1 = 1,
84276 + [56471].param2 = 1,
84277 + [56513].file = "fs/cifs/connect.c",
84278 + [56513].name = "cifs_readv_from_socket",
84279 + [56513].param3 = 1,
84280 + [56531].file = "net/bluetooth/l2cap_core.c",
84281 + [56531].name = "l2cap_send_cmd",
84282 + [56531].param4 = 1,
84283 + [56544].file = "drivers/block/drbd/drbd_receiver.c",
84284 + [56544].name = "receive_DataRequest",
84285 + [56544].param3 = 1,
84286 + [56609].file = "lib/mpi/mpi-internal.h",
84287 + [56609].name = "RESIZE_IF_NEEDED",
84288 + [56609].param2 = 1,
84289 + [56652].file = "drivers/misc/altera-stapl/altera-jtag.c",
84290 + [56652].name = "altera_set_dr_post",
84291 + [56652].param2 = 1,
84292 + [56653].file = "net/irda/af_irda.c",
84293 + [56653].name = "irda_setsockopt",
84294 + [56653].param5 = 1,
84295 + [56672].file = "drivers/char/agp/generic.c",
84296 + [56672].name = "agp_alloc_page_array",
84297 + [56672].param1 = 1,
84298 + [56798].file = "fs/bio.c",
84299 + [56798].name = "bio_alloc_map_data",
84300 + [56798].param2 = 1,
84301 + [56843].file = "drivers/scsi/scsi_transport_iscsi.c",
84302 + [56843].name = "iscsi_recv_pdu",
84303 + [56843].param4 = 1,
84304 + [56903].file = "drivers/mtd/mtdchar.c",
84305 + [56903].name = "mtdchar_readoob",
84306 + [56903].param4 = 1,
84307 + [5699].file = "net/sctp/socket.c",
84308 + [5699].name = "sctp_setsockopt_default_send_param",
84309 + [5699].param3 = 1,
84310 + [5704].file = "drivers/mtd/mtdswap.c",
84311 + [5704].name = "mtdswap_init",
84312 + [5704].param2 = 1,
84313 + [57128].file = "drivers/pnp/pnpbios/proc.c",
84314 + [57128].name = "pnpbios_proc_write",
84315 + [57128].param3 = 1,
84316 + [57190].file = "drivers/char/agp/generic.c",
84317 + [57190].name = "agp_generic_alloc_user",
84318 + [57190].param1 = 1,
84319 + [57252].file = "drivers/media/dvb/dvb-core/dmxdev.c",
84320 + [57252].name = "dvb_dmxdev_set_buffer_size",
84321 + [57252].param2 = 1,
84322 + [57392].file = "drivers/block/aoe/aoecmd.c",
84323 + [57392].name = "new_skb",
84324 + [57392].param1 = 1,
84325 + [57471].file = "drivers/media/video/sn9c102/sn9c102_core.c",
84326 + [57471].name = "sn9c102_read",
84327 + [57471].param3 = 1,
84328 + [57547].file = "security/keys/encrypted-keys/encrypted.c",
84329 + [57547].name = "get_derived_key",
84330 + [57547].param4 = 1,
84331 + [57552].file = "net/sunrpc/cache.c",
84332 + [57552].name = "cache_slow_downcall",
84333 + [57552].param2 = 1,
84334 + [57670].file = "drivers/bluetooth/btmrvl_debugfs.c",
84335 + [57670].name = "btmrvl_pscmd_write",
84336 + [57670].param3 = 1,
84337 + [57710].file = "include/linux/usb/wusb.h",
84338 + [57710].name = "wusb_prf_256",
84339 + [57710].param7 = 1,
84340 + [57724].file = "net/bluetooth/hci_sock.c",
84341 + [57724].name = "hci_sock_setsockopt",
84342 + [57724].param5 = 1,
84343 + [57761].file = "kernel/kexec.c",
84344 + [57761].name = "kimage_crash_alloc",
84345 + [57761].param3 = 1,
84346 + [57786].file = "net/ipv6/netfilter/ip6_tables.c",
84347 + [57786].name = "compat_do_ip6t_set_ctl",
84348 + [57786].param4 = 1,
84349 + [57872].file = "fs/ceph/xattr.c",
84350 + [57872].name = "ceph_setxattr",
84351 + [57872].param4 = 1,
84352 + [57927].file = "fs/read_write.c",
84353 + [57927].name = "sys_preadv",
84354 + [57927].param3 = 1,
84355 + [58012].file = "include/net/bluetooth/bluetooth.h",
84356 + [58012].name = "bt_skb_alloc",
84357 + [58012].param1 = 1,
84358 + [58020].file = "drivers/firewire/core-cdev.c",
84359 + [58020].name = "fw_device_op_ioctl",
84360 + [58020].param2 = 1,
84361 + [58043].file = "kernel/auditfilter.c",
84362 + [58043].name = "audit_unpack_string",
84363 + [58043].param3 = 1,
84364 + [58087].file = "kernel/module.c",
84365 + [58087].name = "module_alloc_update_bounds_rw",
84366 + [58087].param1 = 1,
84367 + [58124].file = "drivers/usb/misc/usbtest.c",
84368 + [58124].name = "ctrl_out",
84369 + [58124].param3 = 1,
84370 + [58124].param5 = 1,
84371 + [58217].file = "net/sctp/socket.c",
84372 + [58217].name = "sctp_setsockopt_peer_primary_addr",
84373 + [58217].param3 = 1,
84374 + [58263].file = "security/keys/keyring.c",
84375 + [58263].name = "keyring_read",
84376 + [58263].param3 = 1,
84377 + [5830].file = "drivers/gpu/vga/vga_switcheroo.c",
84378 + [5830].name = "vga_switcheroo_debugfs_write",
84379 + [5830].param3 = 1,
84380 + [58320].file = "drivers/scsi/scsi_proc.c",
84381 + [58320].name = "proc_scsi_write",
84382 + [58320].param3 = 1,
84383 + [58344].file = "net/sunrpc/cache.c",
84384 + [58344].name = "read_flush",
84385 + [58344].param3 = 1,
84386 + [58379].file = "mm/nobootmem.c",
84387 + [58379].name = "__alloc_bootmem_node",
84388 + [58379].param2 = 1,
84389 + [58597].file = "kernel/kfifo.c",
84390 + [58597].name = "__kfifo_to_user",
84391 + [58597].param3 = 1,
84392 + [58641].file = "drivers/usb/misc/adutux.c",
84393 + [58641].name = "adu_write",
84394 + [58641].param3 = 1,
84395 + [58709].file = "fs/compat.c",
84396 + [58709].name = "compat_sys_pwritev",
84397 + [58709].param3 = 1,
84398 + [58769].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
84399 + [58769].name = "zd_usb_read_fw",
84400 + [58769].param4 = 1,
84401 + [5876].file = "drivers/net/ppp/ppp_generic.c",
84402 + [5876].name = "ppp_write",
84403 + [5876].param3 = 1,
84404 + [58826].file = "net/sunrpc/xprt.c",
84405 + [58826].name = "xprt_alloc",
84406 + [58826].param2 = 1,
84407 + [58865].file = "include/linux/slub_def.h",
84408 + [58865].name = "kmalloc_order_trace",
84409 + [58865].param1 = 1,
84410 + [58867].file = "drivers/platform/x86/asus_acpi.c",
84411 + [58867].name = "wled_proc_write",
84412 + [58867].param3 = 1,
84413 + [58888].file = "fs/xattr.c",
84414 + [58888].name = "listxattr",
84415 + [58888].param3 = 1,
84416 + [58889].file = "kernel/trace/trace_kprobe.c",
84417 + [58889].name = "probes_write",
84418 + [58889].param3 = 1,
84419 + [58912].file = "drivers/lguest/core.c",
84420 + [58912].name = "__lgwrite",
84421 + [58912].param4 = 1,
84422 + [58918].file = "sound/core/pcm_native.c",
84423 + [58918].name = "snd_pcm_aio_write",
84424 + [58918].param3 = 1,
84425 + [58942].file = "drivers/block/aoe/aoedev.c",
84426 + [58942].name = "aoedev_flush",
84427 + [58942].param2 = 1,
84428 + [58958].file = "fs/fuse/control.c",
84429 + [58958].name = "fuse_conn_limit_write",
84430 + [58958].param3 = 1,
84431 + [59005].file = "drivers/staging/sep/sep_driver.c",
84432 + [59005].name = "sep_prepare_input_dma_table",
84433 + [59005].param2 = 1,
84434 + [59005].param3 = 1,
84435 + [59013].file = "fs/xfs/xfs_ioctl.c",
84436 + [59013].name = "xfs_handle_to_dentry",
84437 + [59013].param3 = 1,
84438 + [59034].file = "drivers/acpi/acpica/dsobject.c",
84439 + [59034].name = "acpi_ds_build_internal_package_obj",
84440 + [59034].param3 = 1,
84441 + [59073].file = "drivers/staging/speakup/i18n.c",
84442 + [59073].name = "msg_set",
84443 + [59073].param3 = 1,
84444 + [59074].file = "drivers/scsi/cxgbi/libcxgbi.c",
84445 + [59074].name = "ddp_make_gl",
84446 + [59074].param1 = 1,
84447 + [59297].file = "drivers/media/dvb/ttpci/av7110_av.c",
84448 + [59297].name = "dvb_play",
84449 + [59297].param3 = 1,
84450 + [59472].file = "drivers/misc/ibmasm/ibmasmfs.c",
84451 + [59472].name = "command_file_write",
84452 + [59472].param3 = 1,
84453 + [59504].file = "fs/exofs/super.c",
84454 + [59504].name = "__alloc_dev_table",
84455 + [59504].param2 = 1,
84456 + [59505].file = "drivers/media/video/pvrusb2/pvrusb2-ioread.c",
84457 + [59505].name = "pvr2_ioread_read",
84458 + [59505].param3 = 1,
84459 + [59681].file = "fs/xfs/kmem.c",
84460 + [59681].name = "kmem_alloc",
84461 + [59681].param1 = 1,
84462 + [5968].file = "net/sunrpc/sched.c",
84463 + [5968].name = "rpc_malloc",
84464 + [5968].param2 = 1,
84465 + [59695].file = "net/ipv4/netfilter/ipt_ULOG.c",
84466 + [59695].name = "ulog_alloc_skb",
84467 + [59695].param1 = 1,
84468 + [59838].file = "net/netlink/af_netlink.c",
84469 + [59838].name = "nl_pid_hash_zalloc",
84470 + [59838].param1 = 1,
84471 + [59856].file = "drivers/base/devres.c",
84472 + [59856].name = "devm_kzalloc",
84473 + [59856].param2 = 1,
84474 + [60066].file = "mm/filemap.c",
84475 + [60066].name = "iov_iter_copy_from_user",
84476 + [60066].param4 = 1,
84477 + [60185].file = "kernel/params.c",
84478 + [60185].name = "kmalloc_parameter",
84479 + [60185].param1 = 1,
84480 + [60198].file = "fs/nfs/nfs4proc.c",
84481 + [60198].name = "nfs4_write_cached_acl",
84482 + [60198].param3 = 1,
84483 + [60330].file = "drivers/media/video/w9966.c",
84484 + [60330].name = "w9966_v4l_read",
84485 + [60330].param3 = 1,
84486 + [604].file = "drivers/staging/rtl8712/usb_ops_linux.c",
84487 + [604].name = "r8712_usbctrl_vendorreq",
84488 + [604].param6 = 1,
84489 + [60543].file = "drivers/usb/class/usbtmc.c",
84490 + [60543].name = "usbtmc_read",
84491 + [60543].param3 = 1,
84492 + [60683].file = "sound/drivers/opl4/opl4_proc.c",
84493 + [60683].name = "snd_opl4_mem_proc_write",
84494 + [60683].param5 = 1,
84495 + [60693].file = "drivers/misc/hpilo.c",
84496 + [60693].name = "ilo_read",
84497 + [60693].param3 = 1,
84498 + [60744].file = "sound/pci/emu10k1/emuproc.c",
84499 + [60744].name = "snd_emu10k1_fx8010_read",
84500 + [60744].param5 = 1,
84501 + [60777].file = "fs/ntfs/malloc.h",
84502 + [60777].name = "ntfs_malloc_nofs_nofail",
84503 + [60777].param1 = 1,
84504 + [60833].file = "drivers/block/aoe/aoenet.c",
84505 + [60833].name = "set_aoe_iflist",
84506 + [60833].param2 = 1,
84507 + [60882].file = "drivers/input/joydev.c",
84508 + [60882].name = "joydev_compat_ioctl",
84509 + [60882].param2 = 1,
84510 + [60891].file = "kernel/sched/core.c",
84511 + [60891].name = "sys_sched_setaffinity",
84512 + [60891].param2 = 1,
84513 + [60920].file = "drivers/infiniband/hw/qib/qib_file_ops.c",
84514 + [60920].name = "qib_get_base_info",
84515 + [60920].param3 = 1,
84516 + [60928].file = "drivers/staging/bcm/Bcmchar.c",
84517 + [60928].name = "bcm_char_read",
84518 + [60928].param3 = 1,
84519 + [61122].file = "drivers/base/devres.c",
84520 + [61122].name = "alloc_dr",
84521 + [61122].param2 = 1,
84522 + [61254].file = "drivers/scsi/scsi_devinfo.c",
84523 + [61254].name = "proc_scsi_devinfo_write",
84524 + [61254].param3 = 1,
84525 + [61283].file = "drivers/net/wireless/ath/ath6kl/debug.c",
84526 + [61283].name = "ath6kl_fwlog_read",
84527 + [61283].param3 = 1,
84528 + [61289].file = "security/apparmor/apparmorfs.c",
84529 + [61289].name = "aa_simple_write_to_buffer",
84530 + [61289].param4 = 1,
84531 + [61389].file = "include/linux/slab.h",
84532 + [61389].name = "kzalloc_node",
84533 + [61389].param1 = 1,
84534 + [61441].file = "fs/ntfs/file.c",
84535 + [61441].name = "ntfs_copy_from_user_iovec",
84536 + [61441].param3 = 1,
84537 + [61441].param6 = 1,
84538 + [61552].file = "drivers/input/evdev.c",
84539 + [61552].name = "str_to_user",
84540 + [61552].param2 = 1,
84541 + [61673].file = "security/keys/trusted.c",
84542 + [61673].name = "trusted_update",
84543 + [61673].param3 = 1,
84544 + [61676].file = "kernel/module.c",
84545 + [61676].name = "module_alloc_update_bounds_rx",
84546 + [61676].param1 = 1,
84547 + [61684].file = "drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c",
84548 + [61684].name = "cxgb3_get_cpl_reply_skb",
84549 + [61684].param2 = 1,
84550 + [6173].file = "net/netlink/af_netlink.c",
84551 + [6173].name = "netlink_sendmsg",
84552 + [6173].param4 = 1,
84553 + [61770].file = "drivers/media/video/et61x251/et61x251_core.c",
84554 + [61770].name = "et61x251_read",
84555 + [61770].param3 = 1,
84556 + [61772].file = "fs/exofs/ore_raid.c",
84557 + [61772].name = "_sp2d_alloc",
84558 + [61772].param1 = 1,
84559 + [61772].param2 = 1,
84560 + [61772].param3 = 1,
84561 + [61926].file = "drivers/media/dvb/ddbridge/ddbridge-core.c",
84562 + [61926].name = "ddb_input_read",
84563 + [61926].param3 = 1,
84564 + [61932].file = "drivers/message/fusion/mptctl.c",
84565 + [61932].name = "__mptctl_ioctl",
84566 + [61932].param2 = 1,
84567 + [61966].file = "fs/nfs/nfs4proc.c",
84568 + [61966].name = "nfs4_alloc_slots",
84569 + [61966].param1 = 1,
84570 + [62081].file = "drivers/net/irda/vlsi_ir.c",
84571 + [62081].name = "vlsi_alloc_ring",
84572 + [62081].param3 = 1,
84573 + [62081].param4 = 1,
84574 + [62116].file = "fs/libfs.c",
84575 + [62116].name = "simple_attr_read",
84576 + [62116].param3 = 1,
84577 + [6211].file = "drivers/net/ethernet/amd/pcnet32.c",
84578 + [6211].name = "pcnet32_realloc_tx_ring",
84579 + [6211].param3 = 1,
84580 + [62294].file = "sound/core/info.c",
84581 + [62294].name = "resize_info_buffer",
84582 + [62294].param2 = 1,
84583 + [62387].file = "fs/nfs/idmap.c",
84584 + [62387].name = "nfs_idmap_lookup_id",
84585 + [62387].param2 = 1,
84586 + [62465].file = "drivers/misc/altera-stapl/altera-jtag.c",
84587 + [62465].name = "altera_set_dr_pre",
84588 + [62465].param2 = 1,
84589 + [62466].file = "lib/mpi/mpiutil.c",
84590 + [62466].name = "mpi_alloc",
84591 + [62466].param1 = 1,
84592 + [62495].file = "drivers/block/floppy.c",
84593 + [62495].name = "fallback_on_nodma_alloc",
84594 + [62495].param2 = 1,
84595 + [62498].file = "fs/xattr.c",
84596 + [62498].name = "sys_listxattr",
84597 + [62498].param3 = 1,
84598 + [625].file = "fs/read_write.c",
84599 + [625].name = "sys_pwritev",
84600 + [625].param3 = 1,
84601 + [62662].file = "drivers/message/fusion/mptctl.c",
84602 + [62662].name = "mptctl_getiocinfo",
84603 + [62662].param2 = 1,
84604 + [62669].file = "drivers/platform/x86/asus_acpi.c",
84605 + [62669].name = "tled_proc_write",
84606 + [62669].param3 = 1,
84607 + [62714].file = "security/keys/keyctl.c",
84608 + [62714].name = "keyctl_update_key",
84609 + [62714].param3 = 1,
84610 + [62760].file = "drivers/media/dvb/ttpci/av7110_av.c",
84611 + [62760].name = "play_iframe",
84612 + [62760].param3 = 1,
84613 + [62851].file = "fs/proc/vmcore.c",
84614 + [62851].name = "read_vmcore",
84615 + [62851].param3 = 1,
84616 + [62870].file = "fs/nfs/idmap.c",
84617 + [62870].name = "nfs_idmap_get_desc",
84618 + [62870].param2 = 1,
84619 + [62870].param4 = 1,
84620 + [62905].file = "net/caif/cfpkt_skbuff.c",
84621 + [62905].name = "cfpkt_create",
84622 + [62905].param1 = 1,
84623 + [62920].file = "drivers/net/wireless/b43/phy_n.c",
84624 + [62920].name = "b43_nphy_load_samples",
84625 + [62920].param3 = 1,
84626 + [62925].file = "include/rdma/ib_verbs.h",
84627 + [62925].name = "ib_copy_from_udata",
84628 + [62925].param3 = 1,
84629 + [62934].file = "drivers/net/wireless/wl1251/cmd.c",
84630 + [62934].name = "wl1251_cmd_template_set",
84631 + [62934].param4 = 1,
84632 + [62940].file = "drivers/scsi/libsrp.c",
84633 + [62940].name = "srp_ring_alloc",
84634 + [62940].param2 = 1,
84635 + [62967].file = "security/keys/encrypted-keys/encrypted.c",
84636 + [62967].name = "encrypted_update",
84637 + [62967].param3 = 1,
84638 + [62970].file = "net/sched/sch_api.c",
84639 + [62970].name = "qdisc_class_hash_alloc",
84640 + [62970].param1 = 1,
84641 + [62999].file = "net/core/neighbour.c",
84642 + [62999].name = "neigh_hash_alloc",
84643 + [62999].param1 = 1,
84644 + [63007].file = "fs/proc/base.c",
84645 + [63007].name = "proc_coredump_filter_write",
84646 + [63007].param3 = 1,
84647 + [63010].file = "drivers/gpu/drm/ttm/ttm_page_alloc.c",
84648 + [63010].name = "ttm_page_pool_free",
84649 + [63010].param2 = 1,
84650 + [63045].file = "crypto/shash.c",
84651 + [63045].name = "shash_setkey_unaligned",
84652 + [63045].param3 = 1,
84653 + [63075].file = "kernel/relay.c",
84654 + [63075].name = "relay_alloc_page_array",
84655 + [63075].param1 = 1,
84656 + [63076].file = "fs/cifs/xattr.c",
84657 + [63076].name = "cifs_setxattr",
84658 + [63076].param4 = 1,
84659 + [63091].file = "drivers/net/usb/pegasus.c",
84660 + [63091].name = "get_registers",
84661 + [63091].param3 = 1,
84662 + [6331].file = "drivers/atm/solos-pci.c",
84663 + [6331].name = "solos_param_store",
84664 + [6331].param4 = 1,
84665 + [63367].file = "net/netfilter/ipset/ip_set_core.c",
84666 + [63367].name = "ip_set_alloc",
84667 + [63367].param1 = 1,
84668 + [63489].file = "drivers/bluetooth/btmrvl_debugfs.c",
84669 + [63489].name = "btmrvl_hscfgcmd_write",
84670 + [63489].param3 = 1,
84671 + [63490].file = "crypto/shash.c",
84672 + [63490].name = "shash_compat_setkey",
84673 + [63490].param3 = 1,
84674 + [63605].file = "mm/mempool.c",
84675 + [63605].name = "mempool_kmalloc",
84676 + [63605].param2 = 1,
84677 + [63633].file = "drivers/bluetooth/btmrvl_sdio.c",
84678 + [63633].name = "btmrvl_sdio_host_to_card",
84679 + [63633].param3 = 1,
84680 + [63961].file = "fs/xattr.c",
84681 + [63961].name = "sys_flistxattr",
84682 + [63961].param3 = 1,
84683 + [63964].file = "net/sctp/socket.c",
84684 + [63964].name = "sctp_setsockopt_maxseg",
84685 + [63964].param3 = 1,
84686 + [63988].file = "drivers/input/evdev.c",
84687 + [63988].name = "evdev_ioctl_compat",
84688 + [63988].param2 = 1,
84689 + [64055].file = "drivers/media/dvb/ttpci/av7110_av.c",
84690 + [64055].name = "dvb_aplay",
84691 + [64055].param3 = 1,
84692 + [64156].file = "drivers/net/wireless/ath/ath6kl/cfg80211.c",
84693 + [64156].name = "ath6kl_mgmt_tx",
84694 + [64156].param9 = 1,
84695 + [64226].file = "drivers/md/persistent-data/dm-space-map-checker.c",
84696 + [64226].name = "ca_extend",
84697 + [64226].param2 = 1,
84698 + [64227].file = "mm/nobootmem.c",
84699 + [64227].name = "__alloc_bootmem_node_nopanic",
84700 + [64227].param2 = 1,
84701 + [64351].file = "kernel/kfifo.c",
84702 + [64351].name = "kfifo_copy_from_user",
84703 + [64351].param3 = 1,
84704 + [64392].file = "drivers/mmc/core/mmc_ops.c",
84705 + [64392].name = "mmc_send_cxd_data",
84706 + [64392].param5 = 1,
84707 + [64423].file = "kernel/sched/core.c",
84708 + [64423].name = "get_user_cpu_mask",
84709 + [64423].param2 = 1,
84710 + [64432].file = "security/selinux/selinuxfs.c",
84711 + [64432].name = "sel_write_create",
84712 + [64432].param3 = 1,
84713 + [64471].file = "drivers/bluetooth/btmrvl_debugfs.c",
84714 + [64471].name = "btmrvl_hscmd_write",
84715 + [64471].param3 = 1,
84716 + [64667].file = "sound/core/oss/pcm_oss.c",
84717 + [64667].name = "snd_pcm_oss_read",
84718 + [64667].param3 = 1,
84719 + [64689].file = "sound/isa/gus/gus_dram.c",
84720 + [64689].name = "snd_gus_dram_read",
84721 + [64689].param4 = 1,
84722 + [64692].file = "fs/binfmt_misc.c",
84723 + [64692].name = "bm_entry_write",
84724 + [64692].param3 = 1,
84725 + [64705].file = "drivers/staging/iio/accel/sca3000_ring.c",
84726 + [64705].name = "sca3000_read_first_n_hw_rb",
84727 + [64705].param2 = 1,
84728 + [64713].file = "fs/cifs/connect.c",
84729 + [64713].name = "extract_hostname",
84730 + [64713].param1 = 1,
84731 + [64743].file = "fs/ocfs2/dlmfs/dlmfs.c",
84732 + [64743].name = "dlmfs_file_read",
84733 + [64743].param3 = 1,
84734 + [64771].file = "security/keys/encrypted-keys/encrypted.c",
84735 + [64771].name = "datablob_format",
84736 + [64771].param2 = 1,
84737 + [6477].file = "net/bluetooth/mgmt.c",
84738 + [6477].name = "mgmt_pending_add",
84739 + [6477].param5 = 1,
84740 + [64906].file = "drivers/net/wireless/b43legacy/debugfs.c",
84741 + [64906].name = "b43legacy_debugfs_write",
84742 + [64906].param3 = 1,
84743 + [64913].file = "sound/core/oss/pcm_oss.c",
84744 + [64913].name = "snd_pcm_oss_write1",
84745 + [64913].param3 = 1,
84746 + [64961].file = "drivers/spi/spidev.c",
84747 + [64961].name = "spidev_ioctl",
84748 + [64961].param2 = 1,
84749 + [65033].file = "crypto/shash.c",
84750 + [65033].name = "shash_async_setkey",
84751 + [65033].param3 = 1,
84752 + [65093].file = "security/integrity/evm/evm_secfs.c",
84753 + [65093].name = "evm_write_key",
84754 + [65093].param3 = 1,
84755 + [6514].file = "mm/nobootmem.c",
84756 + [6514].name = "__alloc_bootmem_low",
84757 + [6514].param1 = 1,
84758 + [65169].file = "net/core/skbuff.c",
84759 + [65169].name = "dev_alloc_skb",
84760 + [65169].param1 = 1,
84761 + [6517].file = "drivers/md/dm-table.c",
84762 + [6517].name = "alloc_targets",
84763 + [6517].param2 = 1,
84764 + [65205].file = "drivers/input/evdev.c",
84765 + [65205].name = "handle_eviocgbit",
84766 + [65205].param3 = 1,
84767 + [65237].file = "kernel/profile.c",
84768 + [65237].name = "read_profile",
84769 + [65237].param3 = 1,
84770 + [65343].file = "kernel/trace/trace.c",
84771 + [65343].name = "tracing_clock_write",
84772 + [65343].param3 = 1,
84773 + [65345].file = "lib/xz/xz_dec_lzma2.c",
84774 + [65345].name = "xz_dec_lzma2_create",
84775 + [65345].param2 = 1,
84776 + [65409].file = "net/802/garp.c",
84777 + [65409].name = "garp_request_join",
84778 + [65409].param4 = 1,
84779 + [65432].file = "drivers/hid/hid-roccat-kone.c",
84780 + [65432].name = "kone_receive",
84781 + [65432].param4 = 1,
84782 + [65514].file = "drivers/media/video/gspca/t613.c",
84783 + [65514].name = "reg_w_ixbuf",
84784 + [65514].param4 = 1,
84785 + [6551].file = "drivers/usb/host/xhci-mem.c",
84786 + [6551].name = "xhci_alloc_stream_info",
84787 + [6551].param3 = 1,
84788 + [65535].file = "drivers/media/dvb/dvb-usb/opera1.c",
84789 + [65535].name = "opera1_xilinx_rw",
84790 + [65535].param5 = 1,
84791 + [6672].file = "drivers/net/wireless/b43/debugfs.c",
84792 + [6672].name = "b43_debugfs_write",
84793 + [6672].param3 = 1,
84794 + [6691].file = "drivers/acpi/proc.c",
84795 + [6691].name = "acpi_system_write_wakeup_device",
84796 + [6691].param3 = 1,
84797 + [6865].file = "drivers/staging/iio/ring_sw.c",
84798 + [6865].name = "iio_read_first_n_sw_rb",
84799 + [6865].param2 = 1,
84800 + [6867].file = "fs/coda/psdev.c",
84801 + [6867].name = "coda_psdev_read",
84802 + [6867].param3 = 1,
84803 + [6891].file = "drivers/bluetooth/btmrvl_debugfs.c",
84804 + [6891].name = "btmrvl_gpiogap_write",
84805 + [6891].param3 = 1,
84806 + [6944].file = "drivers/ide/ide-proc.c",
84807 + [6944].name = "ide_settings_proc_write",
84808 + [6944].param3 = 1,
84809 + [6950].file = "drivers/isdn/capi/capi.c",
84810 + [6950].name = "capi_write",
84811 + [6950].param3 = 1,
84812 + [697].file = "sound/isa/gus/gus_dram.c",
84813 + [697].name = "snd_gus_dram_peek",
84814 + [697].param4 = 1,
84815 + [7066].file = "security/keys/keyctl.c",
84816 + [7066].name = "keyctl_instantiate_key_common",
84817 + [7066].param4 = 1,
84818 + [7125].file = "include/net/nfc/nci_core.h",
84819 + [7125].name = "nci_skb_alloc",
84820 + [7125].param2 = 1,
84821 + [7129].file = "mm/maccess.c",
84822 + [7129].name = "__probe_kernel_read",
84823 + [7129].param3 = 1,
84824 + [7158].file = "kernel/trace/trace.c",
84825 + [7158].name = "tracing_read_pipe",
84826 + [7158].param3 = 1,
84827 + [720].file = "sound/pci/rme9652/hdsp.c",
84828 + [720].name = "snd_hdsp_playback_copy",
84829 + [720].param5 = 1,
84830 + [7236].file = "drivers/gpu/drm/drm_crtc.c",
84831 + [7236].name = "drm_plane_init",
84832 + [7236].param6 = 1,
84833 + [7411].file = "drivers/vhost/vhost.c",
84834 + [7411].name = "__vhost_add_used_n",
84835 + [7411].param3 = 1,
84836 + [7432].file = "net/bluetooth/mgmt.c",
84837 + [7432].name = "mgmt_event",
84838 + [7432].param4 = 1,
84839 + [7488].file = "security/keys/user_defined.c",
84840 + [7488].name = "user_read",
84841 + [7488].param3 = 1,
84842 + [7551].file = "drivers/input/touchscreen/ad7879-spi.c",
84843 + [7551].name = "ad7879_spi_xfer",
84844 + [7551].param3 = 1,
84845 + [7671].file = "mm/nobootmem.c",
84846 + [7671].name = "__alloc_bootmem_node_high",
84847 + [7671].param2 = 1,
84848 + [7676].file = "drivers/acpi/custom_method.c",
84849 + [7676].name = "cm_write",
84850 + [7676].param3 = 1,
84851 + [7693].file = "net/sctp/socket.c",
84852 + [7693].name = "sctp_setsockopt_associnfo",
84853 + [7693].param3 = 1,
84854 + [7697].file = "security/selinux/selinuxfs.c",
84855 + [7697].name = "sel_write_access",
84856 + [7697].param3 = 1,
84857 + [7843].file = "fs/compat.c",
84858 + [7843].name = "compat_sys_readv",
84859 + [7843].param3 = 1,
84860 + [7883].file = "net/sched/sch_sfq.c",
84861 + [7883].name = "sfq_alloc",
84862 + [7883].param1 = 1,
84863 + [7924].file = "drivers/media/video/cx18/cx18-fileops.c",
84864 + [7924].name = "cx18_read_pos",
84865 + [7924].param3 = 1,
84866 + [7958].file = "drivers/gpu/vga/vgaarb.c",
84867 + [7958].name = "vga_arb_write",
84868 + [7958].param3 = 1,
84869 + [7976].file = "drivers/usb/gadget/rndis.c",
84870 + [7976].name = "rndis_add_response",
84871 + [7976].param2 = 1,
84872 + [7985].file = "net/mac80211/cfg.c",
84873 + [7985].name = "ieee80211_mgmt_tx",
84874 + [7985].param9 = 1,
84875 + [8014].file = "net/netfilter/ipset/ip_set_list_set.c",
84876 + [8014].name = "init_list_set",
84877 + [8014].param2 = 1,
84878 + [8014].param3 = 1,
84879 + [8126].file = "sound/soc/soc-core.c",
84880 + [8126].name = "codec_reg_read_file",
84881 + [8126].param3 = 1,
84882 + [8317].file = "security/smack/smackfs.c",
84883 + [8317].name = "smk_write_ambient",
84884 + [8317].param3 = 1,
84885 + [8335].file = "drivers/media/dvb/dvb-core/dmxdev.c",
84886 + [8335].name = "dvb_dvr_set_buffer_size",
84887 + [8335].param2 = 1,
84888 + [8383].file = "kernel/module.c",
84889 + [8383].name = "copy_and_check",
84890 + [8383].param3 = 1,
84891 + [8411].file = "net/caif/cfpkt_skbuff.c",
84892 + [8411].name = "cfpkt_append",
84893 + [8411].param3 = 1,
84894 + [8536].file = "fs/cifs/dns_resolve.c",
84895 + [8536].name = "dns_resolve_server_name_to_ip",
84896 + [8536].param1 = 1,
84897 + [857].file = "drivers/virtio/virtio_ring.c",
84898 + [857].name = "virtqueue_add_buf",
84899 + [857].param3 = 1,
84900 + [857].param4 = 1,
84901 + [8650].file = "drivers/gpu/drm/vmwgfx/vmwgfx_kms.c",
84902 + [8650].name = "vmw_kms_present",
84903 + [8650].param9 = 1,
84904 + [8654].file = "drivers/net/wireless/mwifiex/sdio.c",
84905 + [8654].name = "mwifiex_alloc_sdio_mpa_buffers",
84906 + [8654].param2 = 1,
84907 + [8654].param3 = 1,
84908 + [865].file = "drivers/base/regmap/regmap-debugfs.c",
84909 + [865].name = "regmap_access_read_file",
84910 + [865].param3 = 1,
84911 + [8663].file = "net/bridge/netfilter/ebtables.c",
84912 + [8663].name = "do_update_counters",
84913 + [8663].param4 = 1,
84914 + [8684].file = "fs/read_write.c",
84915 + [8684].name = "sys_writev",
84916 + [8684].param3 = 1,
84917 + [8699].file = "security/selinux/selinuxfs.c",
84918 + [8699].name = "sel_commit_bools_write",
84919 + [8699].param3 = 1,
84920 + [8764].file = "drivers/usb/core/devio.c",
84921 + [8764].name = "usbdev_read",
84922 + [8764].param3 = 1,
84923 + [8802].file = "fs/dlm/user.c",
84924 + [8802].name = "device_write",
84925 + [8802].param3 = 1,
84926 + [8810].file = "net/mac80211/debugfs_sta.c",
84927 + [8810].name = "sta_agg_status_write",
84928 + [8810].param3 = 1,
84929 + [8815].file = "security/tomoyo/securityfs_if.c",
84930 + [8815].name = "tomoyo_write_self",
84931 + [8815].param3 = 1,
84932 + [8821].file = "net/wireless/sme.c",
84933 + [8821].name = "cfg80211_roamed",
84934 + [8821].param5 = 1,
84935 + [8821].param7 = 1,
84936 + [8833].file = "security/selinux/ss/services.c",
84937 + [8833].name = "security_context_to_sid",
84938 + [8833].param2 = 1,
84939 + [8838].file = "lib/mpi/mpi-bit.c",
84940 + [8838].name = "mpi_lshift_limbs",
84941 + [8838].param2 = 1,
84942 + [8851].file = "net/key/af_key.c",
84943 + [8851].name = "pfkey_sendmsg",
84944 + [8851].param4 = 1,
84945 + [8917].file = "net/can/raw.c",
84946 + [8917].name = "raw_setsockopt",
84947 + [8917].param5 = 1,
84948 + [8983].file = "include/linux/skbuff.h",
84949 + [8983].name = "alloc_skb",
84950 + [8983].param1 = 1,
84951 + [9117].file = "drivers/base/regmap/regcache-rbtree.c",
84952 + [9117].name = "regcache_rbtree_insert_to_block",
84953 + [9117].param5 = 1,
84954 + [9226].file = "mm/migrate.c",
84955 + [9226].name = "sys_move_pages",
84956 + [9226].param2 = 1,
84957 + [9304].file = "kernel/auditfilter.c",
84958 + [9304].name = "audit_init_entry",
84959 + [9304].param1 = 1,
84960 + [9317].file = "drivers/usb/wusbcore/wa-nep.c",
84961 + [9317].name = "wa_nep_queue",
84962 + [9317].param2 = 1,
84963 + [9341].file = "drivers/acpi/apei/erst-dbg.c",
84964 + [9341].name = "erst_dbg_write",
84965 + [9341].param3 = 1,
84966 + [9386].file = "fs/exofs/ore.c",
84967 + [9386].name = "_ore_get_io_state",
84968 + [9386].param3 = 1,
84969 + [9386].param4 = 1,
84970 + [9386].param5 = 1,
84971 + [9538].file = "crypto/blkcipher.c",
84972 + [9538].name = "blkcipher_copy_iv",
84973 + [9538].param3 = 1,
84974 + [9546].file = "drivers/video/fbmem.c",
84975 + [9546].name = "fb_write",
84976 + [9546].param3 = 1,
84977 + [9601].file = "kernel/kfifo.c",
84978 + [9601].name = "__kfifo_from_user",
84979 + [9601].param3 = 1,
84980 + [9618].file = "security/selinux/selinuxfs.c",
84981 + [9618].name = "sel_write_bool",
84982 + [9618].param3 = 1,
84983 + [9768].file = "drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c",
84984 + [9768].name = "vmw_execbuf_process",
84985 + [9768].param5 = 1,
84986 + [9828].file = "drivers/media/dvb/dvb-core/dmxdev.c",
84987 + [9828].name = "dvb_demux_do_ioctl",
84988 + [9828].param3 = 1,
84989 + [9870].file = "net/atm/addr.c",
84990 + [9870].name = "atm_get_addr",
84991 + [9870].param3 = 1,
84992 + [9977].file = "drivers/net/wireless/zd1211rw/zd_usb.c",
84993 + [9977].name = "zd_usb_iowrite16v_async",
84994 + [9977].param3 = 1,
84995 + [16344].collision = 1,
84996 + [307].collision = 1,
84997 + [31649].collision = 1,
84998 + [33040].collision = 1,
84999 + [45231].collision = 1,
85000 + [60651].collision = 1,
85001 +};
85002 diff --git a/tools/gcc/size_overflow_hash2.h b/tools/gcc/size_overflow_hash2.h
85003 new file mode 100644
85004 index 0000000..9ec45ae
85005 --- /dev/null
85006 +++ b/tools/gcc/size_overflow_hash2.h
85007 @@ -0,0 +1,35 @@
85008 +struct size_overflow_hash size_overflow_hash2[65536] = {
85009 + [22224].file = "fs/proc/vmcore.c",
85010 + [22224].name = "read_from_oldmem",
85011 + [22224].param2 = 1,
85012 + [2344].file = "fs/ecryptfs/crypto.c",
85013 + [2344].name = "ecryptfs_decode_and_decrypt_filename",
85014 + [2344].param5 = 1,
85015 + [2515].file = "fs/ecryptfs/crypto.c",
85016 + [2515].name = "ecryptfs_copy_filename",
85017 + [2515].param4 = 1,
85018 + [26518].file = "drivers/gpu/vga/vgaarb.c",
85019 + [26518].name = "vga_arb_read",
85020 + [26518].param3 = 1,
85021 + [30632].file = "drivers/ide/ide-proc.c",
85022 + [30632].name = "ide_driver_proc_write",
85023 + [30632].param3 = 1,
85024 + [39024].file = "lib/scatterlist.c",
85025 + [39024].name = "sg_kmalloc",
85026 + [39024].param1 = 1,
85027 + [50359].file = "kernel/sched/core.c",
85028 + [50359].name = "alloc_sched_domains",
85029 + [50359].param1 = 1,
85030 + [53262].file = "drivers/block/aoe/aoechr.c",
85031 + [53262].name = "revalidate",
85032 + [53262].param2 = 1,
85033 + [56432].file = "drivers/base/regmap/regmap-debugfs.c",
85034 + [56432].name = "regmap_map_read_file",
85035 + [56432].param3 = 1,
85036 + [57500].file = "drivers/spi/spidev.c",
85037 + [57500].name = "spidev_write",
85038 + [57500].param3 = 1,
85039 + [8155].file = "drivers/hv/channel.c",
85040 + [8155].name = "vmbus_establish_gpadl",
85041 + [8155].param3 = 1,
85042 +};
85043 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
85044 new file mode 100644
85045 index 0000000..4ec0934
85046 --- /dev/null
85047 +++ b/tools/gcc/size_overflow_plugin.c
85048 @@ -0,0 +1,1150 @@
85049 +/*
85050 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
85051 + * Licensed under the GPL v2, or (at your option) v3
85052 + *
85053 + * Homepage:
85054 + * http://www.grsecurity.net/~ephox/overflow_plugin/
85055 + *
85056 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
85057 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
85058 + * The recomputed argument is checked against INT_MAX and an event is logged on overflow and the triggering process is killed.
85059 + *
85060 + * Usage:
85061 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o size_overflow_plugin.so size_overflow_plugin.c
85062 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
85063 + */
85064 +
85065 +#include "gcc-plugin.h"
85066 +#include "config.h"
85067 +#include "system.h"
85068 +#include "coretypes.h"
85069 +#include "tree.h"
85070 +#include "tree-pass.h"
85071 +#include "intl.h"
85072 +#include "plugin-version.h"
85073 +#include "tm.h"
85074 +#include "toplev.h"
85075 +#include "function.h"
85076 +#include "tree-flow.h"
85077 +#include "plugin.h"
85078 +#include "gimple.h"
85079 +#include "c-common.h"
85080 +#include "diagnostic.h"
85081 +#include "cfgloop.h"
85082 +
85083 +struct size_overflow_hash {
85084 + const char *name;
85085 + const char *file;
85086 + unsigned short collision:1;
85087 + unsigned short param1:1;
85088 + unsigned short param2:1;
85089 + unsigned short param3:1;
85090 + unsigned short param4:1;
85091 + unsigned short param5:1;
85092 + unsigned short param6:1;
85093 + unsigned short param7:1;
85094 + unsigned short param8:1;
85095 + unsigned short param9:1;
85096 +};
85097 +
85098 +#include "size_overflow_hash1.h"
85099 +#include "size_overflow_hash2.h"
85100 +
85101 +#define __unused __attribute__((__unused__))
85102 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
85103 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
85104 +#define BEFORE_STMT true
85105 +#define AFTER_STMT false
85106 +#define CREATE_NEW_VAR NULL_TREE
85107 +
85108 +int plugin_is_GPL_compatible;
85109 +void debug_gimple_stmt(gimple gs);
85110 +
85111 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
85112 +static tree signed_size_overflow_type;
85113 +static tree unsigned_size_overflow_type;
85114 +static tree report_size_overflow_decl;
85115 +static tree const_char_ptr_type_node;
85116 +static unsigned int handle_function(void);
85117 +static bool file_match = true;
85118 +
85119 +static struct plugin_info size_overflow_plugin_info = {
85120 + .version = "20120502beta",
85121 + .help = "no-size_overflow\tturn off size overflow checking\n",
85122 +};
85123 +
85124 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
85125 +{
85126 + unsigned int arg_count = type_num_arguments(*node);
85127 +
85128 + for (; args; args = TREE_CHAIN(args)) {
85129 + tree position = TREE_VALUE(args);
85130 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
85131 + error("handle_size_overflow_attribute: overflow parameter outside range.");
85132 + *no_add_attrs = true;
85133 + }
85134 + }
85135 + return NULL_TREE;
85136 +}
85137 +
85138 +static struct attribute_spec no_size_overflow_attr = {
85139 + .name = "size_overflow",
85140 + .min_length = 1,
85141 + .max_length = -1,
85142 + .decl_required = false,
85143 + .type_required = true,
85144 + .function_type_required = true,
85145 + .handler = handle_size_overflow_attribute
85146 +};
85147 +
85148 +static void register_attributes(void __unused *event_data, void __unused *data)
85149 +{
85150 + register_attribute(&no_size_overflow_attr);
85151 +}
85152 +
85153 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
85154 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
85155 +{
85156 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
85157 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
85158 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
85159 +
85160 + const unsigned int m = 0x57559429;
85161 + const unsigned int n = 0x5052acdb;
85162 + const unsigned int *key4 = (const unsigned int *)key;
85163 + unsigned int h = len;
85164 + unsigned int k = len + seed + n;
85165 + unsigned long long p;
85166 +
85167 + while (len >= 8) {
85168 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
85169 + len -= 8;
85170 + }
85171 + if (len >= 4) {
85172 + cwmixb(key4[0]) key4 += 1;
85173 + len -= 4;
85174 + }
85175 + if (len)
85176 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
85177 + cwmixb(h ^ (k + n));
85178 + return k ^ h;
85179 +
85180 +#undef cwfold
85181 +#undef cwmixa
85182 +#undef cwmixb
85183 +}
85184 +
85185 +static inline unsigned int size_overflow_hash(const char *fndecl, unsigned int seed)
85186 +{
85187 + return CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
85188 +}
85189 +
85190 +static inline tree get_original_function_decl(tree fndecl)
85191 +{
85192 + if (DECL_ABSTRACT_ORIGIN(fndecl))
85193 + return DECL_ABSTRACT_ORIGIN(fndecl);
85194 + return fndecl;
85195 +}
85196 +
85197 +static inline gimple get_def_stmt(tree node)
85198 +{
85199 + gcc_assert(TREE_CODE(node) == SSA_NAME);
85200 + return SSA_NAME_DEF_STMT(node);
85201 +}
85202 +
85203 +static struct size_overflow_hash *get_function_hash(tree fndecl)
85204 +{
85205 + unsigned int hash;
85206 + const char *func = NAME(fndecl);
85207 +
85208 + hash = size_overflow_hash(func, 0);
85209 +
85210 + if (size_overflow_hash1[hash].collision) {
85211 + hash = size_overflow_hash(func, 23432);
85212 + return &size_overflow_hash2[hash];
85213 + }
85214 + return &size_overflow_hash1[hash];
85215 +}
85216 +
85217 +static void check_arg_type(tree var)
85218 +{
85219 + tree type = TREE_TYPE(var);
85220 + enum tree_code code = TREE_CODE(type);
85221 +
85222 + gcc_assert(code == INTEGER_TYPE ||
85223 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
85224 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
85225 +}
85226 +
85227 +static void check_missing_attribute(tree arg)
85228 +{
85229 + tree var, type, func = get_original_function_decl(current_function_decl);
85230 + const char *curfunc = NAME(func);
85231 + unsigned int new_hash, argnum = 1;
85232 + struct size_overflow_hash *hash;
85233 + location_t loc;
85234 + expanded_location xloc;
85235 + bool match = false;
85236 +
85237 + type = TREE_TYPE(arg);
85238 + // skip function pointers
85239 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
85240 + return;
85241 +
85242 + loc = DECL_SOURCE_LOCATION(func);
85243 + xloc = expand_location(loc);
85244 +
85245 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
85246 + return;
85247 +
85248 + hash = get_function_hash(func);
85249 + if (hash->name && !strcmp(hash->name, NAME(func)))
85250 + return;
85251 + if (file_match && hash->file && !strcmp(hash->file, xloc.file))
85252 + return;
85253 +
85254 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
85255 +
85256 + if (TREE_CODE(arg) == SSA_NAME)
85257 + arg = SSA_NAME_VAR(arg);
85258 +
85259 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
85260 + if (strcmp(NAME(arg), NAME(var))) {
85261 + argnum++;
85262 + continue;
85263 + }
85264 + check_arg_type(var);
85265 +
85266 + match = true;
85267 + if (!TYPE_UNSIGNED(TREE_TYPE(var)))
85268 + return;
85269 + break;
85270 + }
85271 + if (!match) {
85272 + warning(0, "check_missing_attribute: cannot find the %s argument in %s", NAME(arg), NAME(func));
85273 + return;
85274 + }
85275 +
85276 +#define check_param(num) \
85277 + if (num == argnum && hash->param##num) \
85278 + return;
85279 + check_param(1);
85280 + check_param(2);
85281 + check_param(3);
85282 + check_param(4);
85283 + check_param(5);
85284 + check_param(6);
85285 + check_param(7);
85286 + check_param(8);
85287 + check_param(9);
85288 +#undef check_param
85289 +
85290 + new_hash = size_overflow_hash(curfunc, 0);
85291 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+%s", curfunc, curfunc, argnum, new_hash, xloc.file);
85292 +}
85293 +
85294 +static tree create_new_var(tree type)
85295 +{
85296 + tree new_var = create_tmp_var(type, "cicus");
85297 +
85298 + add_referenced_var(new_var);
85299 + mark_sym_for_renaming(new_var);
85300 + return new_var;
85301 +}
85302 +
85303 +static bool is_bool(tree node)
85304 +{
85305 + tree type;
85306 +
85307 + if (node == NULL_TREE)
85308 + return false;
85309 +
85310 + type = TREE_TYPE(node);
85311 + if (!INTEGRAL_TYPE_P(type))
85312 + return false;
85313 + if (TREE_CODE(type) == BOOLEAN_TYPE)
85314 + return true;
85315 + if (TYPE_PRECISION(type) == 1)
85316 + return true;
85317 + return false;
85318 +}
85319 +
85320 +static tree cast_a_tree(tree type, tree var)
85321 +{
85322 + gcc_assert(fold_convertible_p(type, var));
85323 +
85324 + return fold_convert(type, var);
85325 +}
85326 +
85327 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
85328 +{
85329 + gimple assign;
85330 +
85331 + if (new_var == CREATE_NEW_VAR)
85332 + new_var = create_new_var(type);
85333 +
85334 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
85335 + gimple_set_location(assign, loc);
85336 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
85337 +
85338 + return assign;
85339 +}
85340 +
85341 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
85342 +{
85343 + tree oldstmt_rhs1;
85344 + enum tree_code code;
85345 + gimple stmt;
85346 + gimple_stmt_iterator gsi;
85347 +
85348 + if (!*potentionally_overflowed)
85349 + return NULL_TREE;
85350 +
85351 + if (rhs1 == NULL_TREE) {
85352 + debug_gimple_stmt(oldstmt);
85353 + error("create_assign: rhs1 is NULL_TREE");
85354 + gcc_unreachable();
85355 + }
85356 +
85357 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
85358 + code = TREE_CODE(oldstmt_rhs1);
85359 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
85360 + check_missing_attribute(oldstmt_rhs1);
85361 +
85362 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
85363 + gsi = gsi_for_stmt(oldstmt);
85364 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
85365 + basic_block next_bb, cur_bb;
85366 + edge e;
85367 +
85368 + gcc_assert(before == false);
85369 + gcc_assert(stmt_can_throw_internal(oldstmt));
85370 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
85371 + gcc_assert(!gsi_end_p(gsi));
85372 +
85373 + cur_bb = gimple_bb(oldstmt);
85374 + next_bb = cur_bb->next_bb;
85375 + e = find_edge(cur_bb, next_bb);
85376 + gcc_assert(e != NULL);
85377 + gcc_assert(e->flags & EDGE_FALLTHRU);
85378 +
85379 + gsi = gsi_after_labels(next_bb);
85380 + gcc_assert(!gsi_end_p(gsi));
85381 + before = true;
85382 + }
85383 + if (before)
85384 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
85385 + else
85386 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
85387 + update_stmt(stmt);
85388 + pointer_set_insert(visited, oldstmt);
85389 + return gimple_get_lhs(stmt);
85390 +}
85391 +
85392 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
85393 +{
85394 + tree new_var, lhs = gimple_get_lhs(oldstmt);
85395 + gimple stmt;
85396 + gimple_stmt_iterator gsi;
85397 +
85398 + if (!*potentionally_overflowed)
85399 + return NULL_TREE;
85400 +
85401 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
85402 + rhs1 = gimple_assign_rhs1(oldstmt);
85403 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
85404 + }
85405 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
85406 + rhs2 = gimple_assign_rhs2(oldstmt);
85407 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
85408 + }
85409 +
85410 + stmt = gimple_copy(oldstmt);
85411 + gimple_set_location(stmt, gimple_location(oldstmt));
85412 +
85413 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
85414 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
85415 +
85416 + if (is_bool(lhs))
85417 + new_var = SSA_NAME_VAR(lhs);
85418 + else
85419 + new_var = create_new_var(signed_size_overflow_type);
85420 + new_var = make_ssa_name(new_var, stmt);
85421 + gimple_set_lhs(stmt, new_var);
85422 +
85423 + if (rhs1 != NULL_TREE) {
85424 + if (!gimple_assign_cast_p(oldstmt))
85425 + rhs1 = cast_a_tree(signed_size_overflow_type, rhs1);
85426 + gimple_assign_set_rhs1(stmt, rhs1);
85427 + }
85428 +
85429 + if (rhs2 != NULL_TREE)
85430 + gimple_assign_set_rhs2(stmt, rhs2);
85431 +#if BUILDING_GCC_VERSION >= 4007
85432 + if (rhs3 != NULL_TREE)
85433 + gimple_assign_set_rhs3(stmt, rhs3);
85434 +#endif
85435 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
85436 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
85437 +
85438 + gsi = gsi_for_stmt(oldstmt);
85439 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
85440 + update_stmt(stmt);
85441 + pointer_set_insert(visited, oldstmt);
85442 + return gimple_get_lhs(stmt);
85443 +}
85444 +
85445 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
85446 +{
85447 + basic_block bb;
85448 + gimple phi;
85449 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
85450 +
85451 + bb = gsi_bb(gsi);
85452 +
85453 + phi = create_phi_node(var, bb);
85454 + gsi = gsi_last(phi_nodes(bb));
85455 + gsi_remove(&gsi, false);
85456 +
85457 + gsi = gsi_for_stmt(oldstmt);
85458 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
85459 + gimple_set_bb(phi, bb);
85460 + return phi;
85461 +}
85462 +
85463 +static tree signed_cast_constant(tree node)
85464 +{
85465 + gcc_assert(is_gimple_constant(node));
85466 +
85467 + return cast_a_tree(signed_size_overflow_type, node);
85468 +}
85469 +
85470 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
85471 +{
85472 + basic_block bb;
85473 + gimple newstmt, def_stmt;
85474 + gimple_stmt_iterator gsi;
85475 +
85476 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
85477 + if (TREE_CODE(arg) == SSA_NAME) {
85478 + def_stmt = get_def_stmt(arg);
85479 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
85480 + gsi = gsi_for_stmt(def_stmt);
85481 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
85482 + return newstmt;
85483 + }
85484 + }
85485 +
85486 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
85487 + gsi = gsi_after_labels(bb);
85488 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
85489 + return newstmt;
85490 +}
85491 +
85492 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
85493 +{
85494 + gimple newstmt;
85495 + gimple_stmt_iterator gsi;
85496 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
85497 + gimple def_newstmt = get_def_stmt(new_rhs);
85498 +
85499 + gsi_insert = gsi_insert_after;
85500 + gsi = gsi_for_stmt(def_newstmt);
85501 +
85502 + switch (gimple_code(get_def_stmt(arg))) {
85503 + case GIMPLE_PHI:
85504 + newstmt = gimple_build_assign(new_var, new_rhs);
85505 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
85506 + gsi_insert = gsi_insert_before;
85507 + break;
85508 + case GIMPLE_ASM:
85509 + case GIMPLE_CALL:
85510 + newstmt = gimple_build_assign(new_var, new_rhs);
85511 + break;
85512 + case GIMPLE_ASSIGN:
85513 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
85514 + break;
85515 + default:
85516 + /* unknown gimple_code (handle_build_new_phi_arg) */
85517 + gcc_unreachable();
85518 + }
85519 +
85520 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
85521 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
85522 + update_stmt(newstmt);
85523 + return newstmt;
85524 +}
85525 +
85526 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
85527 +{
85528 + gimple newstmt;
85529 + tree new_rhs;
85530 +
85531 + new_rhs = expand(visited, potentionally_overflowed, arg);
85532 +
85533 + if (new_rhs == NULL_TREE)
85534 + return NULL_TREE;
85535 +
85536 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
85537 + return gimple_get_lhs(newstmt);
85538 +}
85539 +
85540 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
85541 +{
85542 + gimple phi;
85543 + tree new_var = create_new_var(signed_size_overflow_type);
85544 + unsigned int i, n = gimple_phi_num_args(oldstmt);
85545 +
85546 + pointer_set_insert(visited, oldstmt);
85547 + phi = overflow_create_phi_node(oldstmt, new_var);
85548 + for (i = 0; i < n; i++) {
85549 + tree arg, lhs;
85550 +
85551 + arg = gimple_phi_arg_def(oldstmt, i);
85552 + if (is_gimple_constant(arg))
85553 + arg = signed_cast_constant(arg);
85554 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
85555 + if (lhs == NULL_TREE)
85556 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
85557 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
85558 + }
85559 +
85560 + update_stmt(phi);
85561 + return gimple_phi_result(phi);
85562 +}
85563 +
85564 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85565 +{
85566 + gimple def_stmt = get_def_stmt(var);
85567 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
85568 +
85569 + *potentionally_overflowed = true;
85570 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85571 + if (new_rhs1 == NULL_TREE) {
85572 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
85573 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85574 + else
85575 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
85576 + }
85577 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
85578 +}
85579 +
85580 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85581 +{
85582 + gimple def_stmt = get_def_stmt(var);
85583 + tree rhs1 = gimple_assign_rhs1(def_stmt);
85584 +
85585 + if (is_gimple_constant(rhs1))
85586 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast_constant(rhs1), NULL_TREE, NULL_TREE);
85587 +
85588 + switch (TREE_CODE(rhs1)) {
85589 + case SSA_NAME:
85590 + return handle_unary_rhs(visited, potentionally_overflowed, var);
85591 +
85592 + case ARRAY_REF:
85593 + case BIT_FIELD_REF:
85594 + case ADDR_EXPR:
85595 + case COMPONENT_REF:
85596 + case COND_EXPR:
85597 + case INDIRECT_REF:
85598 +#if BUILDING_GCC_VERSION >= 4006
85599 + case MEM_REF:
85600 +#endif
85601 + case PARM_DECL:
85602 + case TARGET_MEM_REF:
85603 + case VAR_DECL:
85604 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85605 +
85606 + default:
85607 + debug_gimple_stmt(def_stmt);
85608 + debug_tree(rhs1);
85609 + gcc_unreachable();
85610 + }
85611 +}
85612 +
85613 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
85614 +{
85615 + gimple cond_stmt;
85616 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
85617 +
85618 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
85619 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
85620 + update_stmt(cond_stmt);
85621 +}
85622 +
85623 +static tree create_string_param(tree string)
85624 +{
85625 + tree i_type, a_type;
85626 + int length = TREE_STRING_LENGTH(string);
85627 +
85628 + gcc_assert(length > 0);
85629 +
85630 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
85631 + a_type = build_array_type(char_type_node, i_type);
85632 +
85633 + TREE_TYPE(string) = a_type;
85634 + TREE_CONSTANT(string) = 1;
85635 + TREE_READONLY(string) = 1;
85636 +
85637 + return build1(ADDR_EXPR, ptr_type_node, string);
85638 +}
85639 +
85640 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
85641 +{
85642 + gimple func_stmt, def_stmt;
85643 + tree current_func, loc_file, loc_line;
85644 + expanded_location xloc;
85645 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
85646 +
85647 + def_stmt = get_def_stmt(arg);
85648 + xloc = expand_location(gimple_location(def_stmt));
85649 +
85650 + if (!gimple_has_location(def_stmt)) {
85651 + xloc = expand_location(gimple_location(stmt));
85652 + if (!gimple_has_location(stmt))
85653 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
85654 + }
85655 +
85656 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
85657 +
85658 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
85659 + loc_file = create_string_param(loc_file);
85660 +
85661 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
85662 + current_func = create_string_param(current_func);
85663 +
85664 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
85665 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
85666 +
85667 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
85668 +}
85669 +
85670 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
85671 +{
85672 + basic_block cond_bb, join_bb, bb_true;
85673 + edge e;
85674 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85675 +// location_t loc = gimple_location(stmt);
85676 +
85677 + cond_bb = gimple_bb(stmt);
85678 + gsi_prev(&gsi);
85679 + if (gsi_end_p(gsi))
85680 + e = split_block_after_labels(cond_bb);
85681 + else
85682 + e = split_block(cond_bb, gsi_stmt(gsi));
85683 + cond_bb = e->src;
85684 + join_bb = e->dest;
85685 + e->flags = EDGE_FALSE_VALUE;
85686 + e->probability = REG_BR_PROB_BASE;
85687 +
85688 + bb_true = create_empty_bb(cond_bb);
85689 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
85690 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
85691 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
85692 +
85693 + if (dom_info_available_p(CDI_DOMINATORS)) {
85694 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
85695 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
85696 + }
85697 +
85698 + if (current_loops != NULL) {
85699 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
85700 + add_bb_to_loop(bb_true, cond_bb->loop_father);
85701 + }
85702 +
85703 + insert_cond(cond_bb, arg, cond_code, type_value);
85704 + insert_cond_result(bb_true, stmt, arg);
85705 +
85706 +// inform(loc, "Integer size_overflow check applied here.");
85707 +}
85708 +
85709 +static tree get_type_for_check(tree rhs)
85710 +{
85711 + tree def_rhs;
85712 + gimple def_stmt = get_def_stmt(rhs);
85713 +
85714 + if (!gimple_assign_cast_p(def_stmt))
85715 + return TREE_TYPE(rhs);
85716 + def_rhs = gimple_assign_rhs1(def_stmt);
85717 + if (TREE_CODE(TREE_TYPE(def_rhs)) == INTEGER_TYPE)
85718 + return TREE_TYPE(def_rhs);
85719 + return TREE_TYPE(rhs);
85720 +}
85721 +
85722 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
85723 +{
85724 + gimple ucast_stmt;
85725 + gimple_stmt_iterator gsi;
85726 + location_t loc = gimple_location(stmt);
85727 +
85728 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
85729 + gsi = gsi_for_stmt(stmt);
85730 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
85731 + return ucast_stmt;
85732 +}
85733 +
85734 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
85735 +{
85736 + tree type_max, type_min, rhs_type;
85737 + gimple ucast_stmt;
85738 +
85739 + if (!*potentionally_overflowed)
85740 + return;
85741 +
85742 + rhs_type = get_type_for_check(rhs);
85743 +
85744 + if (TYPE_UNSIGNED(rhs_type)) {
85745 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
85746 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
85747 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
85748 + } else {
85749 + type_max = cast_a_tree(signed_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
85750 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
85751 +
85752 + type_min = cast_a_tree(signed_size_overflow_type, TYPE_MIN_VALUE(rhs_type));
85753 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
85754 + }
85755 +}
85756 +
85757 +static tree change_assign_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple stmt, tree orig_rhs)
85758 +{
85759 + gimple assign;
85760 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85761 + tree new_rhs, origtype = TREE_TYPE(orig_rhs);
85762 +
85763 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
85764 +
85765 + new_rhs = expand(visited, potentionally_overflowed, orig_rhs);
85766 + if (new_rhs == NULL_TREE)
85767 + return NULL_TREE;
85768 +
85769 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
85770 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85771 + update_stmt(assign);
85772 + return gimple_get_lhs(assign);
85773 +}
85774 +
85775 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
85776 +{
85777 + tree new_rhs, cast_rhs;
85778 +
85779 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
85780 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85781 +
85782 + new_rhs = change_assign_rhs(visited, potentionally_overflowed, def_stmt, rhs);
85783 + if (new_rhs != NULL_TREE) {
85784 + gimple_assign_set_rhs(def_stmt, new_rhs);
85785 + update_stmt(def_stmt);
85786 +
85787 + cast_rhs = gimple_assign_rhs1(get_def_stmt(new_rhs));
85788 +
85789 + check_size_overflow(def_stmt, cast_rhs, rhs, potentionally_overflowed);
85790 + }
85791 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85792 +}
85793 +
85794 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85795 +{
85796 + tree rhs1, rhs2;
85797 + gimple def_stmt = get_def_stmt(var);
85798 + tree new_rhs1 = NULL_TREE;
85799 + tree new_rhs2 = NULL_TREE;
85800 +
85801 + rhs1 = gimple_assign_rhs1(def_stmt);
85802 + rhs2 = gimple_assign_rhs2(def_stmt);
85803 +
85804 + /* no DImode/TImode division in the 32/64 bit kernel */
85805 + switch (gimple_assign_rhs_code(def_stmt)) {
85806 + case RDIV_EXPR:
85807 + case TRUNC_DIV_EXPR:
85808 + case CEIL_DIV_EXPR:
85809 + case FLOOR_DIV_EXPR:
85810 + case ROUND_DIV_EXPR:
85811 + case TRUNC_MOD_EXPR:
85812 + case CEIL_MOD_EXPR:
85813 + case FLOOR_MOD_EXPR:
85814 + case ROUND_MOD_EXPR:
85815 + case EXACT_DIV_EXPR:
85816 + case POINTER_PLUS_EXPR:
85817 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85818 + default:
85819 + break;
85820 + }
85821 +
85822 + *potentionally_overflowed = true;
85823 +
85824 + if (TREE_CODE(rhs1) == SSA_NAME)
85825 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85826 + if (TREE_CODE(rhs2) == SSA_NAME)
85827 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
85828 +
85829 + if (is_gimple_constant(rhs2))
85830 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, signed_cast_constant(rhs2), &gimple_assign_set_rhs1);
85831 +
85832 + if (is_gimple_constant(rhs1))
85833 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, signed_cast_constant(rhs1), new_rhs2, &gimple_assign_set_rhs2);
85834 +
85835 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85836 +}
85837 +
85838 +#if BUILDING_GCC_VERSION >= 4007
85839 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
85840 +{
85841 + if (is_gimple_constant(rhs))
85842 + return signed_cast_constant(rhs);
85843 + if (TREE_CODE(rhs) != SSA_NAME)
85844 + return NULL_TREE;
85845 + return expand(visited, potentionally_overflowed, rhs);
85846 +}
85847 +
85848 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85849 +{
85850 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
85851 + gimple def_stmt = get_def_stmt(var);
85852 +
85853 + *potentionally_overflowed = true;
85854 +
85855 + rhs1 = gimple_assign_rhs1(def_stmt);
85856 + rhs2 = gimple_assign_rhs2(def_stmt);
85857 + rhs3 = gimple_assign_rhs3(def_stmt);
85858 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
85859 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
85860 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
85861 +
85862 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
85863 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
85864 + error("handle_ternary_ops: unknown rhs");
85865 + gcc_unreachable();
85866 +}
85867 +#endif
85868 +
85869 +static void set_size_overflow_type(tree node)
85870 +{
85871 + switch (TYPE_MODE(TREE_TYPE(node))) {
85872 + case SImode:
85873 + signed_size_overflow_type = intDI_type_node;
85874 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85875 + break;
85876 + case DImode:
85877 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
85878 + signed_size_overflow_type = intDI_type_node;
85879 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85880 + } else {
85881 + signed_size_overflow_type = intTI_type_node;
85882 + unsigned_size_overflow_type = unsigned_intTI_type_node;
85883 + }
85884 + break;
85885 + default:
85886 + error("set_size_overflow_type: unsupported gcc configuration.");
85887 + gcc_unreachable();
85888 + }
85889 +}
85890 +
85891 +static tree expand_visited(gimple def_stmt)
85892 +{
85893 + gimple tmp;
85894 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
85895 +
85896 + gsi_next(&gsi);
85897 + tmp = gsi_stmt(gsi);
85898 + switch (gimple_code(tmp)) {
85899 + case GIMPLE_ASSIGN:
85900 + return gimple_get_lhs(tmp);
85901 + case GIMPLE_PHI:
85902 + return gimple_phi_result(tmp);
85903 + case GIMPLE_CALL:
85904 + return gimple_call_lhs(tmp);
85905 + default:
85906 + return NULL_TREE;
85907 + }
85908 +}
85909 +
85910 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85911 +{
85912 + gimple def_stmt;
85913 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
85914 +
85915 + if (is_gimple_constant(var))
85916 + return NULL_TREE;
85917 +
85918 + if (TREE_CODE(var) == ADDR_EXPR)
85919 + return NULL_TREE;
85920 +
85921 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE);
85922 + if (code != INTEGER_TYPE)
85923 + return NULL_TREE;
85924 +
85925 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
85926 + check_missing_attribute(var);
85927 + return NULL_TREE;
85928 + }
85929 +
85930 + def_stmt = get_def_stmt(var);
85931 +
85932 + if (!def_stmt)
85933 + return NULL_TREE;
85934 +
85935 + if (pointer_set_contains(visited, def_stmt))
85936 + return expand_visited(def_stmt);
85937 +
85938 + switch (gimple_code(def_stmt)) {
85939 + case GIMPLE_NOP:
85940 + check_missing_attribute(var);
85941 + return NULL_TREE;
85942 + case GIMPLE_PHI:
85943 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
85944 + case GIMPLE_CALL:
85945 + case GIMPLE_ASM:
85946 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85947 + case GIMPLE_ASSIGN:
85948 + switch (gimple_num_ops(def_stmt)) {
85949 + case 2:
85950 + return handle_unary_ops(visited, potentionally_overflowed, var);
85951 + case 3:
85952 + return handle_binary_ops(visited, potentionally_overflowed, var);
85953 +#if BUILDING_GCC_VERSION >= 4007
85954 + case 4:
85955 + return handle_ternary_ops(visited, potentionally_overflowed, var);
85956 +#endif
85957 + }
85958 + default:
85959 + debug_gimple_stmt(def_stmt);
85960 + error("expand: unknown gimple code");
85961 + gcc_unreachable();
85962 + }
85963 +}
85964 +
85965 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
85966 +{
85967 + gimple assign;
85968 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85969 + tree origtype = TREE_TYPE(origarg);
85970 +
85971 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
85972 +
85973 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
85974 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85975 + update_stmt(assign);
85976 +
85977 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
85978 + update_stmt(stmt);
85979 +}
85980 +
85981 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
85982 +{
85983 + const char *origid;
85984 + tree arg, origarg;
85985 +
85986 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
85987 + gcc_assert(gimple_call_num_args(stmt) > argnum);
85988 + return gimple_call_arg(stmt, argnum);
85989 + }
85990 +
85991 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
85992 + while (origarg && argnum) {
85993 + argnum--;
85994 + origarg = TREE_CHAIN(origarg);
85995 + }
85996 +
85997 + gcc_assert(argnum == 0);
85998 +
85999 + gcc_assert(origarg != NULL_TREE);
86000 + origid = NAME(origarg);
86001 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
86002 + if (!strcmp(origid, NAME(arg)))
86003 + return arg;
86004 + }
86005 + return NULL_TREE;
86006 +}
86007 +
86008 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
86009 +{
86010 + struct pointer_set_t *visited;
86011 + tree arg, newarg, type_max;
86012 + gimple ucast_stmt;
86013 + bool potentionally_overflowed;
86014 +
86015 + arg = get_function_arg(argnum, stmt, fndecl);
86016 + if (arg == NULL_TREE)
86017 + return;
86018 +
86019 + if (is_gimple_constant(arg))
86020 + return;
86021 + if (TREE_CODE(arg) != SSA_NAME)
86022 + return;
86023 +
86024 + check_arg_type(arg);
86025 +
86026 + set_size_overflow_type(arg);
86027 +
86028 + visited = pointer_set_create();
86029 + potentionally_overflowed = false;
86030 + newarg = expand(visited, &potentionally_overflowed, arg);
86031 + pointer_set_destroy(visited);
86032 +
86033 + if (newarg == NULL_TREE || !potentionally_overflowed)
86034 + return;
86035 +
86036 + change_function_arg(stmt, arg, argnum, newarg);
86037 +
86038 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, newarg);
86039 +
86040 + type_max = build_int_cstu(unsigned_size_overflow_type, 0x7fffffff);
86041 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
86042 +}
86043 +
86044 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
86045 +{
86046 + tree p = TREE_VALUE(attr);
86047 + do {
86048 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
86049 + p = TREE_CHAIN(p);
86050 + } while (p);
86051 +}
86052 +
86053 +static void handle_function_by_hash(gimple stmt, tree fndecl)
86054 +{
86055 + struct size_overflow_hash *hash;
86056 + expanded_location xloc;
86057 +
86058 + hash = get_function_hash(fndecl);
86059 + xloc = expand_location(DECL_SOURCE_LOCATION(fndecl));
86060 +
86061 + fndecl = get_original_function_decl(fndecl);
86062 + if (!hash->name)
86063 + return;
86064 + if (file_match && !hash->file)
86065 + return;
86066 + if (strcmp(hash->name, NAME(fndecl)))
86067 + return;
86068 + if (file_match && strcmp(hash->file, xloc.file))
86069 + return;
86070 +
86071 +#define search_param(argnum) \
86072 + if (hash->param##argnum) \
86073 + handle_function_arg(stmt, fndecl, argnum - 1);
86074 +
86075 + search_param(1);
86076 + search_param(2);
86077 + search_param(3);
86078 + search_param(4);
86079 + search_param(5);
86080 + search_param(6);
86081 + search_param(7);
86082 + search_param(8);
86083 + search_param(9);
86084 +#undef search_param
86085 +}
86086 +
86087 +static unsigned int handle_function(void)
86088 +{
86089 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
86090 + int saved_last_basic_block = last_basic_block;
86091 +
86092 + do {
86093 + gimple_stmt_iterator gsi;
86094 + basic_block next = bb->next_bb;
86095 +
86096 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86097 + tree fndecl, attr;
86098 + gimple stmt = gsi_stmt(gsi);
86099 +
86100 + if (!(is_gimple_call(stmt)))
86101 + continue;
86102 + fndecl = gimple_call_fndecl(stmt);
86103 + if (fndecl == NULL_TREE)
86104 + continue;
86105 + if (gimple_call_num_args(stmt) == 0)
86106 + continue;
86107 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
86108 + if (!attr || !TREE_VALUE(attr))
86109 + handle_function_by_hash(stmt, fndecl);
86110 + else
86111 + handle_function_by_attribute(stmt, attr, fndecl);
86112 + gsi = gsi_for_stmt(stmt);
86113 + }
86114 + bb = next;
86115 + } while (bb && bb->index <= saved_last_basic_block);
86116 + return 0;
86117 +}
86118 +
86119 +static struct gimple_opt_pass size_overflow_pass = {
86120 + .pass = {
86121 + .type = GIMPLE_PASS,
86122 + .name = "size_overflow",
86123 + .gate = NULL,
86124 + .execute = handle_function,
86125 + .sub = NULL,
86126 + .next = NULL,
86127 + .static_pass_number = 0,
86128 + .tv_id = TV_NONE,
86129 + .properties_required = PROP_cfg | PROP_referenced_vars,
86130 + .properties_provided = 0,
86131 + .properties_destroyed = 0,
86132 + .todo_flags_start = 0,
86133 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
86134 + }
86135 +};
86136 +
86137 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
86138 +{
86139 + tree fntype;
86140 +
86141 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
86142 +
86143 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
86144 + fntype = build_function_type_list(void_type_node,
86145 + const_char_ptr_type_node,
86146 + unsigned_type_node,
86147 + const_char_ptr_type_node,
86148 + NULL_TREE);
86149 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
86150 +
86151 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
86152 + TREE_PUBLIC(report_size_overflow_decl) = 1;
86153 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
86154 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
86155 +}
86156 +
86157 +extern struct gimple_opt_pass pass_dce;
86158 +
86159 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86160 +{
86161 + int i;
86162 + const char * const plugin_name = plugin_info->base_name;
86163 + const int argc = plugin_info->argc;
86164 + const struct plugin_argument * const argv = plugin_info->argv;
86165 + bool enable = true;
86166 +
86167 + struct register_pass_info size_overflow_pass_info = {
86168 + .pass = &size_overflow_pass.pass,
86169 + .reference_pass_name = "ssa",
86170 + .ref_pass_instance_number = 1,
86171 + .pos_op = PASS_POS_INSERT_AFTER
86172 + };
86173 +
86174 + if (!plugin_default_version_check(version, &gcc_version)) {
86175 + error(G_("incompatible gcc/plugin versions"));
86176 + return 1;
86177 + }
86178 +
86179 + for (i = 0; i < argc; ++i) {
86180 + if (!strcmp(argv[i].key, "no-size-overflow")) {
86181 + enable = false;
86182 + continue;
86183 + } else if (!(strcmp(argv[i].key, "no-file-match"))) {
86184 + file_match = false;
86185 + continue;
86186 + }
86187 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86188 + }
86189 +
86190 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
86191 + if (enable) {
86192 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
86193 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
86194 + }
86195 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
86196 +
86197 + return 0;
86198 +}
86199 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
86200 new file mode 100644
86201 index 0000000..b87ec9d
86202 --- /dev/null
86203 +++ b/tools/gcc/stackleak_plugin.c
86204 @@ -0,0 +1,313 @@
86205 +/*
86206 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
86207 + * Licensed under the GPL v2
86208 + *
86209 + * Note: the choice of the license means that the compilation process is
86210 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
86211 + * but for the kernel it doesn't matter since it doesn't link against
86212 + * any of the gcc libraries
86213 + *
86214 + * gcc plugin to help implement various PaX features
86215 + *
86216 + * - track lowest stack pointer
86217 + *
86218 + * TODO:
86219 + * - initialize all local variables
86220 + *
86221 + * BUGS:
86222 + * - none known
86223 + */
86224 +#include "gcc-plugin.h"
86225 +#include "config.h"
86226 +#include "system.h"
86227 +#include "coretypes.h"
86228 +#include "tree.h"
86229 +#include "tree-pass.h"
86230 +#include "flags.h"
86231 +#include "intl.h"
86232 +#include "toplev.h"
86233 +#include "plugin.h"
86234 +//#include "expr.h" where are you...
86235 +#include "diagnostic.h"
86236 +#include "plugin-version.h"
86237 +#include "tm.h"
86238 +#include "function.h"
86239 +#include "basic-block.h"
86240 +#include "gimple.h"
86241 +#include "rtl.h"
86242 +#include "emit-rtl.h"
86243 +
86244 +extern void print_gimple_stmt(FILE *, gimple, int, int);
86245 +
86246 +int plugin_is_GPL_compatible;
86247 +
86248 +static int track_frame_size = -1;
86249 +static const char track_function[] = "pax_track_stack";
86250 +static const char check_function[] = "pax_check_alloca";
86251 +static bool init_locals;
86252 +
86253 +static struct plugin_info stackleak_plugin_info = {
86254 + .version = "201203140940",
86255 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
86256 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
86257 +};
86258 +
86259 +static bool gate_stackleak_track_stack(void);
86260 +static unsigned int execute_stackleak_tree_instrument(void);
86261 +static unsigned int execute_stackleak_final(void);
86262 +
86263 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
86264 + .pass = {
86265 + .type = GIMPLE_PASS,
86266 + .name = "stackleak_tree_instrument",
86267 + .gate = gate_stackleak_track_stack,
86268 + .execute = execute_stackleak_tree_instrument,
86269 + .sub = NULL,
86270 + .next = NULL,
86271 + .static_pass_number = 0,
86272 + .tv_id = TV_NONE,
86273 + .properties_required = PROP_gimple_leh | PROP_cfg,
86274 + .properties_provided = 0,
86275 + .properties_destroyed = 0,
86276 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
86277 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
86278 + }
86279 +};
86280 +
86281 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
86282 + .pass = {
86283 + .type = RTL_PASS,
86284 + .name = "stackleak_final",
86285 + .gate = gate_stackleak_track_stack,
86286 + .execute = execute_stackleak_final,
86287 + .sub = NULL,
86288 + .next = NULL,
86289 + .static_pass_number = 0,
86290 + .tv_id = TV_NONE,
86291 + .properties_required = 0,
86292 + .properties_provided = 0,
86293 + .properties_destroyed = 0,
86294 + .todo_flags_start = 0,
86295 + .todo_flags_finish = TODO_dump_func
86296 + }
86297 +};
86298 +
86299 +static bool gate_stackleak_track_stack(void)
86300 +{
86301 + return track_frame_size >= 0;
86302 +}
86303 +
86304 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
86305 +{
86306 + gimple check_alloca;
86307 + tree fntype, fndecl, alloca_size;
86308 +
86309 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
86310 + fndecl = build_fn_decl(check_function, fntype);
86311 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
86312 +
86313 + // insert call to void pax_check_alloca(unsigned long size)
86314 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
86315 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
86316 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
86317 +}
86318 +
86319 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
86320 +{
86321 + gimple track_stack;
86322 + tree fntype, fndecl;
86323 +
86324 + fntype = build_function_type_list(void_type_node, NULL_TREE);
86325 + fndecl = build_fn_decl(track_function, fntype);
86326 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
86327 +
86328 + // insert call to void pax_track_stack(void)
86329 + track_stack = gimple_build_call(fndecl, 0);
86330 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
86331 +}
86332 +
86333 +#if BUILDING_GCC_VERSION == 4005
86334 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
86335 +{
86336 + tree fndecl;
86337 +
86338 + if (!is_gimple_call(stmt))
86339 + return false;
86340 + fndecl = gimple_call_fndecl(stmt);
86341 + if (!fndecl)
86342 + return false;
86343 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
86344 + return false;
86345 +// print_node(stderr, "pax", fndecl, 4);
86346 + return DECL_FUNCTION_CODE(fndecl) == code;
86347 +}
86348 +#endif
86349 +
86350 +static bool is_alloca(gimple stmt)
86351 +{
86352 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
86353 + return true;
86354 +
86355 +#if BUILDING_GCC_VERSION >= 4007
86356 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
86357 + return true;
86358 +#endif
86359 +
86360 + return false;
86361 +}
86362 +
86363 +static unsigned int execute_stackleak_tree_instrument(void)
86364 +{
86365 + basic_block bb, entry_bb;
86366 + bool prologue_instrumented = false, is_leaf = true;
86367 +
86368 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
86369 +
86370 + // 1. loop through BBs and GIMPLE statements
86371 + FOR_EACH_BB(bb) {
86372 + gimple_stmt_iterator gsi;
86373 +
86374 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
86375 + gimple stmt;
86376 +
86377 + stmt = gsi_stmt(gsi);
86378 +
86379 + if (is_gimple_call(stmt))
86380 + is_leaf = false;
86381 +
86382 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
86383 + if (!is_alloca(stmt))
86384 + continue;
86385 +
86386 + // 2. insert stack overflow check before each __builtin_alloca call
86387 + stackleak_check_alloca(&gsi);
86388 +
86389 + // 3. insert track call after each __builtin_alloca call
86390 + stackleak_add_instrumentation(&gsi);
86391 + if (bb == entry_bb)
86392 + prologue_instrumented = true;
86393 + }
86394 + }
86395 +
86396 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
86397 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
86398 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
86399 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
86400 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
86401 + return 0;
86402 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
86403 + return 0;
86404 +
86405 + // 4. insert track call at the beginning
86406 + if (!prologue_instrumented) {
86407 + gimple_stmt_iterator gsi;
86408 +
86409 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
86410 + if (dom_info_available_p(CDI_DOMINATORS))
86411 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
86412 + gsi = gsi_start_bb(bb);
86413 + stackleak_add_instrumentation(&gsi);
86414 + }
86415 +
86416 + return 0;
86417 +}
86418 +
86419 +static unsigned int execute_stackleak_final(void)
86420 +{
86421 + rtx insn;
86422 +
86423 + if (cfun->calls_alloca)
86424 + return 0;
86425 +
86426 + // keep calls only if function frame is big enough
86427 + if (get_frame_size() >= track_frame_size)
86428 + return 0;
86429 +
86430 + // 1. find pax_track_stack calls
86431 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
86432 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
86433 + rtx body;
86434 +
86435 + if (!CALL_P(insn))
86436 + continue;
86437 + body = PATTERN(insn);
86438 + if (GET_CODE(body) != CALL)
86439 + continue;
86440 + body = XEXP(body, 0);
86441 + if (GET_CODE(body) != MEM)
86442 + continue;
86443 + body = XEXP(body, 0);
86444 + if (GET_CODE(body) != SYMBOL_REF)
86445 + continue;
86446 + if (strcmp(XSTR(body, 0), track_function))
86447 + continue;
86448 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
86449 + // 2. delete call
86450 + insn = delete_insn_and_edges(insn);
86451 +#if BUILDING_GCC_VERSION >= 4007
86452 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
86453 + insn = delete_insn_and_edges(insn);
86454 +#endif
86455 + }
86456 +
86457 +// print_simple_rtl(stderr, get_insns());
86458 +// print_rtl(stderr, get_insns());
86459 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
86460 +
86461 + return 0;
86462 +}
86463 +
86464 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
86465 +{
86466 + const char * const plugin_name = plugin_info->base_name;
86467 + const int argc = plugin_info->argc;
86468 + const struct plugin_argument * const argv = plugin_info->argv;
86469 + int i;
86470 + struct register_pass_info stackleak_tree_instrument_pass_info = {
86471 + .pass = &stackleak_tree_instrument_pass.pass,
86472 +// .reference_pass_name = "tree_profile",
86473 + .reference_pass_name = "optimized",
86474 + .ref_pass_instance_number = 0,
86475 + .pos_op = PASS_POS_INSERT_BEFORE
86476 + };
86477 + struct register_pass_info stackleak_final_pass_info = {
86478 + .pass = &stackleak_final_rtl_opt_pass.pass,
86479 + .reference_pass_name = "final",
86480 + .ref_pass_instance_number = 0,
86481 + .pos_op = PASS_POS_INSERT_BEFORE
86482 + };
86483 +
86484 + if (!plugin_default_version_check(version, &gcc_version)) {
86485 + error(G_("incompatible gcc/plugin versions"));
86486 + return 1;
86487 + }
86488 +
86489 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
86490 +
86491 + for (i = 0; i < argc; ++i) {
86492 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
86493 + if (!argv[i].value) {
86494 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86495 + continue;
86496 + }
86497 + track_frame_size = atoi(argv[i].value);
86498 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
86499 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
86500 + continue;
86501 + }
86502 + if (!strcmp(argv[i].key, "initialize-locals")) {
86503 + if (argv[i].value) {
86504 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
86505 + continue;
86506 + }
86507 + init_locals = true;
86508 + continue;
86509 + }
86510 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
86511 + }
86512 +
86513 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
86514 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
86515 +
86516 + return 0;
86517 +}
86518 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
86519 index 6789d78..4afd019 100644
86520 --- a/tools/perf/util/include/asm/alternative-asm.h
86521 +++ b/tools/perf/util/include/asm/alternative-asm.h
86522 @@ -5,4 +5,7 @@
86523
86524 #define altinstruction_entry #
86525
86526 + .macro pax_force_retaddr rip=0, reload=0
86527 + .endm
86528 +
86529 #endif
86530 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
86531 index af0f22f..9a7d479 100644
86532 --- a/usr/gen_init_cpio.c
86533 +++ b/usr/gen_init_cpio.c
86534 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
86535 int retval;
86536 int rc = -1;
86537 int namesize;
86538 - int i;
86539 + unsigned int i;
86540
86541 mode |= S_IFREG;
86542
86543 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
86544 *env_var = *expanded = '\0';
86545 strncat(env_var, start + 2, end - start - 2);
86546 strncat(expanded, new_location, start - new_location);
86547 - strncat(expanded, getenv(env_var), PATH_MAX);
86548 - strncat(expanded, end + 1, PATH_MAX);
86549 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
86550 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
86551 strncpy(new_location, expanded, PATH_MAX);
86552 + new_location[PATH_MAX] = 0;
86553 } else
86554 break;
86555 }
86556 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
86557 index 7858228..2919715 100644
86558 --- a/virt/kvm/kvm_main.c
86559 +++ b/virt/kvm/kvm_main.c
86560 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
86561
86562 static cpumask_var_t cpus_hardware_enabled;
86563 static int kvm_usage_count = 0;
86564 -static atomic_t hardware_enable_failed;
86565 +static atomic_unchecked_t hardware_enable_failed;
86566
86567 struct kmem_cache *kvm_vcpu_cache;
86568 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
86569 @@ -2318,7 +2318,7 @@ static void hardware_enable_nolock(void *junk)
86570
86571 if (r) {
86572 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
86573 - atomic_inc(&hardware_enable_failed);
86574 + atomic_inc_unchecked(&hardware_enable_failed);
86575 printk(KERN_INFO "kvm: enabling virtualization on "
86576 "CPU%d failed\n", cpu);
86577 }
86578 @@ -2372,10 +2372,10 @@ static int hardware_enable_all(void)
86579
86580 kvm_usage_count++;
86581 if (kvm_usage_count == 1) {
86582 - atomic_set(&hardware_enable_failed, 0);
86583 + atomic_set_unchecked(&hardware_enable_failed, 0);
86584 on_each_cpu(hardware_enable_nolock, NULL, 1);
86585
86586 - if (atomic_read(&hardware_enable_failed)) {
86587 + if (atomic_read_unchecked(&hardware_enable_failed)) {
86588 hardware_disable_all_nolock();
86589 r = -EBUSY;
86590 }
86591 @@ -2738,7 +2738,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
86592 kvm_arch_vcpu_put(vcpu);
86593 }
86594
86595 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86596 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86597 struct module *module)
86598 {
86599 int r;
86600 @@ -2801,7 +2801,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86601 if (!vcpu_align)
86602 vcpu_align = __alignof__(struct kvm_vcpu);
86603 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
86604 - 0, NULL);
86605 + SLAB_USERCOPY, NULL);
86606 if (!kvm_vcpu_cache) {
86607 r = -ENOMEM;
86608 goto out_free_3;
86609 @@ -2811,9 +2811,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86610 if (r)
86611 goto out_free;
86612
86613 - kvm_chardev_ops.owner = module;
86614 - kvm_vm_fops.owner = module;
86615 - kvm_vcpu_fops.owner = module;
86616 + pax_open_kernel();
86617 + *(void **)&kvm_chardev_ops.owner = module;
86618 + *(void **)&kvm_vm_fops.owner = module;
86619 + *(void **)&kvm_vcpu_fops.owner = module;
86620 + pax_close_kernel();
86621
86622 r = misc_register(&kvm_dev);
86623 if (r) {