]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.9.1-3.4.6-201207211444.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.4.6-201207211444.patch
1 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2 index b4a898f..781c7ad 100644
3 --- a/Documentation/dontdiff
4 +++ b/Documentation/dontdiff
5 @@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9 +*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13 +*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17 @@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21 +*.gmo
22 *.grep
23 *.grp
24 *.gz
25 @@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29 +*.vim
30 *.xml
31 *.xz
32 *_MODULES
33 +*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38 -.*
39 +.[^g]*
40 +.gen*
41 .*.d
42 .mm
43 53c700_d.h
44 @@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48 +PERF*
49 SCCS
50 System.map*
51 TAGS
52 @@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56 +ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60 @@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64 +builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70 +clut_vga16.c
71 +common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78 +config.c
79 config.mak
80 config.mak.autogen
81 +config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85 @@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89 +dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93 +exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97 @@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101 +gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108 +hash
109 +hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113 @@ -145,7 +163,7 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117 -kconfig
118 +kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 @@ -153,7 +171,7 @@ kxgettext
123 lkc_defs.h
124 lex.c
125 lex.*.c
126 -linux
127 +lib1funcs.S
128 logo_*.c
129 logo_*_clut224.c
130 logo_*_mono.c
131 @@ -164,14 +182,15 @@ machtypes.h
132 map
133 map_hugetlb
134 maui_boot.h
135 -media
136 mconf
137 +mdp
138 miboot*
139 mk_elfconfig
140 mkboot
141 mkbugboot
142 mkcpustr
143 mkdep
144 +mkpiggy
145 mkprep
146 mkregtable
147 mktables
148 @@ -188,6 +207,7 @@ oui.c*
149 page-types
150 parse.c
151 parse.h
152 +parse-events*
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156 @@ -197,6 +217,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160 +pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164 @@ -207,6 +228,7 @@ r300_reg_safe.h
165 r420_reg_safe.h
166 r600_reg_safe.h
167 recordmcount
168 +regdb.c
169 relocs
170 rlim_names.h
171 rn50_reg_safe.h
172 @@ -216,7 +238,9 @@ series
173 setup
174 setup.bin
175 setup.elf
176 +size_overflow_hash.h
177 sImage
178 +slabinfo
179 sm_tbl*
180 split-include
181 syscalltab.h
182 @@ -227,6 +251,7 @@ tftpboot.img
183 timeconst.h
184 times.h*
185 trix_boot.h
186 +user_constants.h
187 utsrelease.h*
188 vdso-syms.lds
189 vdso.lds
190 @@ -238,13 +263,17 @@ vdso32.lds
191 vdso32.so.dbg
192 vdso64.lds
193 vdso64.so.dbg
194 +vdsox32.lds
195 +vdsox32-syms.lds
196 version.h*
197 vmImage
198 vmlinux
199 vmlinux-*
200 vmlinux.aout
201 vmlinux.bin.all
202 +vmlinux.bin.bz2
203 vmlinux.lds
204 +vmlinux.relocs
205 vmlinuz
206 voffset.h
207 vsyscall.lds
208 @@ -252,9 +281,11 @@ vsyscall_32.lds
209 wanxlfw.inc
210 uImage
211 unifdef
212 +utsrelease.h
213 wakeup.bin
214 wakeup.elf
215 wakeup.lds
216 zImage*
217 zconf.hash.c
218 +zconf.lex.c
219 zoffset.h
220 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
221 index c1601e5..08557ce 100644
222 --- a/Documentation/kernel-parameters.txt
223 +++ b/Documentation/kernel-parameters.txt
224 @@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
225 the specified number of seconds. This is to be used if
226 your oopses keep scrolling off the screen.
227
228 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
229 + virtualization environments that don't cope well with the
230 + expand down segment used by UDEREF on X86-32 or the frequent
231 + page table updates on X86-64.
232 +
233 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
234 +
235 pcbit= [HW,ISDN]
236
237 pcd. [PARIDE]
238 diff --git a/Makefile b/Makefile
239 index 5d0edcb..121c424 100644
240 --- a/Makefile
241 +++ b/Makefile
242 @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
243
244 HOSTCC = gcc
245 HOSTCXX = g++
246 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
247 -HOSTCXXFLAGS = -O2
248 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
249 +HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
250 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
251
252 # Decide whether to build built-in, modular, or both.
253 # Normally, just do built-in.
254 @@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
255 # Rules shared between *config targets and build targets
256
257 # Basic helpers built in scripts/
258 -PHONY += scripts_basic
259 -scripts_basic:
260 +PHONY += scripts_basic gcc-plugins
261 +scripts_basic: gcc-plugins
262 $(Q)$(MAKE) $(build)=scripts/basic
263 $(Q)rm -f .tmp_quiet_recordmcount
264
265 @@ -564,6 +565,60 @@ else
266 KBUILD_CFLAGS += -O2
267 endif
268
269 +ifndef DISABLE_PAX_PLUGINS
270 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
271 +ifneq ($(PLUGINCC),)
272 +ifndef DISABLE_PAX_CONSTIFY_PLUGIN
273 +ifndef CONFIG_UML
274 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
275 +endif
276 +endif
277 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
278 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
279 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
280 +endif
281 +ifdef CONFIG_KALLOCSTAT_PLUGIN
282 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
283 +endif
284 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
285 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
286 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
287 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
288 +endif
289 +ifdef CONFIG_CHECKER_PLUGIN
290 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
291 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
292 +endif
293 +endif
294 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
295 +ifdef CONFIG_PAX_SIZE_OVERFLOW
296 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
297 +endif
298 +ifdef CONFIG_PAX_LATENT_ENTROPY
299 +LATENT_ENTROPY := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so
300 +endif
301 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
302 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
303 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY)
304 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
305 +export PLUGINCC CONSTIFY_PLUGIN
306 +ifeq ($(KBUILD_EXTMOD),)
307 +gcc-plugins:
308 + $(Q)$(MAKE) $(build)=tools/gcc
309 +else
310 +gcc-plugins: ;
311 +endif
312 +else
313 +gcc-plugins:
314 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
315 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
316 +else
317 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
318 +endif
319 + $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
320 +endif
321 +endif
322 +
323 include $(srctree)/arch/$(SRCARCH)/Makefile
324
325 ifneq ($(CONFIG_FRAME_WARN),0)
326 @@ -708,7 +763,7 @@ export mod_strip_cmd
327
328
329 ifeq ($(KBUILD_EXTMOD),)
330 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
331 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
332
333 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
334 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
335 @@ -932,6 +987,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
336
337 # The actual objects are generated when descending,
338 # make sure no implicit rule kicks in
339 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340 +$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
342
343 # Handle descending into subdirectories listed in $(vmlinux-dirs)
344 @@ -941,7 +998,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
345 # Error messages still appears in the original language
346
347 PHONY += $(vmlinux-dirs)
348 -$(vmlinux-dirs): prepare scripts
349 +$(vmlinux-dirs): gcc-plugins prepare scripts
350 $(Q)$(MAKE) $(build)=$@
351
352 # Store (new) KERNELRELASE string in include/config/kernel.release
353 @@ -985,6 +1042,7 @@ prepare0: archprepare FORCE
354 $(Q)$(MAKE) $(build)=.
355
356 # All the preparing..
357 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
358 prepare: prepare0
359
360 # Generate some files
361 @@ -1092,6 +1150,8 @@ all: modules
362 # using awk while concatenating to the final file.
363
364 PHONY += modules
365 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
368 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
369 @$(kecho) ' Building modules, stage 2.';
370 @@ -1107,7 +1167,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
371
372 # Target to prepare building external modules
373 PHONY += modules_prepare
374 -modules_prepare: prepare scripts
375 +modules_prepare: gcc-plugins prepare scripts
376
377 # Target to install modules
378 PHONY += modules_install
379 @@ -1166,7 +1226,7 @@ CLEAN_FILES += vmlinux System.map \
380 MRPROPER_DIRS += include/config usr/include include/generated \
381 arch/*/include/generated
382 MRPROPER_FILES += .config .config.old .version .old_version \
383 - include/linux/version.h \
384 + include/linux/version.h tools/gcc/size_overflow_hash.h\
385 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
386
387 # clean - Delete most, but leave enough to build external modules
388 @@ -1204,6 +1264,7 @@ distclean: mrproper
389 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
390 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
391 -o -name '.*.rej' \
392 + -o -name '.*.rej' -o -name '*.so' \
393 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
394 -type f -print | xargs rm -f
395
396 @@ -1364,6 +1425,8 @@ PHONY += $(module-dirs) modules
397 $(module-dirs): crmodverdir $(objtree)/Module.symvers
398 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
399
400 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 modules: $(module-dirs)
403 @$(kecho) ' Building modules, stage 2.';
404 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
405 @@ -1490,17 +1553,21 @@ else
406 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
407 endif
408
409 -%.s: %.c prepare scripts FORCE
410 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
411 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
412 +%.s: %.c gcc-plugins prepare scripts FORCE
413 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
414 %.i: %.c prepare scripts FORCE
415 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
416 -%.o: %.c prepare scripts FORCE
417 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
418 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
419 +%.o: %.c gcc-plugins prepare scripts FORCE
420 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
421 %.lst: %.c prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423 -%.s: %.S prepare scripts FORCE
424 +%.s: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426 -%.o: %.S prepare scripts FORCE
427 +%.o: %.S gcc-plugins prepare scripts FORCE
428 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
429 %.symtypes: %.c prepare scripts FORCE
430 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
431 @@ -1510,11 +1577,15 @@ endif
432 $(cmd_crmodverdir)
433 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
434 $(build)=$(build-dir)
435 -%/: prepare scripts FORCE
436 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
437 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
438 +%/: gcc-plugins prepare scripts FORCE
439 $(cmd_crmodverdir)
440 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
441 $(build)=$(build-dir)
442 -%.ko: prepare scripts FORCE
443 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
444 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
445 +%.ko: gcc-plugins prepare scripts FORCE
446 $(cmd_crmodverdir)
447 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
448 $(build)=$(build-dir) $(@:.ko=.o)
449 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
450 index 3bb7ffe..347a54c 100644
451 --- a/arch/alpha/include/asm/atomic.h
452 +++ b/arch/alpha/include/asm/atomic.h
453 @@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
454 #define atomic_dec(v) atomic_sub(1,(v))
455 #define atomic64_dec(v) atomic64_sub(1,(v))
456
457 +#define atomic64_read_unchecked(v) atomic64_read(v)
458 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
459 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
460 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
461 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
462 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
463 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
464 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
465 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
466 +
467 #define smp_mb__before_atomic_dec() smp_mb()
468 #define smp_mb__after_atomic_dec() smp_mb()
469 #define smp_mb__before_atomic_inc() smp_mb()
470 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
471 index ad368a9..fbe0f25 100644
472 --- a/arch/alpha/include/asm/cache.h
473 +++ b/arch/alpha/include/asm/cache.h
474 @@ -4,19 +4,19 @@
475 #ifndef __ARCH_ALPHA_CACHE_H
476 #define __ARCH_ALPHA_CACHE_H
477
478 +#include <linux/const.h>
479
480 /* Bytes per L1 (data) cache line. */
481 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
482 -# define L1_CACHE_BYTES 64
483 # define L1_CACHE_SHIFT 6
484 #else
485 /* Both EV4 and EV5 are write-through, read-allocate,
486 direct-mapped, physical.
487 */
488 -# define L1_CACHE_BYTES 32
489 # define L1_CACHE_SHIFT 5
490 #endif
491
492 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
493 #define SMP_CACHE_BYTES L1_CACHE_BYTES
494
495 #endif
496 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
497 index 968d999..d36b2df 100644
498 --- a/arch/alpha/include/asm/elf.h
499 +++ b/arch/alpha/include/asm/elf.h
500 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
501
502 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
503
504 +#ifdef CONFIG_PAX_ASLR
505 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
506 +
507 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
508 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
509 +#endif
510 +
511 /* $0 is set by ld.so to a pointer to a function which might be
512 registered using atexit. This provides a mean for the dynamic
513 linker to call DT_FINI functions for shared libraries that have
514 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
515 index bc2a0da..8ad11ee 100644
516 --- a/arch/alpha/include/asm/pgalloc.h
517 +++ b/arch/alpha/include/asm/pgalloc.h
518 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
519 pgd_set(pgd, pmd);
520 }
521
522 +static inline void
523 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
524 +{
525 + pgd_populate(mm, pgd, pmd);
526 +}
527 +
528 extern pgd_t *pgd_alloc(struct mm_struct *mm);
529
530 static inline void
531 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
532 index 81a4342..348b927 100644
533 --- a/arch/alpha/include/asm/pgtable.h
534 +++ b/arch/alpha/include/asm/pgtable.h
535 @@ -102,6 +102,17 @@ struct vm_area_struct;
536 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
537 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
538 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
539 +
540 +#ifdef CONFIG_PAX_PAGEEXEC
541 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
542 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
543 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
544 +#else
545 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
546 +# define PAGE_COPY_NOEXEC PAGE_COPY
547 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
548 +#endif
549 +
550 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
551
552 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
553 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
554 index 2fd00b7..cfd5069 100644
555 --- a/arch/alpha/kernel/module.c
556 +++ b/arch/alpha/kernel/module.c
557 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
558
559 /* The small sections were sorted to the end of the segment.
560 The following should definitely cover them. */
561 - gp = (u64)me->module_core + me->core_size - 0x8000;
562 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
563 got = sechdrs[me->arch.gotsecindex].sh_addr;
564
565 for (i = 0; i < n; i++) {
566 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
567 index 49ee319..9ee7d14 100644
568 --- a/arch/alpha/kernel/osf_sys.c
569 +++ b/arch/alpha/kernel/osf_sys.c
570 @@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
571 /* At this point: (!vma || addr < vma->vm_end). */
572 if (limit - len < addr)
573 return -ENOMEM;
574 - if (!vma || addr + len <= vma->vm_start)
575 + if (check_heap_stack_gap(vma, addr, len))
576 return addr;
577 addr = vma->vm_end;
578 vma = vma->vm_next;
579 @@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
580 merely specific addresses, but regions of memory -- perhaps
581 this feature should be incorporated into all ports? */
582
583 +#ifdef CONFIG_PAX_RANDMMAP
584 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
585 +#endif
586 +
587 if (addr) {
588 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
589 if (addr != (unsigned long) -ENOMEM)
590 @@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
591 }
592
593 /* Next, try allocating at TASK_UNMAPPED_BASE. */
594 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
595 - len, limit);
596 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
597 +
598 if (addr != (unsigned long) -ENOMEM)
599 return addr;
600
601 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
602 index 5eecab1..609abc0 100644
603 --- a/arch/alpha/mm/fault.c
604 +++ b/arch/alpha/mm/fault.c
605 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
606 __reload_thread(pcb);
607 }
608
609 +#ifdef CONFIG_PAX_PAGEEXEC
610 +/*
611 + * PaX: decide what to do with offenders (regs->pc = fault address)
612 + *
613 + * returns 1 when task should be killed
614 + * 2 when patched PLT trampoline was detected
615 + * 3 when unpatched PLT trampoline was detected
616 + */
617 +static int pax_handle_fetch_fault(struct pt_regs *regs)
618 +{
619 +
620 +#ifdef CONFIG_PAX_EMUPLT
621 + int err;
622 +
623 + do { /* PaX: patched PLT emulation #1 */
624 + unsigned int ldah, ldq, jmp;
625 +
626 + err = get_user(ldah, (unsigned int *)regs->pc);
627 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
628 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
629 +
630 + if (err)
631 + break;
632 +
633 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
634 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
635 + jmp == 0x6BFB0000U)
636 + {
637 + unsigned long r27, addr;
638 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
639 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
640 +
641 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
642 + err = get_user(r27, (unsigned long *)addr);
643 + if (err)
644 + break;
645 +
646 + regs->r27 = r27;
647 + regs->pc = r27;
648 + return 2;
649 + }
650 + } while (0);
651 +
652 + do { /* PaX: patched PLT emulation #2 */
653 + unsigned int ldah, lda, br;
654 +
655 + err = get_user(ldah, (unsigned int *)regs->pc);
656 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
657 + err |= get_user(br, (unsigned int *)(regs->pc+8));
658 +
659 + if (err)
660 + break;
661 +
662 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
663 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
664 + (br & 0xFFE00000U) == 0xC3E00000U)
665 + {
666 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
667 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
668 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
669 +
670 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
671 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
672 + return 2;
673 + }
674 + } while (0);
675 +
676 + do { /* PaX: unpatched PLT emulation */
677 + unsigned int br;
678 +
679 + err = get_user(br, (unsigned int *)regs->pc);
680 +
681 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
682 + unsigned int br2, ldq, nop, jmp;
683 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
684 +
685 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
686 + err = get_user(br2, (unsigned int *)addr);
687 + err |= get_user(ldq, (unsigned int *)(addr+4));
688 + err |= get_user(nop, (unsigned int *)(addr+8));
689 + err |= get_user(jmp, (unsigned int *)(addr+12));
690 + err |= get_user(resolver, (unsigned long *)(addr+16));
691 +
692 + if (err)
693 + break;
694 +
695 + if (br2 == 0xC3600000U &&
696 + ldq == 0xA77B000CU &&
697 + nop == 0x47FF041FU &&
698 + jmp == 0x6B7B0000U)
699 + {
700 + regs->r28 = regs->pc+4;
701 + regs->r27 = addr+16;
702 + regs->pc = resolver;
703 + return 3;
704 + }
705 + }
706 + } while (0);
707 +#endif
708 +
709 + return 1;
710 +}
711 +
712 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
713 +{
714 + unsigned long i;
715 +
716 + printk(KERN_ERR "PAX: bytes at PC: ");
717 + for (i = 0; i < 5; i++) {
718 + unsigned int c;
719 + if (get_user(c, (unsigned int *)pc+i))
720 + printk(KERN_CONT "???????? ");
721 + else
722 + printk(KERN_CONT "%08x ", c);
723 + }
724 + printk("\n");
725 +}
726 +#endif
727
728 /*
729 * This routine handles page faults. It determines the address,
730 @@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
731 good_area:
732 si_code = SEGV_ACCERR;
733 if (cause < 0) {
734 - if (!(vma->vm_flags & VM_EXEC))
735 + if (!(vma->vm_flags & VM_EXEC)) {
736 +
737 +#ifdef CONFIG_PAX_PAGEEXEC
738 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
739 + goto bad_area;
740 +
741 + up_read(&mm->mmap_sem);
742 + switch (pax_handle_fetch_fault(regs)) {
743 +
744 +#ifdef CONFIG_PAX_EMUPLT
745 + case 2:
746 + case 3:
747 + return;
748 +#endif
749 +
750 + }
751 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
752 + do_group_exit(SIGKILL);
753 +#else
754 goto bad_area;
755 +#endif
756 +
757 + }
758 } else if (!cause) {
759 /* Allow reads even for write-only mappings */
760 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
761 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
762 index 68374ba..cff7196 100644
763 --- a/arch/arm/include/asm/atomic.h
764 +++ b/arch/arm/include/asm/atomic.h
765 @@ -17,17 +17,35 @@
766 #include <asm/barrier.h>
767 #include <asm/cmpxchg.h>
768
769 +#ifdef CONFIG_GENERIC_ATOMIC64
770 +#include <asm-generic/atomic64.h>
771 +#endif
772 +
773 #define ATOMIC_INIT(i) { (i) }
774
775 #ifdef __KERNEL__
776
777 +#define _ASM_EXTABLE(from, to) \
778 +" .pushsection __ex_table,\"a\"\n"\
779 +" .align 3\n" \
780 +" .long " #from ", " #to"\n" \
781 +" .popsection"
782 +
783 /*
784 * On ARM, ordinary assignment (str instruction) doesn't clear the local
785 * strex/ldrex monitor on some implementations. The reason we can use it for
786 * atomic_set() is the clrex or dummy strex done on every exception return.
787 */
788 #define atomic_read(v) (*(volatile int *)&(v)->counter)
789 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
790 +{
791 + return v->counter;
792 +}
793 #define atomic_set(v,i) (((v)->counter) = (i))
794 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
795 +{
796 + v->counter = i;
797 +}
798
799 #if __LINUX_ARM_ARCH__ >= 6
800
801 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
802 int result;
803
804 __asm__ __volatile__("@ atomic_add\n"
805 +"1: ldrex %1, [%3]\n"
806 +" adds %0, %1, %4\n"
807 +
808 +#ifdef CONFIG_PAX_REFCOUNT
809 +" bvc 3f\n"
810 +"2: bkpt 0xf103\n"
811 +"3:\n"
812 +#endif
813 +
814 +" strex %1, %0, [%3]\n"
815 +" teq %1, #0\n"
816 +" bne 1b"
817 +
818 +#ifdef CONFIG_PAX_REFCOUNT
819 +"\n4:\n"
820 + _ASM_EXTABLE(2b, 4b)
821 +#endif
822 +
823 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
824 + : "r" (&v->counter), "Ir" (i)
825 + : "cc");
826 +}
827 +
828 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
829 +{
830 + unsigned long tmp;
831 + int result;
832 +
833 + __asm__ __volatile__("@ atomic_add_unchecked\n"
834 "1: ldrex %0, [%3]\n"
835 " add %0, %0, %4\n"
836 " strex %1, %0, [%3]\n"
837 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
838 smp_mb();
839
840 __asm__ __volatile__("@ atomic_add_return\n"
841 +"1: ldrex %1, [%3]\n"
842 +" adds %0, %1, %4\n"
843 +
844 +#ifdef CONFIG_PAX_REFCOUNT
845 +" bvc 3f\n"
846 +" mov %0, %1\n"
847 +"2: bkpt 0xf103\n"
848 +"3:\n"
849 +#endif
850 +
851 +" strex %1, %0, [%3]\n"
852 +" teq %1, #0\n"
853 +" bne 1b"
854 +
855 +#ifdef CONFIG_PAX_REFCOUNT
856 +"\n4:\n"
857 + _ASM_EXTABLE(2b, 4b)
858 +#endif
859 +
860 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
861 + : "r" (&v->counter), "Ir" (i)
862 + : "cc");
863 +
864 + smp_mb();
865 +
866 + return result;
867 +}
868 +
869 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
870 +{
871 + unsigned long tmp;
872 + int result;
873 +
874 + smp_mb();
875 +
876 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
877 "1: ldrex %0, [%3]\n"
878 " add %0, %0, %4\n"
879 " strex %1, %0, [%3]\n"
880 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
881 int result;
882
883 __asm__ __volatile__("@ atomic_sub\n"
884 +"1: ldrex %1, [%3]\n"
885 +" subs %0, %1, %4\n"
886 +
887 +#ifdef CONFIG_PAX_REFCOUNT
888 +" bvc 3f\n"
889 +"2: bkpt 0xf103\n"
890 +"3:\n"
891 +#endif
892 +
893 +" strex %1, %0, [%3]\n"
894 +" teq %1, #0\n"
895 +" bne 1b"
896 +
897 +#ifdef CONFIG_PAX_REFCOUNT
898 +"\n4:\n"
899 + _ASM_EXTABLE(2b, 4b)
900 +#endif
901 +
902 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
903 + : "r" (&v->counter), "Ir" (i)
904 + : "cc");
905 +}
906 +
907 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
908 +{
909 + unsigned long tmp;
910 + int result;
911 +
912 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
913 "1: ldrex %0, [%3]\n"
914 " sub %0, %0, %4\n"
915 " strex %1, %0, [%3]\n"
916 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
917 smp_mb();
918
919 __asm__ __volatile__("@ atomic_sub_return\n"
920 -"1: ldrex %0, [%3]\n"
921 -" sub %0, %0, %4\n"
922 +"1: ldrex %1, [%3]\n"
923 +" sub %0, %1, %4\n"
924 +
925 +#ifdef CONFIG_PAX_REFCOUNT
926 +" bvc 3f\n"
927 +" mov %0, %1\n"
928 +"2: bkpt 0xf103\n"
929 +"3:\n"
930 +#endif
931 +
932 " strex %1, %0, [%3]\n"
933 " teq %1, #0\n"
934 " bne 1b"
935 +
936 +#ifdef CONFIG_PAX_REFCOUNT
937 +"\n4:\n"
938 + _ASM_EXTABLE(2b, 4b)
939 +#endif
940 +
941 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
942 : "r" (&v->counter), "Ir" (i)
943 : "cc");
944 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
945 return oldval;
946 }
947
948 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
949 +{
950 + unsigned long oldval, res;
951 +
952 + smp_mb();
953 +
954 + do {
955 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
956 + "ldrex %1, [%3]\n"
957 + "mov %0, #0\n"
958 + "teq %1, %4\n"
959 + "strexeq %0, %5, [%3]\n"
960 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
961 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
962 + : "cc");
963 + } while (res);
964 +
965 + smp_mb();
966 +
967 + return oldval;
968 +}
969 +
970 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
971 {
972 unsigned long tmp, tmp2;
973 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
974
975 return val;
976 }
977 +
978 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
979 +{
980 + return atomic_add_return(i, v);
981 +}
982 +
983 #define atomic_add(i, v) (void) atomic_add_return(i, v)
984 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
985 +{
986 + (void) atomic_add_return(i, v);
987 +}
988
989 static inline int atomic_sub_return(int i, atomic_t *v)
990 {
991 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
992 return val;
993 }
994 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
995 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
996 +{
997 + (void) atomic_sub_return(i, v);
998 +}
999
1000 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1001 {
1002 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1003 return ret;
1004 }
1005
1006 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1007 +{
1008 + return atomic_cmpxchg(v, old, new);
1009 +}
1010 +
1011 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1012 {
1013 unsigned long flags;
1014 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1015 #endif /* __LINUX_ARM_ARCH__ */
1016
1017 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1018 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1019 +{
1020 + return xchg(&v->counter, new);
1021 +}
1022
1023 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1024 {
1025 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1026 }
1027
1028 #define atomic_inc(v) atomic_add(1, v)
1029 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1030 +{
1031 + atomic_add_unchecked(1, v);
1032 +}
1033 #define atomic_dec(v) atomic_sub(1, v)
1034 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1035 +{
1036 + atomic_sub_unchecked(1, v);
1037 +}
1038
1039 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1040 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1041 +{
1042 + return atomic_add_return_unchecked(1, v) == 0;
1043 +}
1044 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1045 #define atomic_inc_return(v) (atomic_add_return(1, v))
1046 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1047 +{
1048 + return atomic_add_return_unchecked(1, v);
1049 +}
1050 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1051 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1052
1053 @@ -241,6 +428,14 @@ typedef struct {
1054 u64 __aligned(8) counter;
1055 } atomic64_t;
1056
1057 +#ifdef CONFIG_PAX_REFCOUNT
1058 +typedef struct {
1059 + u64 __aligned(8) counter;
1060 +} atomic64_unchecked_t;
1061 +#else
1062 +typedef atomic64_t atomic64_unchecked_t;
1063 +#endif
1064 +
1065 #define ATOMIC64_INIT(i) { (i) }
1066
1067 static inline u64 atomic64_read(atomic64_t *v)
1068 @@ -256,6 +451,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1069 return result;
1070 }
1071
1072 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1073 +{
1074 + u64 result;
1075 +
1076 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1077 +" ldrexd %0, %H0, [%1]"
1078 + : "=&r" (result)
1079 + : "r" (&v->counter), "Qo" (v->counter)
1080 + );
1081 +
1082 + return result;
1083 +}
1084 +
1085 static inline void atomic64_set(atomic64_t *v, u64 i)
1086 {
1087 u64 tmp;
1088 @@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1089 : "cc");
1090 }
1091
1092 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1093 +{
1094 + u64 tmp;
1095 +
1096 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1097 +"1: ldrexd %0, %H0, [%2]\n"
1098 +" strexd %0, %3, %H3, [%2]\n"
1099 +" teq %0, #0\n"
1100 +" bne 1b"
1101 + : "=&r" (tmp), "=Qo" (v->counter)
1102 + : "r" (&v->counter), "r" (i)
1103 + : "cc");
1104 +}
1105 +
1106 static inline void atomic64_add(u64 i, atomic64_t *v)
1107 {
1108 u64 result;
1109 @@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1110 __asm__ __volatile__("@ atomic64_add\n"
1111 "1: ldrexd %0, %H0, [%3]\n"
1112 " adds %0, %0, %4\n"
1113 +" adcs %H0, %H0, %H4\n"
1114 +
1115 +#ifdef CONFIG_PAX_REFCOUNT
1116 +" bvc 3f\n"
1117 +"2: bkpt 0xf103\n"
1118 +"3:\n"
1119 +#endif
1120 +
1121 +" strexd %1, %0, %H0, [%3]\n"
1122 +" teq %1, #0\n"
1123 +" bne 1b"
1124 +
1125 +#ifdef CONFIG_PAX_REFCOUNT
1126 +"\n4:\n"
1127 + _ASM_EXTABLE(2b, 4b)
1128 +#endif
1129 +
1130 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131 + : "r" (&v->counter), "r" (i)
1132 + : "cc");
1133 +}
1134 +
1135 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1136 +{
1137 + u64 result;
1138 + unsigned long tmp;
1139 +
1140 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1141 +"1: ldrexd %0, %H0, [%3]\n"
1142 +" adds %0, %0, %4\n"
1143 " adc %H0, %H0, %H4\n"
1144 " strexd %1, %0, %H0, [%3]\n"
1145 " teq %1, #0\n"
1146 @@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1147
1148 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1149 {
1150 - u64 result;
1151 - unsigned long tmp;
1152 + u64 result, tmp;
1153
1154 smp_mb();
1155
1156 __asm__ __volatile__("@ atomic64_add_return\n"
1157 +"1: ldrexd %1, %H1, [%3]\n"
1158 +" adds %0, %1, %4\n"
1159 +" adcs %H0, %H1, %H4\n"
1160 +
1161 +#ifdef CONFIG_PAX_REFCOUNT
1162 +" bvc 3f\n"
1163 +" mov %0, %1\n"
1164 +" mov %H0, %H1\n"
1165 +"2: bkpt 0xf103\n"
1166 +"3:\n"
1167 +#endif
1168 +
1169 +" strexd %1, %0, %H0, [%3]\n"
1170 +" teq %1, #0\n"
1171 +" bne 1b"
1172 +
1173 +#ifdef CONFIG_PAX_REFCOUNT
1174 +"\n4:\n"
1175 + _ASM_EXTABLE(2b, 4b)
1176 +#endif
1177 +
1178 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1179 + : "r" (&v->counter), "r" (i)
1180 + : "cc");
1181 +
1182 + smp_mb();
1183 +
1184 + return result;
1185 +}
1186 +
1187 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1188 +{
1189 + u64 result;
1190 + unsigned long tmp;
1191 +
1192 + smp_mb();
1193 +
1194 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1195 "1: ldrexd %0, %H0, [%3]\n"
1196 " adds %0, %0, %4\n"
1197 " adc %H0, %H0, %H4\n"
1198 @@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1199 __asm__ __volatile__("@ atomic64_sub\n"
1200 "1: ldrexd %0, %H0, [%3]\n"
1201 " subs %0, %0, %4\n"
1202 +" sbcs %H0, %H0, %H4\n"
1203 +
1204 +#ifdef CONFIG_PAX_REFCOUNT
1205 +" bvc 3f\n"
1206 +"2: bkpt 0xf103\n"
1207 +"3:\n"
1208 +#endif
1209 +
1210 +" strexd %1, %0, %H0, [%3]\n"
1211 +" teq %1, #0\n"
1212 +" bne 1b"
1213 +
1214 +#ifdef CONFIG_PAX_REFCOUNT
1215 +"\n4:\n"
1216 + _ASM_EXTABLE(2b, 4b)
1217 +#endif
1218 +
1219 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1220 + : "r" (&v->counter), "r" (i)
1221 + : "cc");
1222 +}
1223 +
1224 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1225 +{
1226 + u64 result;
1227 + unsigned long tmp;
1228 +
1229 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1230 +"1: ldrexd %0, %H0, [%3]\n"
1231 +" subs %0, %0, %4\n"
1232 " sbc %H0, %H0, %H4\n"
1233 " strexd %1, %0, %H0, [%3]\n"
1234 " teq %1, #0\n"
1235 @@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1236
1237 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1238 {
1239 - u64 result;
1240 - unsigned long tmp;
1241 + u64 result, tmp;
1242
1243 smp_mb();
1244
1245 __asm__ __volatile__("@ atomic64_sub_return\n"
1246 -"1: ldrexd %0, %H0, [%3]\n"
1247 -" subs %0, %0, %4\n"
1248 -" sbc %H0, %H0, %H4\n"
1249 +"1: ldrexd %1, %H1, [%3]\n"
1250 +" subs %0, %1, %4\n"
1251 +" sbc %H0, %H1, %H4\n"
1252 +
1253 +#ifdef CONFIG_PAX_REFCOUNT
1254 +" bvc 3f\n"
1255 +" mov %0, %1\n"
1256 +" mov %H0, %H1\n"
1257 +"2: bkpt 0xf103\n"
1258 +"3:\n"
1259 +#endif
1260 +
1261 " strexd %1, %0, %H0, [%3]\n"
1262 " teq %1, #0\n"
1263 " bne 1b"
1264 +
1265 +#ifdef CONFIG_PAX_REFCOUNT
1266 +"\n4:\n"
1267 + _ASM_EXTABLE(2b, 4b)
1268 +#endif
1269 +
1270 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1271 : "r" (&v->counter), "r" (i)
1272 : "cc");
1273 @@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1274 return oldval;
1275 }
1276
1277 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1278 +{
1279 + u64 oldval;
1280 + unsigned long res;
1281 +
1282 + smp_mb();
1283 +
1284 + do {
1285 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1286 + "ldrexd %1, %H1, [%3]\n"
1287 + "mov %0, #0\n"
1288 + "teq %1, %4\n"
1289 + "teqeq %H1, %H4\n"
1290 + "strexdeq %0, %5, %H5, [%3]"
1291 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1292 + : "r" (&ptr->counter), "r" (old), "r" (new)
1293 + : "cc");
1294 + } while (res);
1295 +
1296 + smp_mb();
1297 +
1298 + return oldval;
1299 +}
1300 +
1301 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1302 {
1303 u64 result;
1304 @@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1305
1306 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1307 {
1308 - u64 result;
1309 - unsigned long tmp;
1310 + u64 result, tmp;
1311
1312 smp_mb();
1313
1314 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1315 -"1: ldrexd %0, %H0, [%3]\n"
1316 -" subs %0, %0, #1\n"
1317 -" sbc %H0, %H0, #0\n"
1318 +"1: ldrexd %1, %H1, [%3]\n"
1319 +" subs %0, %1, #1\n"
1320 +" sbc %H0, %H1, #0\n"
1321 +
1322 +#ifdef CONFIG_PAX_REFCOUNT
1323 +" bvc 3f\n"
1324 +" mov %0, %1\n"
1325 +" mov %H0, %H1\n"
1326 +"2: bkpt 0xf103\n"
1327 +"3:\n"
1328 +#endif
1329 +
1330 " teq %H0, #0\n"
1331 -" bmi 2f\n"
1332 +" bmi 4f\n"
1333 " strexd %1, %0, %H0, [%3]\n"
1334 " teq %1, #0\n"
1335 " bne 1b\n"
1336 -"2:"
1337 +"4:\n"
1338 +
1339 +#ifdef CONFIG_PAX_REFCOUNT
1340 + _ASM_EXTABLE(2b, 4b)
1341 +#endif
1342 +
1343 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1344 : "r" (&v->counter)
1345 : "cc");
1346 @@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1347 " teq %0, %5\n"
1348 " teqeq %H0, %H5\n"
1349 " moveq %1, #0\n"
1350 -" beq 2f\n"
1351 +" beq 4f\n"
1352 " adds %0, %0, %6\n"
1353 " adc %H0, %H0, %H6\n"
1354 +
1355 +#ifdef CONFIG_PAX_REFCOUNT
1356 +" bvc 3f\n"
1357 +"2: bkpt 0xf103\n"
1358 +"3:\n"
1359 +#endif
1360 +
1361 " strexd %2, %0, %H0, [%4]\n"
1362 " teq %2, #0\n"
1363 " bne 1b\n"
1364 -"2:"
1365 +"4:\n"
1366 +
1367 +#ifdef CONFIG_PAX_REFCOUNT
1368 + _ASM_EXTABLE(2b, 4b)
1369 +#endif
1370 +
1371 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1372 : "r" (&v->counter), "r" (u), "r" (a)
1373 : "cc");
1374 @@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1375
1376 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1377 #define atomic64_inc(v) atomic64_add(1LL, (v))
1378 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1379 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1380 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1381 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1382 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1383 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1384 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1385 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1386 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1387 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1388 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1389 index 75fe66b..2255c86 100644
1390 --- a/arch/arm/include/asm/cache.h
1391 +++ b/arch/arm/include/asm/cache.h
1392 @@ -4,8 +4,10 @@
1393 #ifndef __ASMARM_CACHE_H
1394 #define __ASMARM_CACHE_H
1395
1396 +#include <linux/const.h>
1397 +
1398 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1399 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1400 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1401
1402 /*
1403 * Memory returned by kmalloc() may be used for DMA, so we must make
1404 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1405 index 1252a26..9dc17b5 100644
1406 --- a/arch/arm/include/asm/cacheflush.h
1407 +++ b/arch/arm/include/asm/cacheflush.h
1408 @@ -108,7 +108,7 @@ struct cpu_cache_fns {
1409 void (*dma_unmap_area)(const void *, size_t, int);
1410
1411 void (*dma_flush_range)(const void *, const void *);
1412 -};
1413 +} __no_const;
1414
1415 /*
1416 * Select the calling method
1417 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1418 index d41d7cb..9bea5e0 100644
1419 --- a/arch/arm/include/asm/cmpxchg.h
1420 +++ b/arch/arm/include/asm/cmpxchg.h
1421 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1422
1423 #define xchg(ptr,x) \
1424 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1425 +#define xchg_unchecked(ptr,x) \
1426 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1427
1428 #include <asm-generic/cmpxchg-local.h>
1429
1430 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1431 index 38050b1..9d90e8b 100644
1432 --- a/arch/arm/include/asm/elf.h
1433 +++ b/arch/arm/include/asm/elf.h
1434 @@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1435 the loader. We need to make sure that it is out of the way of the program
1436 that it will "exec", and that there is sufficient room for the brk. */
1437
1438 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1439 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1440 +
1441 +#ifdef CONFIG_PAX_ASLR
1442 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1443 +
1444 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1445 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1446 +#endif
1447
1448 /* When the program starts, a1 contains a pointer to a function to be
1449 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1450 @@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1451 extern void elf_set_personality(const struct elf32_hdr *);
1452 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1453
1454 -struct mm_struct;
1455 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1456 -#define arch_randomize_brk arch_randomize_brk
1457 -
1458 #endif
1459 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1460 index e51b1e8..32a3113 100644
1461 --- a/arch/arm/include/asm/kmap_types.h
1462 +++ b/arch/arm/include/asm/kmap_types.h
1463 @@ -21,6 +21,7 @@ enum km_type {
1464 KM_L1_CACHE,
1465 KM_L2_CACHE,
1466 KM_KDB,
1467 + KM_CLEARPAGE,
1468 KM_TYPE_NR
1469 };
1470
1471 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1472 index 53426c6..c7baff3 100644
1473 --- a/arch/arm/include/asm/outercache.h
1474 +++ b/arch/arm/include/asm/outercache.h
1475 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1476 #endif
1477 void (*set_debug)(unsigned long);
1478 void (*resume)(void);
1479 -};
1480 +} __no_const;
1481
1482 #ifdef CONFIG_OUTER_CACHE
1483
1484 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1485 index 5838361..da6e813 100644
1486 --- a/arch/arm/include/asm/page.h
1487 +++ b/arch/arm/include/asm/page.h
1488 @@ -123,7 +123,7 @@ struct cpu_user_fns {
1489 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1490 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1491 unsigned long vaddr, struct vm_area_struct *vma);
1492 -};
1493 +} __no_const;
1494
1495 #ifdef MULTI_USER
1496 extern struct cpu_user_fns cpu_user;
1497 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1498 index 943504f..bf8d667 100644
1499 --- a/arch/arm/include/asm/pgalloc.h
1500 +++ b/arch/arm/include/asm/pgalloc.h
1501 @@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1502 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1503 }
1504
1505 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1506 +{
1507 + pud_populate(mm, pud, pmd);
1508 +}
1509 +
1510 #else /* !CONFIG_ARM_LPAE */
1511
1512 /*
1513 @@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1514 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1515 #define pmd_free(mm, pmd) do { } while (0)
1516 #define pud_populate(mm,pmd,pte) BUG()
1517 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1518
1519 #endif /* CONFIG_ARM_LPAE */
1520
1521 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
1522 index 0f04d84..2be5648 100644
1523 --- a/arch/arm/include/asm/thread_info.h
1524 +++ b/arch/arm/include/asm/thread_info.h
1525 @@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1526 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1527 #define TIF_SYSCALL_TRACE 8
1528 #define TIF_SYSCALL_AUDIT 9
1529 +
1530 +/* within 8 bits of TIF_SYSCALL_TRACE
1531 + to meet flexible second operand requirements
1532 +*/
1533 +#define TIF_GRSEC_SETXID 10
1534 +
1535 #define TIF_POLLING_NRFLAG 16
1536 #define TIF_USING_IWMMXT 17
1537 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
1538 @@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1539 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1540 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1541 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1542 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1543
1544 /* Checks for any syscall work in entry-common.S */
1545 -#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1546 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1547 + _TIF_GRSEC_SETXID)
1548
1549 /*
1550 * Change these and you break ASM code in entry-common.S
1551 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
1552 index 71f6536..602f279 100644
1553 --- a/arch/arm/include/asm/uaccess.h
1554 +++ b/arch/arm/include/asm/uaccess.h
1555 @@ -22,6 +22,8 @@
1556 #define VERIFY_READ 0
1557 #define VERIFY_WRITE 1
1558
1559 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1560 +
1561 /*
1562 * The exception table consists of pairs of addresses: the first is the
1563 * address of an instruction that is allowed to fault, and the second is
1564 @@ -387,8 +389,23 @@ do { \
1565
1566
1567 #ifdef CONFIG_MMU
1568 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1569 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1570 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1571 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1572 +
1573 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1574 +{
1575 + if (!__builtin_constant_p(n))
1576 + check_object_size(to, n, false);
1577 + return ___copy_from_user(to, from, n);
1578 +}
1579 +
1580 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1581 +{
1582 + if (!__builtin_constant_p(n))
1583 + check_object_size(from, n, true);
1584 + return ___copy_to_user(to, from, n);
1585 +}
1586 +
1587 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1588 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1589 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
1590 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1591
1592 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1593 {
1594 + if ((long)n < 0)
1595 + return n;
1596 +
1597 if (access_ok(VERIFY_READ, from, n))
1598 n = __copy_from_user(to, from, n);
1599 else /* security hole - plug it */
1600 @@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1601
1602 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1603 {
1604 + if ((long)n < 0)
1605 + return n;
1606 +
1607 if (access_ok(VERIFY_WRITE, to, n))
1608 n = __copy_to_user(to, from, n);
1609 return n;
1610 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
1611 index b57c75e..ed2d6b2 100644
1612 --- a/arch/arm/kernel/armksyms.c
1613 +++ b/arch/arm/kernel/armksyms.c
1614 @@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1615 #ifdef CONFIG_MMU
1616 EXPORT_SYMBOL(copy_page);
1617
1618 -EXPORT_SYMBOL(__copy_from_user);
1619 -EXPORT_SYMBOL(__copy_to_user);
1620 +EXPORT_SYMBOL(___copy_from_user);
1621 +EXPORT_SYMBOL(___copy_to_user);
1622 EXPORT_SYMBOL(__clear_user);
1623
1624 EXPORT_SYMBOL(__get_user_1);
1625 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
1626 index 2b7b017..c380fa2 100644
1627 --- a/arch/arm/kernel/process.c
1628 +++ b/arch/arm/kernel/process.c
1629 @@ -28,7 +28,6 @@
1630 #include <linux/tick.h>
1631 #include <linux/utsname.h>
1632 #include <linux/uaccess.h>
1633 -#include <linux/random.h>
1634 #include <linux/hw_breakpoint.h>
1635 #include <linux/cpuidle.h>
1636
1637 @@ -275,9 +274,10 @@ void machine_power_off(void)
1638 machine_shutdown();
1639 if (pm_power_off)
1640 pm_power_off();
1641 + BUG();
1642 }
1643
1644 -void machine_restart(char *cmd)
1645 +__noreturn void machine_restart(char *cmd)
1646 {
1647 machine_shutdown();
1648
1649 @@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1650 return 0;
1651 }
1652
1653 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1654 -{
1655 - unsigned long range_end = mm->brk + 0x02000000;
1656 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1657 -}
1658 -
1659 #ifdef CONFIG_MMU
1660 /*
1661 * The vectors page is always readable from user space for the
1662 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
1663 index 9650c14..ae30cdd 100644
1664 --- a/arch/arm/kernel/ptrace.c
1665 +++ b/arch/arm/kernel/ptrace.c
1666 @@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1667 return ret;
1668 }
1669
1670 +#ifdef CONFIG_GRKERNSEC_SETXID
1671 +extern void gr_delayed_cred_worker(void);
1672 +#endif
1673 +
1674 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1675 {
1676 unsigned long ip;
1677
1678 +#ifdef CONFIG_GRKERNSEC_SETXID
1679 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1680 + gr_delayed_cred_worker();
1681 +#endif
1682 +
1683 if (why)
1684 audit_syscall_exit(regs);
1685 else
1686 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
1687 index ebfac78..cbea9c0 100644
1688 --- a/arch/arm/kernel/setup.c
1689 +++ b/arch/arm/kernel/setup.c
1690 @@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1691 struct cpu_tlb_fns cpu_tlb __read_mostly;
1692 #endif
1693 #ifdef MULTI_USER
1694 -struct cpu_user_fns cpu_user __read_mostly;
1695 +struct cpu_user_fns cpu_user __read_only;
1696 #endif
1697 #ifdef MULTI_CACHE
1698 -struct cpu_cache_fns cpu_cache __read_mostly;
1699 +struct cpu_cache_fns cpu_cache __read_only;
1700 #endif
1701 #ifdef CONFIG_OUTER_CACHE
1702 -struct outer_cache_fns outer_cache __read_mostly;
1703 +struct outer_cache_fns outer_cache __read_only;
1704 EXPORT_SYMBOL(outer_cache);
1705 #endif
1706
1707 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
1708 index 63d402f..db1d714 100644
1709 --- a/arch/arm/kernel/traps.c
1710 +++ b/arch/arm/kernel/traps.c
1711 @@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
1712
1713 static DEFINE_RAW_SPINLOCK(die_lock);
1714
1715 +extern void gr_handle_kernel_exploit(void);
1716 +
1717 /*
1718 * This function is protected against re-entrancy.
1719 */
1720 @@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1721 panic("Fatal exception in interrupt");
1722 if (panic_on_oops)
1723 panic("Fatal exception");
1724 +
1725 + gr_handle_kernel_exploit();
1726 +
1727 if (ret != NOTIFY_STOP)
1728 do_exit(SIGSEGV);
1729 }
1730 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1731 index 66a477a..bee61d3 100644
1732 --- a/arch/arm/lib/copy_from_user.S
1733 +++ b/arch/arm/lib/copy_from_user.S
1734 @@ -16,7 +16,7 @@
1735 /*
1736 * Prototype:
1737 *
1738 - * size_t __copy_from_user(void *to, const void *from, size_t n)
1739 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
1740 *
1741 * Purpose:
1742 *
1743 @@ -84,11 +84,11 @@
1744
1745 .text
1746
1747 -ENTRY(__copy_from_user)
1748 +ENTRY(___copy_from_user)
1749
1750 #include "copy_template.S"
1751
1752 -ENDPROC(__copy_from_user)
1753 +ENDPROC(___copy_from_user)
1754
1755 .pushsection .fixup,"ax"
1756 .align 0
1757 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1758 index 6ee2f67..d1cce76 100644
1759 --- a/arch/arm/lib/copy_page.S
1760 +++ b/arch/arm/lib/copy_page.S
1761 @@ -10,6 +10,7 @@
1762 * ASM optimised string functions
1763 */
1764 #include <linux/linkage.h>
1765 +#include <linux/const.h>
1766 #include <asm/assembler.h>
1767 #include <asm/asm-offsets.h>
1768 #include <asm/cache.h>
1769 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1770 index d066df6..df28194 100644
1771 --- a/arch/arm/lib/copy_to_user.S
1772 +++ b/arch/arm/lib/copy_to_user.S
1773 @@ -16,7 +16,7 @@
1774 /*
1775 * Prototype:
1776 *
1777 - * size_t __copy_to_user(void *to, const void *from, size_t n)
1778 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
1779 *
1780 * Purpose:
1781 *
1782 @@ -88,11 +88,11 @@
1783 .text
1784
1785 ENTRY(__copy_to_user_std)
1786 -WEAK(__copy_to_user)
1787 +WEAK(___copy_to_user)
1788
1789 #include "copy_template.S"
1790
1791 -ENDPROC(__copy_to_user)
1792 +ENDPROC(___copy_to_user)
1793 ENDPROC(__copy_to_user_std)
1794
1795 .pushsection .fixup,"ax"
1796 diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1797 index 5c908b1..e712687 100644
1798 --- a/arch/arm/lib/uaccess.S
1799 +++ b/arch/arm/lib/uaccess.S
1800 @@ -20,7 +20,7 @@
1801
1802 #define PAGE_SHIFT 12
1803
1804 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1805 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1806 * Purpose : copy a block to user memory from kernel memory
1807 * Params : to - user memory
1808 * : from - kernel memory
1809 @@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1810 sub r2, r2, ip
1811 b .Lc2u_dest_aligned
1812
1813 -ENTRY(__copy_to_user)
1814 +ENTRY(___copy_to_user)
1815 stmfd sp!, {r2, r4 - r7, lr}
1816 cmp r2, #4
1817 blt .Lc2u_not_enough
1818 @@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1819 ldrgtb r3, [r1], #0
1820 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1821 b .Lc2u_finished
1822 -ENDPROC(__copy_to_user)
1823 +ENDPROC(___copy_to_user)
1824
1825 .pushsection .fixup,"ax"
1826 .align 0
1827 9001: ldmfd sp!, {r0, r4 - r7, pc}
1828 .popsection
1829
1830 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1831 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1832 * Purpose : copy a block from user memory to kernel memory
1833 * Params : to - kernel memory
1834 * : from - user memory
1835 @@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1836 sub r2, r2, ip
1837 b .Lcfu_dest_aligned
1838
1839 -ENTRY(__copy_from_user)
1840 +ENTRY(___copy_from_user)
1841 stmfd sp!, {r0, r2, r4 - r7, lr}
1842 cmp r2, #4
1843 blt .Lcfu_not_enough
1844 @@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1845 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1846 strgtb r3, [r0], #1
1847 b .Lcfu_finished
1848 -ENDPROC(__copy_from_user)
1849 +ENDPROC(___copy_from_user)
1850
1851 .pushsection .fixup,"ax"
1852 .align 0
1853 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1854 index 025f742..8432b08 100644
1855 --- a/arch/arm/lib/uaccess_with_memcpy.c
1856 +++ b/arch/arm/lib/uaccess_with_memcpy.c
1857 @@ -104,7 +104,7 @@ out:
1858 }
1859
1860 unsigned long
1861 -__copy_to_user(void __user *to, const void *from, unsigned long n)
1862 +___copy_to_user(void __user *to, const void *from, unsigned long n)
1863 {
1864 /*
1865 * This test is stubbed out of the main function above to keep
1866 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1867 index 518091c..eae9a76 100644
1868 --- a/arch/arm/mach-omap2/board-n8x0.c
1869 +++ b/arch/arm/mach-omap2/board-n8x0.c
1870 @@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1871 }
1872 #endif
1873
1874 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1875 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1876 .late_init = n8x0_menelaus_late_init,
1877 };
1878
1879 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1880 index 5bb4835..4760f68 100644
1881 --- a/arch/arm/mm/fault.c
1882 +++ b/arch/arm/mm/fault.c
1883 @@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1884 }
1885 #endif
1886
1887 +#ifdef CONFIG_PAX_PAGEEXEC
1888 + if (fsr & FSR_LNX_PF) {
1889 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1890 + do_group_exit(SIGKILL);
1891 + }
1892 +#endif
1893 +
1894 tsk->thread.address = addr;
1895 tsk->thread.error_code = fsr;
1896 tsk->thread.trap_no = 14;
1897 @@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1898 }
1899 #endif /* CONFIG_MMU */
1900
1901 +#ifdef CONFIG_PAX_PAGEEXEC
1902 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1903 +{
1904 + long i;
1905 +
1906 + printk(KERN_ERR "PAX: bytes at PC: ");
1907 + for (i = 0; i < 20; i++) {
1908 + unsigned char c;
1909 + if (get_user(c, (__force unsigned char __user *)pc+i))
1910 + printk(KERN_CONT "?? ");
1911 + else
1912 + printk(KERN_CONT "%02x ", c);
1913 + }
1914 + printk("\n");
1915 +
1916 + printk(KERN_ERR "PAX: bytes at SP-4: ");
1917 + for (i = -1; i < 20; i++) {
1918 + unsigned long c;
1919 + if (get_user(c, (__force unsigned long __user *)sp+i))
1920 + printk(KERN_CONT "???????? ");
1921 + else
1922 + printk(KERN_CONT "%08lx ", c);
1923 + }
1924 + printk("\n");
1925 +}
1926 +#endif
1927 +
1928 /*
1929 * First Level Translation Fault Handler
1930 *
1931 @@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1932 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1933 struct siginfo info;
1934
1935 +#ifdef CONFIG_PAX_REFCOUNT
1936 + if (fsr_fs(ifsr) == 2) {
1937 + unsigned int bkpt;
1938 +
1939 + if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1940 + current->thread.error_code = ifsr;
1941 + current->thread.trap_no = 0;
1942 + pax_report_refcount_overflow(regs);
1943 + fixup_exception(regs);
1944 + return;
1945 + }
1946 + }
1947 +#endif
1948 +
1949 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1950 return;
1951
1952 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1953 index ce8cb19..3ec539d 100644
1954 --- a/arch/arm/mm/mmap.c
1955 +++ b/arch/arm/mm/mmap.c
1956 @@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1957 if (len > TASK_SIZE)
1958 return -ENOMEM;
1959
1960 +#ifdef CONFIG_PAX_RANDMMAP
1961 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1962 +#endif
1963 +
1964 if (addr) {
1965 if (do_align)
1966 addr = COLOUR_ALIGN(addr, pgoff);
1967 @@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1968 addr = PAGE_ALIGN(addr);
1969
1970 vma = find_vma(mm, addr);
1971 - if (TASK_SIZE - len >= addr &&
1972 - (!vma || addr + len <= vma->vm_start))
1973 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1974 return addr;
1975 }
1976 if (len > mm->cached_hole_size) {
1977 - start_addr = addr = mm->free_area_cache;
1978 + start_addr = addr = mm->free_area_cache;
1979 } else {
1980 - start_addr = addr = mm->mmap_base;
1981 - mm->cached_hole_size = 0;
1982 + start_addr = addr = mm->mmap_base;
1983 + mm->cached_hole_size = 0;
1984 }
1985
1986 full_search:
1987 @@ -124,14 +127,14 @@ full_search:
1988 * Start a new search - just in case we missed
1989 * some holes.
1990 */
1991 - if (start_addr != TASK_UNMAPPED_BASE) {
1992 - start_addr = addr = TASK_UNMAPPED_BASE;
1993 + if (start_addr != mm->mmap_base) {
1994 + start_addr = addr = mm->mmap_base;
1995 mm->cached_hole_size = 0;
1996 goto full_search;
1997 }
1998 return -ENOMEM;
1999 }
2000 - if (!vma || addr + len <= vma->vm_start) {
2001 + if (check_heap_stack_gap(vma, addr, len)) {
2002 /*
2003 * Remember the place where we stopped the search:
2004 */
2005 @@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2006
2007 if (mmap_is_legacy()) {
2008 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2009 +
2010 +#ifdef CONFIG_PAX_RANDMMAP
2011 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2012 + mm->mmap_base += mm->delta_mmap;
2013 +#endif
2014 +
2015 mm->get_unmapped_area = arch_get_unmapped_area;
2016 mm->unmap_area = arch_unmap_area;
2017 } else {
2018 mm->mmap_base = mmap_base(random_factor);
2019 +
2020 +#ifdef CONFIG_PAX_RANDMMAP
2021 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2022 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2023 +#endif
2024 +
2025 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2026 mm->unmap_area = arch_unmap_area_topdown;
2027 }
2028 diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2029 index fd556f7..af2e7d2 100644
2030 --- a/arch/arm/plat-orion/include/plat/addr-map.h
2031 +++ b/arch/arm/plat-orion/include/plat/addr-map.h
2032 @@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2033 value in bridge_virt_base */
2034 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2035 const int win);
2036 -};
2037 +} __no_const;
2038
2039 /*
2040 * Information needed to setup one address mapping.
2041 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2042 index 71a6827..e7fbc23 100644
2043 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2044 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2045 @@ -43,7 +43,7 @@ struct samsung_dma_ops {
2046 int (*started)(unsigned ch);
2047 int (*flush)(unsigned ch);
2048 int (*stop)(unsigned ch);
2049 -};
2050 +} __no_const;
2051
2052 extern void *samsung_dmadev_get_ops(void);
2053 extern void *s3c_dma_get_ops(void);
2054 diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2055 index 5f28cae..3d23723 100644
2056 --- a/arch/arm/plat-samsung/include/plat/ehci.h
2057 +++ b/arch/arm/plat-samsung/include/plat/ehci.h
2058 @@ -14,7 +14,7 @@
2059 struct s5p_ehci_platdata {
2060 int (*phy_init)(struct platform_device *pdev, int type);
2061 int (*phy_exit)(struct platform_device *pdev, int type);
2062 -};
2063 +} __no_const;
2064
2065 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2066
2067 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2068 index c3a58a1..78fbf54 100644
2069 --- a/arch/avr32/include/asm/cache.h
2070 +++ b/arch/avr32/include/asm/cache.h
2071 @@ -1,8 +1,10 @@
2072 #ifndef __ASM_AVR32_CACHE_H
2073 #define __ASM_AVR32_CACHE_H
2074
2075 +#include <linux/const.h>
2076 +
2077 #define L1_CACHE_SHIFT 5
2078 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2079 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2080
2081 /*
2082 * Memory returned by kmalloc() may be used for DMA, so we must make
2083 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2084 index 3b3159b..425ea94 100644
2085 --- a/arch/avr32/include/asm/elf.h
2086 +++ b/arch/avr32/include/asm/elf.h
2087 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2088 the loader. We need to make sure that it is out of the way of the program
2089 that it will "exec", and that there is sufficient room for the brk. */
2090
2091 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2092 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2093
2094 +#ifdef CONFIG_PAX_ASLR
2095 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2096 +
2097 +#define PAX_DELTA_MMAP_LEN 15
2098 +#define PAX_DELTA_STACK_LEN 15
2099 +#endif
2100
2101 /* This yields a mask that user programs can use to figure out what
2102 instruction set this CPU supports. This could be done in user space,
2103 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2104 index b7f5c68..556135c 100644
2105 --- a/arch/avr32/include/asm/kmap_types.h
2106 +++ b/arch/avr32/include/asm/kmap_types.h
2107 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2108 D(11) KM_IRQ1,
2109 D(12) KM_SOFTIRQ0,
2110 D(13) KM_SOFTIRQ1,
2111 -D(14) KM_TYPE_NR
2112 +D(14) KM_CLEARPAGE,
2113 +D(15) KM_TYPE_NR
2114 };
2115
2116 #undef D
2117 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2118 index f7040a1..db9f300 100644
2119 --- a/arch/avr32/mm/fault.c
2120 +++ b/arch/avr32/mm/fault.c
2121 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2122
2123 int exception_trace = 1;
2124
2125 +#ifdef CONFIG_PAX_PAGEEXEC
2126 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2127 +{
2128 + unsigned long i;
2129 +
2130 + printk(KERN_ERR "PAX: bytes at PC: ");
2131 + for (i = 0; i < 20; i++) {
2132 + unsigned char c;
2133 + if (get_user(c, (unsigned char *)pc+i))
2134 + printk(KERN_CONT "???????? ");
2135 + else
2136 + printk(KERN_CONT "%02x ", c);
2137 + }
2138 + printk("\n");
2139 +}
2140 +#endif
2141 +
2142 /*
2143 * This routine handles page faults. It determines the address and the
2144 * problem, and then passes it off to one of the appropriate routines.
2145 @@ -156,6 +173,16 @@ bad_area:
2146 up_read(&mm->mmap_sem);
2147
2148 if (user_mode(regs)) {
2149 +
2150 +#ifdef CONFIG_PAX_PAGEEXEC
2151 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2152 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2153 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2154 + do_group_exit(SIGKILL);
2155 + }
2156 + }
2157 +#endif
2158 +
2159 if (exception_trace && printk_ratelimit())
2160 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2161 "sp %08lx ecr %lu\n",
2162 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2163 index 568885a..f8008df 100644
2164 --- a/arch/blackfin/include/asm/cache.h
2165 +++ b/arch/blackfin/include/asm/cache.h
2166 @@ -7,6 +7,7 @@
2167 #ifndef __ARCH_BLACKFIN_CACHE_H
2168 #define __ARCH_BLACKFIN_CACHE_H
2169
2170 +#include <linux/const.h>
2171 #include <linux/linkage.h> /* for asmlinkage */
2172
2173 /*
2174 @@ -14,7 +15,7 @@
2175 * Blackfin loads 32 bytes for cache
2176 */
2177 #define L1_CACHE_SHIFT 5
2178 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2179 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2180 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2181
2182 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2183 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2184 index aea2718..3639a60 100644
2185 --- a/arch/cris/include/arch-v10/arch/cache.h
2186 +++ b/arch/cris/include/arch-v10/arch/cache.h
2187 @@ -1,8 +1,9 @@
2188 #ifndef _ASM_ARCH_CACHE_H
2189 #define _ASM_ARCH_CACHE_H
2190
2191 +#include <linux/const.h>
2192 /* Etrax 100LX have 32-byte cache-lines. */
2193 -#define L1_CACHE_BYTES 32
2194 #define L1_CACHE_SHIFT 5
2195 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2196
2197 #endif /* _ASM_ARCH_CACHE_H */
2198 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2199 index 1de779f..336fad3 100644
2200 --- a/arch/cris/include/arch-v32/arch/cache.h
2201 +++ b/arch/cris/include/arch-v32/arch/cache.h
2202 @@ -1,11 +1,12 @@
2203 #ifndef _ASM_CRIS_ARCH_CACHE_H
2204 #define _ASM_CRIS_ARCH_CACHE_H
2205
2206 +#include <linux/const.h>
2207 #include <arch/hwregs/dma.h>
2208
2209 /* A cache-line is 32 bytes. */
2210 -#define L1_CACHE_BYTES 32
2211 #define L1_CACHE_SHIFT 5
2212 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2213
2214 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2215
2216 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2217 index b86329d..6709906 100644
2218 --- a/arch/frv/include/asm/atomic.h
2219 +++ b/arch/frv/include/asm/atomic.h
2220 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2221 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2222 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2223
2224 +#define atomic64_read_unchecked(v) atomic64_read(v)
2225 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2226 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2227 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2228 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2229 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2230 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2231 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2232 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2233 +
2234 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2235 {
2236 int c, old;
2237 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2238 index 2797163..c2a401d 100644
2239 --- a/arch/frv/include/asm/cache.h
2240 +++ b/arch/frv/include/asm/cache.h
2241 @@ -12,10 +12,11 @@
2242 #ifndef __ASM_CACHE_H
2243 #define __ASM_CACHE_H
2244
2245 +#include <linux/const.h>
2246
2247 /* bytes per L1 cache line */
2248 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2249 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2250 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2251
2252 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2253 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2254 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2255 index f8e16b2..c73ff79 100644
2256 --- a/arch/frv/include/asm/kmap_types.h
2257 +++ b/arch/frv/include/asm/kmap_types.h
2258 @@ -23,6 +23,7 @@ enum km_type {
2259 KM_IRQ1,
2260 KM_SOFTIRQ0,
2261 KM_SOFTIRQ1,
2262 + KM_CLEARPAGE,
2263 KM_TYPE_NR
2264 };
2265
2266 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2267 index 385fd30..6c3d97e 100644
2268 --- a/arch/frv/mm/elf-fdpic.c
2269 +++ b/arch/frv/mm/elf-fdpic.c
2270 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2271 if (addr) {
2272 addr = PAGE_ALIGN(addr);
2273 vma = find_vma(current->mm, addr);
2274 - if (TASK_SIZE - len >= addr &&
2275 - (!vma || addr + len <= vma->vm_start))
2276 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2277 goto success;
2278 }
2279
2280 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2281 for (; vma; vma = vma->vm_next) {
2282 if (addr > limit)
2283 break;
2284 - if (addr + len <= vma->vm_start)
2285 + if (check_heap_stack_gap(vma, addr, len))
2286 goto success;
2287 addr = vma->vm_end;
2288 }
2289 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2290 for (; vma; vma = vma->vm_next) {
2291 if (addr > limit)
2292 break;
2293 - if (addr + len <= vma->vm_start)
2294 + if (check_heap_stack_gap(vma, addr, len))
2295 goto success;
2296 addr = vma->vm_end;
2297 }
2298 diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2299 index c635028..6d9445a 100644
2300 --- a/arch/h8300/include/asm/cache.h
2301 +++ b/arch/h8300/include/asm/cache.h
2302 @@ -1,8 +1,10 @@
2303 #ifndef __ARCH_H8300_CACHE_H
2304 #define __ARCH_H8300_CACHE_H
2305
2306 +#include <linux/const.h>
2307 +
2308 /* bytes per L1 cache line */
2309 -#define L1_CACHE_BYTES 4
2310 +#define L1_CACHE_BYTES _AC(4,UL)
2311
2312 /* m68k-elf-gcc 2.95.2 doesn't like these */
2313
2314 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2315 index 0f01de2..d37d309 100644
2316 --- a/arch/hexagon/include/asm/cache.h
2317 +++ b/arch/hexagon/include/asm/cache.h
2318 @@ -21,9 +21,11 @@
2319 #ifndef __ASM_CACHE_H
2320 #define __ASM_CACHE_H
2321
2322 +#include <linux/const.h>
2323 +
2324 /* Bytes per L1 cache line */
2325 -#define L1_CACHE_SHIFT (5)
2326 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2327 +#define L1_CACHE_SHIFT 5
2328 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2329
2330 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2331 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2332 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2333 index 7d91166..88ab87e 100644
2334 --- a/arch/ia64/include/asm/atomic.h
2335 +++ b/arch/ia64/include/asm/atomic.h
2336 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2337 #define atomic64_inc(v) atomic64_add(1, (v))
2338 #define atomic64_dec(v) atomic64_sub(1, (v))
2339
2340 +#define atomic64_read_unchecked(v) atomic64_read(v)
2341 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2342 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2343 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2344 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2345 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2346 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2347 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2348 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2349 +
2350 /* Atomic operations are already serializing */
2351 #define smp_mb__before_atomic_dec() barrier()
2352 #define smp_mb__after_atomic_dec() barrier()
2353 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2354 index 988254a..e1ee885 100644
2355 --- a/arch/ia64/include/asm/cache.h
2356 +++ b/arch/ia64/include/asm/cache.h
2357 @@ -1,6 +1,7 @@
2358 #ifndef _ASM_IA64_CACHE_H
2359 #define _ASM_IA64_CACHE_H
2360
2361 +#include <linux/const.h>
2362
2363 /*
2364 * Copyright (C) 1998-2000 Hewlett-Packard Co
2365 @@ -9,7 +10,7 @@
2366
2367 /* Bytes per L1 (data) cache line. */
2368 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2369 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2370 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2371
2372 #ifdef CONFIG_SMP
2373 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2374 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2375 index b5298eb..67c6e62 100644
2376 --- a/arch/ia64/include/asm/elf.h
2377 +++ b/arch/ia64/include/asm/elf.h
2378 @@ -42,6 +42,13 @@
2379 */
2380 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2381
2382 +#ifdef CONFIG_PAX_ASLR
2383 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2384 +
2385 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2386 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2387 +#endif
2388 +
2389 #define PT_IA_64_UNWIND 0x70000001
2390
2391 /* IA-64 relocations: */
2392 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2393 index 96a8d92..617a1cf 100644
2394 --- a/arch/ia64/include/asm/pgalloc.h
2395 +++ b/arch/ia64/include/asm/pgalloc.h
2396 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2397 pgd_val(*pgd_entry) = __pa(pud);
2398 }
2399
2400 +static inline void
2401 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2402 +{
2403 + pgd_populate(mm, pgd_entry, pud);
2404 +}
2405 +
2406 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2407 {
2408 return quicklist_alloc(0, GFP_KERNEL, NULL);
2409 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2410 pud_val(*pud_entry) = __pa(pmd);
2411 }
2412
2413 +static inline void
2414 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2415 +{
2416 + pud_populate(mm, pud_entry, pmd);
2417 +}
2418 +
2419 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2420 {
2421 return quicklist_alloc(0, GFP_KERNEL, NULL);
2422 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2423 index 815810c..d60bd4c 100644
2424 --- a/arch/ia64/include/asm/pgtable.h
2425 +++ b/arch/ia64/include/asm/pgtable.h
2426 @@ -12,7 +12,7 @@
2427 * David Mosberger-Tang <davidm@hpl.hp.com>
2428 */
2429
2430 -
2431 +#include <linux/const.h>
2432 #include <asm/mman.h>
2433 #include <asm/page.h>
2434 #include <asm/processor.h>
2435 @@ -142,6 +142,17 @@
2436 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2437 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2438 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2439 +
2440 +#ifdef CONFIG_PAX_PAGEEXEC
2441 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2442 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2443 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2444 +#else
2445 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
2446 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
2447 +# define PAGE_COPY_NOEXEC PAGE_COPY
2448 +#endif
2449 +
2450 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2451 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2452 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2453 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2454 index 54ff557..70c88b7 100644
2455 --- a/arch/ia64/include/asm/spinlock.h
2456 +++ b/arch/ia64/include/asm/spinlock.h
2457 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2458 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2459
2460 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2461 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2462 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2463 }
2464
2465 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2466 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2467 index 449c8c0..432a3d2 100644
2468 --- a/arch/ia64/include/asm/uaccess.h
2469 +++ b/arch/ia64/include/asm/uaccess.h
2470 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2471 const void *__cu_from = (from); \
2472 long __cu_len = (n); \
2473 \
2474 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
2475 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2476 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2477 __cu_len; \
2478 })
2479 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2480 long __cu_len = (n); \
2481 \
2482 __chk_user_ptr(__cu_from); \
2483 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
2484 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2485 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2486 __cu_len; \
2487 })
2488 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2489 index 24603be..948052d 100644
2490 --- a/arch/ia64/kernel/module.c
2491 +++ b/arch/ia64/kernel/module.c
2492 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2493 void
2494 module_free (struct module *mod, void *module_region)
2495 {
2496 - if (mod && mod->arch.init_unw_table &&
2497 - module_region == mod->module_init) {
2498 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2499 unw_remove_unwind_table(mod->arch.init_unw_table);
2500 mod->arch.init_unw_table = NULL;
2501 }
2502 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2503 }
2504
2505 static inline int
2506 +in_init_rx (const struct module *mod, uint64_t addr)
2507 +{
2508 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2509 +}
2510 +
2511 +static inline int
2512 +in_init_rw (const struct module *mod, uint64_t addr)
2513 +{
2514 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2515 +}
2516 +
2517 +static inline int
2518 in_init (const struct module *mod, uint64_t addr)
2519 {
2520 - return addr - (uint64_t) mod->module_init < mod->init_size;
2521 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2522 +}
2523 +
2524 +static inline int
2525 +in_core_rx (const struct module *mod, uint64_t addr)
2526 +{
2527 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2528 +}
2529 +
2530 +static inline int
2531 +in_core_rw (const struct module *mod, uint64_t addr)
2532 +{
2533 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2534 }
2535
2536 static inline int
2537 in_core (const struct module *mod, uint64_t addr)
2538 {
2539 - return addr - (uint64_t) mod->module_core < mod->core_size;
2540 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2541 }
2542
2543 static inline int
2544 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2545 break;
2546
2547 case RV_BDREL:
2548 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2549 + if (in_init_rx(mod, val))
2550 + val -= (uint64_t) mod->module_init_rx;
2551 + else if (in_init_rw(mod, val))
2552 + val -= (uint64_t) mod->module_init_rw;
2553 + else if (in_core_rx(mod, val))
2554 + val -= (uint64_t) mod->module_core_rx;
2555 + else if (in_core_rw(mod, val))
2556 + val -= (uint64_t) mod->module_core_rw;
2557 break;
2558
2559 case RV_LTV:
2560 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2561 * addresses have been selected...
2562 */
2563 uint64_t gp;
2564 - if (mod->core_size > MAX_LTOFF)
2565 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2566 /*
2567 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2568 * at the end of the module.
2569 */
2570 - gp = mod->core_size - MAX_LTOFF / 2;
2571 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2572 else
2573 - gp = mod->core_size / 2;
2574 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2575 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2576 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2577 mod->arch.gp = gp;
2578 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2579 }
2580 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2581 index 609d500..7dde2a8 100644
2582 --- a/arch/ia64/kernel/sys_ia64.c
2583 +++ b/arch/ia64/kernel/sys_ia64.c
2584 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2585 if (REGION_NUMBER(addr) == RGN_HPAGE)
2586 addr = 0;
2587 #endif
2588 +
2589 +#ifdef CONFIG_PAX_RANDMMAP
2590 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2591 + addr = mm->free_area_cache;
2592 + else
2593 +#endif
2594 +
2595 if (!addr)
2596 addr = mm->free_area_cache;
2597
2598 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2599 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2600 /* At this point: (!vma || addr < vma->vm_end). */
2601 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2602 - if (start_addr != TASK_UNMAPPED_BASE) {
2603 + if (start_addr != mm->mmap_base) {
2604 /* Start a new search --- just in case we missed some holes. */
2605 - addr = TASK_UNMAPPED_BASE;
2606 + addr = mm->mmap_base;
2607 goto full_search;
2608 }
2609 return -ENOMEM;
2610 }
2611 - if (!vma || addr + len <= vma->vm_start) {
2612 + if (check_heap_stack_gap(vma, addr, len)) {
2613 /* Remember the address where we stopped this search: */
2614 mm->free_area_cache = addr + len;
2615 return addr;
2616 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2617 index 0ccb28f..8992469 100644
2618 --- a/arch/ia64/kernel/vmlinux.lds.S
2619 +++ b/arch/ia64/kernel/vmlinux.lds.S
2620 @@ -198,7 +198,7 @@ SECTIONS {
2621 /* Per-cpu data: */
2622 . = ALIGN(PERCPU_PAGE_SIZE);
2623 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2624 - __phys_per_cpu_start = __per_cpu_load;
2625 + __phys_per_cpu_start = per_cpu_load;
2626 /*
2627 * ensure percpu data fits
2628 * into percpu page size
2629 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2630 index 02d29c2..ea893df 100644
2631 --- a/arch/ia64/mm/fault.c
2632 +++ b/arch/ia64/mm/fault.c
2633 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2634 return pte_present(pte);
2635 }
2636
2637 +#ifdef CONFIG_PAX_PAGEEXEC
2638 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2639 +{
2640 + unsigned long i;
2641 +
2642 + printk(KERN_ERR "PAX: bytes at PC: ");
2643 + for (i = 0; i < 8; i++) {
2644 + unsigned int c;
2645 + if (get_user(c, (unsigned int *)pc+i))
2646 + printk(KERN_CONT "???????? ");
2647 + else
2648 + printk(KERN_CONT "%08x ", c);
2649 + }
2650 + printk("\n");
2651 +}
2652 +#endif
2653 +
2654 void __kprobes
2655 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2656 {
2657 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2658 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2659 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2660
2661 - if ((vma->vm_flags & mask) != mask)
2662 + if ((vma->vm_flags & mask) != mask) {
2663 +
2664 +#ifdef CONFIG_PAX_PAGEEXEC
2665 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2666 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2667 + goto bad_area;
2668 +
2669 + up_read(&mm->mmap_sem);
2670 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2671 + do_group_exit(SIGKILL);
2672 + }
2673 +#endif
2674 +
2675 goto bad_area;
2676
2677 + }
2678 +
2679 /*
2680 * If for any reason at all we couldn't handle the fault, make
2681 * sure we exit gracefully rather than endlessly redo the
2682 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2683 index 5ca674b..e0e1b70 100644
2684 --- a/arch/ia64/mm/hugetlbpage.c
2685 +++ b/arch/ia64/mm/hugetlbpage.c
2686 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2687 /* At this point: (!vmm || addr < vmm->vm_end). */
2688 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2689 return -ENOMEM;
2690 - if (!vmm || (addr + len) <= vmm->vm_start)
2691 + if (check_heap_stack_gap(vmm, addr, len))
2692 return addr;
2693 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2694 }
2695 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2696 index 0eab454..bd794f2 100644
2697 --- a/arch/ia64/mm/init.c
2698 +++ b/arch/ia64/mm/init.c
2699 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2700 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2701 vma->vm_end = vma->vm_start + PAGE_SIZE;
2702 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2703 +
2704 +#ifdef CONFIG_PAX_PAGEEXEC
2705 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2706 + vma->vm_flags &= ~VM_EXEC;
2707 +
2708 +#ifdef CONFIG_PAX_MPROTECT
2709 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
2710 + vma->vm_flags &= ~VM_MAYEXEC;
2711 +#endif
2712 +
2713 + }
2714 +#endif
2715 +
2716 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2717 down_write(&current->mm->mmap_sem);
2718 if (insert_vm_struct(current->mm, vma)) {
2719 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2720 index 40b3ee9..8c2c112 100644
2721 --- a/arch/m32r/include/asm/cache.h
2722 +++ b/arch/m32r/include/asm/cache.h
2723 @@ -1,8 +1,10 @@
2724 #ifndef _ASM_M32R_CACHE_H
2725 #define _ASM_M32R_CACHE_H
2726
2727 +#include <linux/const.h>
2728 +
2729 /* L1 cache line size */
2730 #define L1_CACHE_SHIFT 4
2731 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2732 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2733
2734 #endif /* _ASM_M32R_CACHE_H */
2735 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2736 index 82abd15..d95ae5d 100644
2737 --- a/arch/m32r/lib/usercopy.c
2738 +++ b/arch/m32r/lib/usercopy.c
2739 @@ -14,6 +14,9 @@
2740 unsigned long
2741 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2742 {
2743 + if ((long)n < 0)
2744 + return n;
2745 +
2746 prefetch(from);
2747 if (access_ok(VERIFY_WRITE, to, n))
2748 __copy_user(to,from,n);
2749 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2750 unsigned long
2751 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2752 {
2753 + if ((long)n < 0)
2754 + return n;
2755 +
2756 prefetchw(to);
2757 if (access_ok(VERIFY_READ, from, n))
2758 __copy_user_zeroing(to,from,n);
2759 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2760 index 0395c51..5f26031 100644
2761 --- a/arch/m68k/include/asm/cache.h
2762 +++ b/arch/m68k/include/asm/cache.h
2763 @@ -4,9 +4,11 @@
2764 #ifndef __ARCH_M68K_CACHE_H
2765 #define __ARCH_M68K_CACHE_H
2766
2767 +#include <linux/const.h>
2768 +
2769 /* bytes per L1 cache line */
2770 #define L1_CACHE_SHIFT 4
2771 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2772 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2773
2774 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2775
2776 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2777 index 4efe96a..60e8699 100644
2778 --- a/arch/microblaze/include/asm/cache.h
2779 +++ b/arch/microblaze/include/asm/cache.h
2780 @@ -13,11 +13,12 @@
2781 #ifndef _ASM_MICROBLAZE_CACHE_H
2782 #define _ASM_MICROBLAZE_CACHE_H
2783
2784 +#include <linux/const.h>
2785 #include <asm/registers.h>
2786
2787 #define L1_CACHE_SHIFT 5
2788 /* word-granular cache in microblaze */
2789 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2790 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2791
2792 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2793
2794 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2795 index 3f4c5cb..3439c6e 100644
2796 --- a/arch/mips/include/asm/atomic.h
2797 +++ b/arch/mips/include/asm/atomic.h
2798 @@ -21,6 +21,10 @@
2799 #include <asm/cmpxchg.h>
2800 #include <asm/war.h>
2801
2802 +#ifdef CONFIG_GENERIC_ATOMIC64
2803 +#include <asm-generic/atomic64.h>
2804 +#endif
2805 +
2806 #define ATOMIC_INIT(i) { (i) }
2807
2808 /*
2809 @@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2810 */
2811 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2812
2813 +#define atomic64_read_unchecked(v) atomic64_read(v)
2814 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2815 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2816 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2817 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2818 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
2819 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2820 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
2821 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2822 +
2823 #endif /* CONFIG_64BIT */
2824
2825 /*
2826 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2827 index b4db69f..8f3b093 100644
2828 --- a/arch/mips/include/asm/cache.h
2829 +++ b/arch/mips/include/asm/cache.h
2830 @@ -9,10 +9,11 @@
2831 #ifndef _ASM_CACHE_H
2832 #define _ASM_CACHE_H
2833
2834 +#include <linux/const.h>
2835 #include <kmalloc.h>
2836
2837 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2838 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2839 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2840
2841 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2842 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2843 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2844 index 455c0ac..ad65fbe 100644
2845 --- a/arch/mips/include/asm/elf.h
2846 +++ b/arch/mips/include/asm/elf.h
2847 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
2848 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2849 #endif
2850
2851 +#ifdef CONFIG_PAX_ASLR
2852 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2853 +
2854 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2855 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2856 +#endif
2857 +
2858 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2859 struct linux_binprm;
2860 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2861 int uses_interp);
2862
2863 -struct mm_struct;
2864 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2865 -#define arch_randomize_brk arch_randomize_brk
2866 -
2867 #endif /* _ASM_ELF_H */
2868 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2869 index c1f6afa..38cc6e9 100644
2870 --- a/arch/mips/include/asm/exec.h
2871 +++ b/arch/mips/include/asm/exec.h
2872 @@ -12,6 +12,6 @@
2873 #ifndef _ASM_EXEC_H
2874 #define _ASM_EXEC_H
2875
2876 -extern unsigned long arch_align_stack(unsigned long sp);
2877 +#define arch_align_stack(x) ((x) & ~0xfUL)
2878
2879 #endif /* _ASM_EXEC_H */
2880 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2881 index da9bd7d..91aa7ab 100644
2882 --- a/arch/mips/include/asm/page.h
2883 +++ b/arch/mips/include/asm/page.h
2884 @@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2885 #ifdef CONFIG_CPU_MIPS32
2886 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2887 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2888 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2889 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2890 #else
2891 typedef struct { unsigned long long pte; } pte_t;
2892 #define pte_val(x) ((x).pte)
2893 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2894 index 881d18b..cea38bc 100644
2895 --- a/arch/mips/include/asm/pgalloc.h
2896 +++ b/arch/mips/include/asm/pgalloc.h
2897 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2898 {
2899 set_pud(pud, __pud((unsigned long)pmd));
2900 }
2901 +
2902 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2903 +{
2904 + pud_populate(mm, pud, pmd);
2905 +}
2906 #endif
2907
2908 /*
2909 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2910 index 0d85d8e..ec71487 100644
2911 --- a/arch/mips/include/asm/thread_info.h
2912 +++ b/arch/mips/include/asm/thread_info.h
2913 @@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2914 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2915 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2916 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2917 +/* li takes a 32bit immediate */
2918 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2919 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2920
2921 #ifdef CONFIG_MIPS32_O32
2922 @@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2923 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2924 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2925 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2926 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2927 +
2928 +#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2929
2930 /* work to do in syscall_trace_leave() */
2931 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2932 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2933
2934 /* work to do on interrupt/exception return */
2935 #define _TIF_WORK_MASK (0x0000ffef & \
2936 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2937 /* work to do on any return to u-space */
2938 -#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2939 +#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2940
2941 #endif /* __KERNEL__ */
2942
2943 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2944 index 9fdd8bc..4bd7f1a 100644
2945 --- a/arch/mips/kernel/binfmt_elfn32.c
2946 +++ b/arch/mips/kernel/binfmt_elfn32.c
2947 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2948 #undef ELF_ET_DYN_BASE
2949 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2950
2951 +#ifdef CONFIG_PAX_ASLR
2952 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2953 +
2954 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2955 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2956 +#endif
2957 +
2958 #include <asm/processor.h>
2959 #include <linux/module.h>
2960 #include <linux/elfcore.h>
2961 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2962 index ff44823..97f8906 100644
2963 --- a/arch/mips/kernel/binfmt_elfo32.c
2964 +++ b/arch/mips/kernel/binfmt_elfo32.c
2965 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2966 #undef ELF_ET_DYN_BASE
2967 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2968
2969 +#ifdef CONFIG_PAX_ASLR
2970 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2971 +
2972 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2973 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2974 +#endif
2975 +
2976 #include <asm/processor.h>
2977
2978 /*
2979 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2980 index e9a5fd7..378809a 100644
2981 --- a/arch/mips/kernel/process.c
2982 +++ b/arch/mips/kernel/process.c
2983 @@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2984 out:
2985 return pc;
2986 }
2987 -
2988 -/*
2989 - * Don't forget that the stack pointer must be aligned on a 8 bytes
2990 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2991 - */
2992 -unsigned long arch_align_stack(unsigned long sp)
2993 -{
2994 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2995 - sp -= get_random_int() & ~PAGE_MASK;
2996 -
2997 - return sp & ALMASK;
2998 -}
2999 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3000 index 7c24c29..e2f1981 100644
3001 --- a/arch/mips/kernel/ptrace.c
3002 +++ b/arch/mips/kernel/ptrace.c
3003 @@ -528,6 +528,10 @@ static inline int audit_arch(void)
3004 return arch;
3005 }
3006
3007 +#ifdef CONFIG_GRKERNSEC_SETXID
3008 +extern void gr_delayed_cred_worker(void);
3009 +#endif
3010 +
3011 /*
3012 * Notification of system call entry/exit
3013 * - triggered by current->work.syscall_trace
3014 @@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3015 /* do the secure computing check first */
3016 secure_computing(regs->regs[2]);
3017
3018 +#ifdef CONFIG_GRKERNSEC_SETXID
3019 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3020 + gr_delayed_cred_worker();
3021 +#endif
3022 +
3023 if (!(current->ptrace & PT_PTRACED))
3024 goto out;
3025
3026 diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3027 index a632bc1..0b77c7c 100644
3028 --- a/arch/mips/kernel/scall32-o32.S
3029 +++ b/arch/mips/kernel/scall32-o32.S
3030 @@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3031
3032 stack_done:
3033 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3034 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3035 + li t1, _TIF_SYSCALL_WORK
3036 and t0, t1
3037 bnez t0, syscall_trace_entry # -> yes
3038
3039 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3040 index 3b5a5e9..e1ee86d 100644
3041 --- a/arch/mips/kernel/scall64-64.S
3042 +++ b/arch/mips/kernel/scall64-64.S
3043 @@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3044
3045 sd a3, PT_R26(sp) # save a3 for syscall restarting
3046
3047 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3048 + li t1, _TIF_SYSCALL_WORK
3049 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3050 and t0, t1, t0
3051 bnez t0, syscall_trace_entry
3052 diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3053 index 6be6f70..1859577 100644
3054 --- a/arch/mips/kernel/scall64-n32.S
3055 +++ b/arch/mips/kernel/scall64-n32.S
3056 @@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3057
3058 sd a3, PT_R26(sp) # save a3 for syscall restarting
3059
3060 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3061 + li t1, _TIF_SYSCALL_WORK
3062 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3063 and t0, t1, t0
3064 bnez t0, n32_syscall_trace_entry
3065 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3066 index 5422855..74e63a3 100644
3067 --- a/arch/mips/kernel/scall64-o32.S
3068 +++ b/arch/mips/kernel/scall64-o32.S
3069 @@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3070 PTR 4b, bad_stack
3071 .previous
3072
3073 - li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3074 + li t1, _TIF_SYSCALL_WORK
3075 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3076 and t0, t1, t0
3077 bnez t0, trace_a_syscall
3078 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3079 index c14f6df..537e729 100644
3080 --- a/arch/mips/mm/fault.c
3081 +++ b/arch/mips/mm/fault.c
3082 @@ -27,6 +27,23 @@
3083 #include <asm/highmem.h> /* For VMALLOC_END */
3084 #include <linux/kdebug.h>
3085
3086 +#ifdef CONFIG_PAX_PAGEEXEC
3087 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3088 +{
3089 + unsigned long i;
3090 +
3091 + printk(KERN_ERR "PAX: bytes at PC: ");
3092 + for (i = 0; i < 5; i++) {
3093 + unsigned int c;
3094 + if (get_user(c, (unsigned int *)pc+i))
3095 + printk(KERN_CONT "???????? ");
3096 + else
3097 + printk(KERN_CONT "%08x ", c);
3098 + }
3099 + printk("\n");
3100 +}
3101 +#endif
3102 +
3103 /*
3104 * This routine handles page faults. It determines the address,
3105 * and the problem, and then passes it off to one of the appropriate
3106 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3107 index 302d779..7d35bf8 100644
3108 --- a/arch/mips/mm/mmap.c
3109 +++ b/arch/mips/mm/mmap.c
3110 @@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3111 do_color_align = 1;
3112
3113 /* requesting a specific address */
3114 +
3115 +#ifdef CONFIG_PAX_RANDMMAP
3116 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3117 +#endif
3118 +
3119 if (addr) {
3120 if (do_color_align)
3121 addr = COLOUR_ALIGN(addr, pgoff);
3122 @@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3123 addr = PAGE_ALIGN(addr);
3124
3125 vma = find_vma(mm, addr);
3126 - if (TASK_SIZE - len >= addr &&
3127 - (!vma || addr + len <= vma->vm_start))
3128 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3129 return addr;
3130 }
3131
3132 @@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3133 /* At this point: (!vma || addr < vma->vm_end). */
3134 if (TASK_SIZE - len < addr)
3135 return -ENOMEM;
3136 - if (!vma || addr + len <= vma->vm_start)
3137 + if (check_heap_stack_gap(vmm, addr, len))
3138 return addr;
3139 addr = vma->vm_end;
3140 if (do_color_align)
3141 @@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3142 /* make sure it can fit in the remaining address space */
3143 if (likely(addr > len)) {
3144 vma = find_vma(mm, addr - len);
3145 - if (!vma || addr <= vma->vm_start) {
3146 + if (check_heap_stack_gap(vmm, addr - len, len))
3147 /* cache the address as a hint for next time */
3148 return mm->free_area_cache = addr - len;
3149 }
3150 @@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3151 * return with success:
3152 */
3153 vma = find_vma(mm, addr);
3154 - if (likely(!vma || addr + len <= vma->vm_start)) {
3155 + if (check_heap_stack_gap(vmm, addr, len)) {
3156 /* cache the address as a hint for next time */
3157 return mm->free_area_cache = addr;
3158 }
3159 @@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3160 mm->unmap_area = arch_unmap_area_topdown;
3161 }
3162 }
3163 -
3164 -static inline unsigned long brk_rnd(void)
3165 -{
3166 - unsigned long rnd = get_random_int();
3167 -
3168 - rnd = rnd << PAGE_SHIFT;
3169 - /* 8MB for 32bit, 256MB for 64bit */
3170 - if (TASK_IS_32BIT_ADDR)
3171 - rnd = rnd & 0x7ffffful;
3172 - else
3173 - rnd = rnd & 0xffffffful;
3174 -
3175 - return rnd;
3176 -}
3177 -
3178 -unsigned long arch_randomize_brk(struct mm_struct *mm)
3179 -{
3180 - unsigned long base = mm->brk;
3181 - unsigned long ret;
3182 -
3183 - ret = PAGE_ALIGN(base + brk_rnd());
3184 -
3185 - if (ret < mm->brk)
3186 - return mm->brk;
3187 -
3188 - return ret;
3189 -}
3190 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3191 index 967d144..db12197 100644
3192 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3193 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3194 @@ -11,12 +11,14 @@
3195 #ifndef _ASM_PROC_CACHE_H
3196 #define _ASM_PROC_CACHE_H
3197
3198 +#include <linux/const.h>
3199 +
3200 /* L1 cache */
3201
3202 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3203 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3204 -#define L1_CACHE_BYTES 16 /* bytes per entry */
3205 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3206 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3207 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3208
3209 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3210 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3211 index bcb5df2..84fabd2 100644
3212 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3213 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3214 @@ -16,13 +16,15 @@
3215 #ifndef _ASM_PROC_CACHE_H
3216 #define _ASM_PROC_CACHE_H
3217
3218 +#include <linux/const.h>
3219 +
3220 /*
3221 * L1 cache
3222 */
3223 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3224 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3225 -#define L1_CACHE_BYTES 32 /* bytes per entry */
3226 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3227 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3228 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3229
3230 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3231 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3232 index 4ce7a01..449202a 100644
3233 --- a/arch/openrisc/include/asm/cache.h
3234 +++ b/arch/openrisc/include/asm/cache.h
3235 @@ -19,11 +19,13 @@
3236 #ifndef __ASM_OPENRISC_CACHE_H
3237 #define __ASM_OPENRISC_CACHE_H
3238
3239 +#include <linux/const.h>
3240 +
3241 /* FIXME: How can we replace these with values from the CPU...
3242 * they shouldn't be hard-coded!
3243 */
3244
3245 -#define L1_CACHE_BYTES 16
3246 #define L1_CACHE_SHIFT 4
3247 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3248
3249 #endif /* __ASM_OPENRISC_CACHE_H */
3250 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
3251 index 6c6defc..d30653d 100644
3252 --- a/arch/parisc/include/asm/atomic.h
3253 +++ b/arch/parisc/include/asm/atomic.h
3254 @@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
3255
3256 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3257
3258 +#define atomic64_read_unchecked(v) atomic64_read(v)
3259 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3260 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3261 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3262 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3263 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3264 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3265 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3266 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3267 +
3268 #endif /* !CONFIG_64BIT */
3269
3270
3271 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3272 index 47f11c7..3420df2 100644
3273 --- a/arch/parisc/include/asm/cache.h
3274 +++ b/arch/parisc/include/asm/cache.h
3275 @@ -5,6 +5,7 @@
3276 #ifndef __ARCH_PARISC_CACHE_H
3277 #define __ARCH_PARISC_CACHE_H
3278
3279 +#include <linux/const.h>
3280
3281 /*
3282 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3283 @@ -15,13 +16,13 @@
3284 * just ruin performance.
3285 */
3286 #ifdef CONFIG_PA20
3287 -#define L1_CACHE_BYTES 64
3288 #define L1_CACHE_SHIFT 6
3289 #else
3290 -#define L1_CACHE_BYTES 32
3291 #define L1_CACHE_SHIFT 5
3292 #endif
3293
3294 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3295 +
3296 #ifndef __ASSEMBLY__
3297
3298 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3299 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3300 index 19f6cb1..6c78cf2 100644
3301 --- a/arch/parisc/include/asm/elf.h
3302 +++ b/arch/parisc/include/asm/elf.h
3303 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
3304
3305 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3306
3307 +#ifdef CONFIG_PAX_ASLR
3308 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3309 +
3310 +#define PAX_DELTA_MMAP_LEN 16
3311 +#define PAX_DELTA_STACK_LEN 16
3312 +#endif
3313 +
3314 /* This yields a mask that user programs can use to figure out what
3315 instruction set this CPU supports. This could be done in user space,
3316 but it's not easy, and we've already done it here. */
3317 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3318 index fc987a1..6e068ef 100644
3319 --- a/arch/parisc/include/asm/pgalloc.h
3320 +++ b/arch/parisc/include/asm/pgalloc.h
3321 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3322 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3323 }
3324
3325 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3326 +{
3327 + pgd_populate(mm, pgd, pmd);
3328 +}
3329 +
3330 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3331 {
3332 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3333 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3334 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3335 #define pmd_free(mm, x) do { } while (0)
3336 #define pgd_populate(mm, pmd, pte) BUG()
3337 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
3338
3339 #endif
3340
3341 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
3342 index ee99f23..802b0a1 100644
3343 --- a/arch/parisc/include/asm/pgtable.h
3344 +++ b/arch/parisc/include/asm/pgtable.h
3345 @@ -212,6 +212,17 @@ struct vm_area_struct;
3346 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3347 #define PAGE_COPY PAGE_EXECREAD
3348 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3349 +
3350 +#ifdef CONFIG_PAX_PAGEEXEC
3351 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3352 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3353 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3354 +#else
3355 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3356 +# define PAGE_COPY_NOEXEC PAGE_COPY
3357 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3358 +#endif
3359 +
3360 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
3361 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3362 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
3363 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3364 index 9ac0660..6ed15c4 100644
3365 --- a/arch/parisc/include/asm/uaccess.h
3366 +++ b/arch/parisc/include/asm/uaccess.h
3367 @@ -252,10 +252,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3368 const void __user *from,
3369 unsigned long n)
3370 {
3371 - int sz = __compiletime_object_size(to);
3372 + size_t sz = __compiletime_object_size(to);
3373 int ret = -EFAULT;
3374
3375 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3376 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3377 ret = __copy_from_user(to, from, n);
3378 else
3379 copy_from_user_overflow();
3380 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3381 index 5e34ccf..672bc9c 100644
3382 --- a/arch/parisc/kernel/module.c
3383 +++ b/arch/parisc/kernel/module.c
3384 @@ -98,16 +98,38 @@
3385
3386 /* three functions to determine where in the module core
3387 * or init pieces the location is */
3388 +static inline int in_init_rx(struct module *me, void *loc)
3389 +{
3390 + return (loc >= me->module_init_rx &&
3391 + loc < (me->module_init_rx + me->init_size_rx));
3392 +}
3393 +
3394 +static inline int in_init_rw(struct module *me, void *loc)
3395 +{
3396 + return (loc >= me->module_init_rw &&
3397 + loc < (me->module_init_rw + me->init_size_rw));
3398 +}
3399 +
3400 static inline int in_init(struct module *me, void *loc)
3401 {
3402 - return (loc >= me->module_init &&
3403 - loc <= (me->module_init + me->init_size));
3404 + return in_init_rx(me, loc) || in_init_rw(me, loc);
3405 +}
3406 +
3407 +static inline int in_core_rx(struct module *me, void *loc)
3408 +{
3409 + return (loc >= me->module_core_rx &&
3410 + loc < (me->module_core_rx + me->core_size_rx));
3411 +}
3412 +
3413 +static inline int in_core_rw(struct module *me, void *loc)
3414 +{
3415 + return (loc >= me->module_core_rw &&
3416 + loc < (me->module_core_rw + me->core_size_rw));
3417 }
3418
3419 static inline int in_core(struct module *me, void *loc)
3420 {
3421 - return (loc >= me->module_core &&
3422 - loc <= (me->module_core + me->core_size));
3423 + return in_core_rx(me, loc) || in_core_rw(me, loc);
3424 }
3425
3426 static inline int in_local(struct module *me, void *loc)
3427 @@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
3428 }
3429
3430 /* align things a bit */
3431 - me->core_size = ALIGN(me->core_size, 16);
3432 - me->arch.got_offset = me->core_size;
3433 - me->core_size += gots * sizeof(struct got_entry);
3434 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3435 + me->arch.got_offset = me->core_size_rw;
3436 + me->core_size_rw += gots * sizeof(struct got_entry);
3437
3438 - me->core_size = ALIGN(me->core_size, 16);
3439 - me->arch.fdesc_offset = me->core_size;
3440 - me->core_size += fdescs * sizeof(Elf_Fdesc);
3441 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
3442 + me->arch.fdesc_offset = me->core_size_rw;
3443 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3444
3445 me->arch.got_max = gots;
3446 me->arch.fdesc_max = fdescs;
3447 @@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3448
3449 BUG_ON(value == 0);
3450
3451 - got = me->module_core + me->arch.got_offset;
3452 + got = me->module_core_rw + me->arch.got_offset;
3453 for (i = 0; got[i].addr; i++)
3454 if (got[i].addr == value)
3455 goto out;
3456 @@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
3457 #ifdef CONFIG_64BIT
3458 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3459 {
3460 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3461 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3462
3463 if (!value) {
3464 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
3465 @@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3466
3467 /* Create new one */
3468 fdesc->addr = value;
3469 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3470 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3471 return (Elf_Addr)fdesc;
3472 }
3473 #endif /* CONFIG_64BIT */
3474 @@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
3475
3476 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3477 end = table + sechdrs[me->arch.unwind_section].sh_size;
3478 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3479 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3480
3481 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3482 me->arch.unwind_section, table, end, gp);
3483 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3484 index c9b9322..02d8940 100644
3485 --- a/arch/parisc/kernel/sys_parisc.c
3486 +++ b/arch/parisc/kernel/sys_parisc.c
3487 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
3488 /* At this point: (!vma || addr < vma->vm_end). */
3489 if (TASK_SIZE - len < addr)
3490 return -ENOMEM;
3491 - if (!vma || addr + len <= vma->vm_start)
3492 + if (check_heap_stack_gap(vma, addr, len))
3493 return addr;
3494 addr = vma->vm_end;
3495 }
3496 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
3497 /* At this point: (!vma || addr < vma->vm_end). */
3498 if (TASK_SIZE - len < addr)
3499 return -ENOMEM;
3500 - if (!vma || addr + len <= vma->vm_start)
3501 + if (check_heap_stack_gap(vma, addr, len))
3502 return addr;
3503 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3504 if (addr < vma->vm_end) /* handle wraparound */
3505 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
3506 if (flags & MAP_FIXED)
3507 return addr;
3508 if (!addr)
3509 - addr = TASK_UNMAPPED_BASE;
3510 + addr = current->mm->mmap_base;
3511
3512 if (filp) {
3513 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
3514 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
3515 index 45ba99f..8e22c33 100644
3516 --- a/arch/parisc/kernel/traps.c
3517 +++ b/arch/parisc/kernel/traps.c
3518 @@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
3519
3520 down_read(&current->mm->mmap_sem);
3521 vma = find_vma(current->mm,regs->iaoq[0]);
3522 - if (vma && (regs->iaoq[0] >= vma->vm_start)
3523 - && (vma->vm_flags & VM_EXEC)) {
3524 -
3525 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3526 fault_address = regs->iaoq[0];
3527 fault_space = regs->iasq[0];
3528
3529 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3530 index 18162ce..94de376 100644
3531 --- a/arch/parisc/mm/fault.c
3532 +++ b/arch/parisc/mm/fault.c
3533 @@ -15,6 +15,7 @@
3534 #include <linux/sched.h>
3535 #include <linux/interrupt.h>
3536 #include <linux/module.h>
3537 +#include <linux/unistd.h>
3538
3539 #include <asm/uaccess.h>
3540 #include <asm/traps.h>
3541 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
3542 static unsigned long
3543 parisc_acctyp(unsigned long code, unsigned int inst)
3544 {
3545 - if (code == 6 || code == 16)
3546 + if (code == 6 || code == 7 || code == 16)
3547 return VM_EXEC;
3548
3549 switch (inst & 0xf0000000) {
3550 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
3551 }
3552 #endif
3553
3554 +#ifdef CONFIG_PAX_PAGEEXEC
3555 +/*
3556 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3557 + *
3558 + * returns 1 when task should be killed
3559 + * 2 when rt_sigreturn trampoline was detected
3560 + * 3 when unpatched PLT trampoline was detected
3561 + */
3562 +static int pax_handle_fetch_fault(struct pt_regs *regs)
3563 +{
3564 +
3565 +#ifdef CONFIG_PAX_EMUPLT
3566 + int err;
3567 +
3568 + do { /* PaX: unpatched PLT emulation */
3569 + unsigned int bl, depwi;
3570 +
3571 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3572 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3573 +
3574 + if (err)
3575 + break;
3576 +
3577 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3578 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3579 +
3580 + err = get_user(ldw, (unsigned int *)addr);
3581 + err |= get_user(bv, (unsigned int *)(addr+4));
3582 + err |= get_user(ldw2, (unsigned int *)(addr+8));
3583 +
3584 + if (err)
3585 + break;
3586 +
3587 + if (ldw == 0x0E801096U &&
3588 + bv == 0xEAC0C000U &&
3589 + ldw2 == 0x0E881095U)
3590 + {
3591 + unsigned int resolver, map;
3592 +
3593 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3594 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3595 + if (err)
3596 + break;
3597 +
3598 + regs->gr[20] = instruction_pointer(regs)+8;
3599 + regs->gr[21] = map;
3600 + regs->gr[22] = resolver;
3601 + regs->iaoq[0] = resolver | 3UL;
3602 + regs->iaoq[1] = regs->iaoq[0] + 4;
3603 + return 3;
3604 + }
3605 + }
3606 + } while (0);
3607 +#endif
3608 +
3609 +#ifdef CONFIG_PAX_EMUTRAMP
3610 +
3611 +#ifndef CONFIG_PAX_EMUSIGRT
3612 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3613 + return 1;
3614 +#endif
3615 +
3616 + do { /* PaX: rt_sigreturn emulation */
3617 + unsigned int ldi1, ldi2, bel, nop;
3618 +
3619 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3620 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3621 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3622 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3623 +
3624 + if (err)
3625 + break;
3626 +
3627 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3628 + ldi2 == 0x3414015AU &&
3629 + bel == 0xE4008200U &&
3630 + nop == 0x08000240U)
3631 + {
3632 + regs->gr[25] = (ldi1 & 2) >> 1;
3633 + regs->gr[20] = __NR_rt_sigreturn;
3634 + regs->gr[31] = regs->iaoq[1] + 16;
3635 + regs->sr[0] = regs->iasq[1];
3636 + regs->iaoq[0] = 0x100UL;
3637 + regs->iaoq[1] = regs->iaoq[0] + 4;
3638 + regs->iasq[0] = regs->sr[2];
3639 + regs->iasq[1] = regs->sr[2];
3640 + return 2;
3641 + }
3642 + } while (0);
3643 +#endif
3644 +
3645 + return 1;
3646 +}
3647 +
3648 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3649 +{
3650 + unsigned long i;
3651 +
3652 + printk(KERN_ERR "PAX: bytes at PC: ");
3653 + for (i = 0; i < 5; i++) {
3654 + unsigned int c;
3655 + if (get_user(c, (unsigned int *)pc+i))
3656 + printk(KERN_CONT "???????? ");
3657 + else
3658 + printk(KERN_CONT "%08x ", c);
3659 + }
3660 + printk("\n");
3661 +}
3662 +#endif
3663 +
3664 int fixup_exception(struct pt_regs *regs)
3665 {
3666 const struct exception_table_entry *fix;
3667 @@ -192,8 +303,33 @@ good_area:
3668
3669 acc_type = parisc_acctyp(code,regs->iir);
3670
3671 - if ((vma->vm_flags & acc_type) != acc_type)
3672 + if ((vma->vm_flags & acc_type) != acc_type) {
3673 +
3674 +#ifdef CONFIG_PAX_PAGEEXEC
3675 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3676 + (address & ~3UL) == instruction_pointer(regs))
3677 + {
3678 + up_read(&mm->mmap_sem);
3679 + switch (pax_handle_fetch_fault(regs)) {
3680 +
3681 +#ifdef CONFIG_PAX_EMUPLT
3682 + case 3:
3683 + return;
3684 +#endif
3685 +
3686 +#ifdef CONFIG_PAX_EMUTRAMP
3687 + case 2:
3688 + return;
3689 +#endif
3690 +
3691 + }
3692 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3693 + do_group_exit(SIGKILL);
3694 + }
3695 +#endif
3696 +
3697 goto bad_area;
3698 + }
3699
3700 /*
3701 * If for any reason at all we couldn't handle the fault, make
3702 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
3703 index da29032..f76c24c 100644
3704 --- a/arch/powerpc/include/asm/atomic.h
3705 +++ b/arch/powerpc/include/asm/atomic.h
3706 @@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3707 return t1;
3708 }
3709
3710 +#define atomic64_read_unchecked(v) atomic64_read(v)
3711 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3712 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3713 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3714 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3715 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
3716 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3717 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
3718 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3719 +
3720 #endif /* __powerpc64__ */
3721
3722 #endif /* __KERNEL__ */
3723 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
3724 index 9e495c9..b6878e5 100644
3725 --- a/arch/powerpc/include/asm/cache.h
3726 +++ b/arch/powerpc/include/asm/cache.h
3727 @@ -3,6 +3,7 @@
3728
3729 #ifdef __KERNEL__
3730
3731 +#include <linux/const.h>
3732
3733 /* bytes per L1 cache line */
3734 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3735 @@ -22,7 +23,7 @@
3736 #define L1_CACHE_SHIFT 7
3737 #endif
3738
3739 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3740 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3741
3742 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3743
3744 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3745 index 3bf9cca..e7457d0 100644
3746 --- a/arch/powerpc/include/asm/elf.h
3747 +++ b/arch/powerpc/include/asm/elf.h
3748 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
3749 the loader. We need to make sure that it is out of the way of the program
3750 that it will "exec", and that there is sufficient room for the brk. */
3751
3752 -extern unsigned long randomize_et_dyn(unsigned long base);
3753 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3754 +#define ELF_ET_DYN_BASE (0x20000000)
3755 +
3756 +#ifdef CONFIG_PAX_ASLR
3757 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3758 +
3759 +#ifdef __powerpc64__
3760 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3761 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
3762 +#else
3763 +#define PAX_DELTA_MMAP_LEN 15
3764 +#define PAX_DELTA_STACK_LEN 15
3765 +#endif
3766 +#endif
3767
3768 /*
3769 * Our registers are always unsigned longs, whether we're a 32 bit
3770 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
3771 (0x7ff >> (PAGE_SHIFT - 12)) : \
3772 (0x3ffff >> (PAGE_SHIFT - 12)))
3773
3774 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3775 -#define arch_randomize_brk arch_randomize_brk
3776 -
3777 #endif /* __KERNEL__ */
3778
3779 /*
3780 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3781 index 8196e9c..d83a9f3 100644
3782 --- a/arch/powerpc/include/asm/exec.h
3783 +++ b/arch/powerpc/include/asm/exec.h
3784 @@ -4,6 +4,6 @@
3785 #ifndef _ASM_POWERPC_EXEC_H
3786 #define _ASM_POWERPC_EXEC_H
3787
3788 -extern unsigned long arch_align_stack(unsigned long sp);
3789 +#define arch_align_stack(x) ((x) & ~0xfUL)
3790
3791 #endif /* _ASM_POWERPC_EXEC_H */
3792 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3793 index bca8fdc..61e9580 100644
3794 --- a/arch/powerpc/include/asm/kmap_types.h
3795 +++ b/arch/powerpc/include/asm/kmap_types.h
3796 @@ -27,6 +27,7 @@ enum km_type {
3797 KM_PPC_SYNC_PAGE,
3798 KM_PPC_SYNC_ICACHE,
3799 KM_KDB,
3800 + KM_CLEARPAGE,
3801 KM_TYPE_NR
3802 };
3803
3804 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3805 index d4a7f64..451de1c 100644
3806 --- a/arch/powerpc/include/asm/mman.h
3807 +++ b/arch/powerpc/include/asm/mman.h
3808 @@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
3809 }
3810 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3811
3812 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3813 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3814 {
3815 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3816 }
3817 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
3818 index f072e97..b436dee 100644
3819 --- a/arch/powerpc/include/asm/page.h
3820 +++ b/arch/powerpc/include/asm/page.h
3821 @@ -220,8 +220,9 @@ extern long long virt_phys_offset;
3822 * and needs to be executable. This means the whole heap ends
3823 * up being executable.
3824 */
3825 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3826 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3827 +#define VM_DATA_DEFAULT_FLAGS32 \
3828 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3829 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3830
3831 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3832 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3833 @@ -249,6 +250,9 @@ extern long long virt_phys_offset;
3834 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3835 #endif
3836
3837 +#define ktla_ktva(addr) (addr)
3838 +#define ktva_ktla(addr) (addr)
3839 +
3840 /*
3841 * Use the top bit of the higher-level page table entries to indicate whether
3842 * the entries we point to contain hugepages. This works because we know that
3843 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
3844 index fed85e6..da5c71b 100644
3845 --- a/arch/powerpc/include/asm/page_64.h
3846 +++ b/arch/powerpc/include/asm/page_64.h
3847 @@ -146,15 +146,18 @@ do { \
3848 * stack by default, so in the absence of a PT_GNU_STACK program header
3849 * we turn execute permission off.
3850 */
3851 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3852 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3853 +#define VM_STACK_DEFAULT_FLAGS32 \
3854 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3855 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3856
3857 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3858 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3859
3860 +#ifndef CONFIG_PAX_PAGEEXEC
3861 #define VM_STACK_DEFAULT_FLAGS \
3862 (is_32bit_task() ? \
3863 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3864 +#endif
3865
3866 #include <asm-generic/getorder.h>
3867
3868 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3869 index 292725c..f87ae14 100644
3870 --- a/arch/powerpc/include/asm/pgalloc-64.h
3871 +++ b/arch/powerpc/include/asm/pgalloc-64.h
3872 @@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3873 #ifndef CONFIG_PPC_64K_PAGES
3874
3875 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3876 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3877
3878 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3879 {
3880 @@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3881 pud_set(pud, (unsigned long)pmd);
3882 }
3883
3884 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3885 +{
3886 + pud_populate(mm, pud, pmd);
3887 +}
3888 +
3889 #define pmd_populate(mm, pmd, pte_page) \
3890 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3891 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3892 @@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3893 #else /* CONFIG_PPC_64K_PAGES */
3894
3895 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3896 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3897
3898 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3899 pte_t *pte)
3900 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
3901 index 2e0e411..7899c68 100644
3902 --- a/arch/powerpc/include/asm/pgtable.h
3903 +++ b/arch/powerpc/include/asm/pgtable.h
3904 @@ -2,6 +2,7 @@
3905 #define _ASM_POWERPC_PGTABLE_H
3906 #ifdef __KERNEL__
3907
3908 +#include <linux/const.h>
3909 #ifndef __ASSEMBLY__
3910 #include <asm/processor.h> /* For TASK_SIZE */
3911 #include <asm/mmu.h>
3912 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3913 index 4aad413..85d86bf 100644
3914 --- a/arch/powerpc/include/asm/pte-hash32.h
3915 +++ b/arch/powerpc/include/asm/pte-hash32.h
3916 @@ -21,6 +21,7 @@
3917 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3918 #define _PAGE_USER 0x004 /* usermode access allowed */
3919 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
3920 +#define _PAGE_EXEC _PAGE_GUARDED
3921 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3922 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3923 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
3924 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
3925 index 9d7f0fb..a28fe69 100644
3926 --- a/arch/powerpc/include/asm/reg.h
3927 +++ b/arch/powerpc/include/asm/reg.h
3928 @@ -212,6 +212,7 @@
3929 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3930 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3931 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3932 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3933 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3934 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3935 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
3936 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
3937 index 4a741c7..c8162227b 100644
3938 --- a/arch/powerpc/include/asm/thread_info.h
3939 +++ b/arch/powerpc/include/asm/thread_info.h
3940 @@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
3941 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3942 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3943 #define TIF_SINGLESTEP 8 /* singlestepping active */
3944 -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3945 #define TIF_SECCOMP 10 /* secure computing */
3946 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3947 #define TIF_NOERROR 12 /* Force successful syscall return */
3948 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3949 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
3950 +#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
3951 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
3952 +#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3953
3954 /* as above, but as bit values */
3955 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
3956 @@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
3957 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3958 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3959 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3960 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3961 +
3962 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3963 - _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3964 + _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT \
3965 + _TIF_GRSEC_SETXID)
3966
3967 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3968 _TIF_NOTIFY_RESUME)
3969 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3970 index bd0fb84..a42a14b 100644
3971 --- a/arch/powerpc/include/asm/uaccess.h
3972 +++ b/arch/powerpc/include/asm/uaccess.h
3973 @@ -13,6 +13,8 @@
3974 #define VERIFY_READ 0
3975 #define VERIFY_WRITE 1
3976
3977 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3978 +
3979 /*
3980 * The fs value determines whether argument validity checking should be
3981 * performed or not. If get_fs() == USER_DS, checking is performed, with
3982 @@ -327,52 +329,6 @@ do { \
3983 extern unsigned long __copy_tofrom_user(void __user *to,
3984 const void __user *from, unsigned long size);
3985
3986 -#ifndef __powerpc64__
3987 -
3988 -static inline unsigned long copy_from_user(void *to,
3989 - const void __user *from, unsigned long n)
3990 -{
3991 - unsigned long over;
3992 -
3993 - if (access_ok(VERIFY_READ, from, n))
3994 - return __copy_tofrom_user((__force void __user *)to, from, n);
3995 - if ((unsigned long)from < TASK_SIZE) {
3996 - over = (unsigned long)from + n - TASK_SIZE;
3997 - return __copy_tofrom_user((__force void __user *)to, from,
3998 - n - over) + over;
3999 - }
4000 - return n;
4001 -}
4002 -
4003 -static inline unsigned long copy_to_user(void __user *to,
4004 - const void *from, unsigned long n)
4005 -{
4006 - unsigned long over;
4007 -
4008 - if (access_ok(VERIFY_WRITE, to, n))
4009 - return __copy_tofrom_user(to, (__force void __user *)from, n);
4010 - if ((unsigned long)to < TASK_SIZE) {
4011 - over = (unsigned long)to + n - TASK_SIZE;
4012 - return __copy_tofrom_user(to, (__force void __user *)from,
4013 - n - over) + over;
4014 - }
4015 - return n;
4016 -}
4017 -
4018 -#else /* __powerpc64__ */
4019 -
4020 -#define __copy_in_user(to, from, size) \
4021 - __copy_tofrom_user((to), (from), (size))
4022 -
4023 -extern unsigned long copy_from_user(void *to, const void __user *from,
4024 - unsigned long n);
4025 -extern unsigned long copy_to_user(void __user *to, const void *from,
4026 - unsigned long n);
4027 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
4028 - unsigned long n);
4029 -
4030 -#endif /* __powerpc64__ */
4031 -
4032 static inline unsigned long __copy_from_user_inatomic(void *to,
4033 const void __user *from, unsigned long n)
4034 {
4035 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
4036 if (ret == 0)
4037 return 0;
4038 }
4039 +
4040 + if (!__builtin_constant_p(n))
4041 + check_object_size(to, n, false);
4042 +
4043 return __copy_tofrom_user((__force void __user *)to, from, n);
4044 }
4045
4046 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
4047 if (ret == 0)
4048 return 0;
4049 }
4050 +
4051 + if (!__builtin_constant_p(n))
4052 + check_object_size(from, n, true);
4053 +
4054 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4055 }
4056
4057 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
4058 return __copy_to_user_inatomic(to, from, size);
4059 }
4060
4061 +#ifndef __powerpc64__
4062 +
4063 +static inline unsigned long __must_check copy_from_user(void *to,
4064 + const void __user *from, unsigned long n)
4065 +{
4066 + unsigned long over;
4067 +
4068 + if ((long)n < 0)
4069 + return n;
4070 +
4071 + if (access_ok(VERIFY_READ, from, n)) {
4072 + if (!__builtin_constant_p(n))
4073 + check_object_size(to, n, false);
4074 + return __copy_tofrom_user((__force void __user *)to, from, n);
4075 + }
4076 + if ((unsigned long)from < TASK_SIZE) {
4077 + over = (unsigned long)from + n - TASK_SIZE;
4078 + if (!__builtin_constant_p(n - over))
4079 + check_object_size(to, n - over, false);
4080 + return __copy_tofrom_user((__force void __user *)to, from,
4081 + n - over) + over;
4082 + }
4083 + return n;
4084 +}
4085 +
4086 +static inline unsigned long __must_check copy_to_user(void __user *to,
4087 + const void *from, unsigned long n)
4088 +{
4089 + unsigned long over;
4090 +
4091 + if ((long)n < 0)
4092 + return n;
4093 +
4094 + if (access_ok(VERIFY_WRITE, to, n)) {
4095 + if (!__builtin_constant_p(n))
4096 + check_object_size(from, n, true);
4097 + return __copy_tofrom_user(to, (__force void __user *)from, n);
4098 + }
4099 + if ((unsigned long)to < TASK_SIZE) {
4100 + over = (unsigned long)to + n - TASK_SIZE;
4101 + if (!__builtin_constant_p(n))
4102 + check_object_size(from, n - over, true);
4103 + return __copy_tofrom_user(to, (__force void __user *)from,
4104 + n - over) + over;
4105 + }
4106 + return n;
4107 +}
4108 +
4109 +#else /* __powerpc64__ */
4110 +
4111 +#define __copy_in_user(to, from, size) \
4112 + __copy_tofrom_user((to), (from), (size))
4113 +
4114 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
4115 +{
4116 + if ((long)n < 0 || n > INT_MAX)
4117 + return n;
4118 +
4119 + if (!__builtin_constant_p(n))
4120 + check_object_size(to, n, false);
4121 +
4122 + if (likely(access_ok(VERIFY_READ, from, n)))
4123 + n = __copy_from_user(to, from, n);
4124 + else
4125 + memset(to, 0, n);
4126 + return n;
4127 +}
4128 +
4129 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
4130 +{
4131 + if ((long)n < 0 || n > INT_MAX)
4132 + return n;
4133 +
4134 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
4135 + if (!__builtin_constant_p(n))
4136 + check_object_size(from, n, true);
4137 + n = __copy_to_user(to, from, n);
4138 + }
4139 + return n;
4140 +}
4141 +
4142 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
4143 + unsigned long n);
4144 +
4145 +#endif /* __powerpc64__ */
4146 +
4147 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4148
4149 static inline unsigned long clear_user(void __user *addr, unsigned long size)
4150 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
4151 index 7215cc2..a9730c1 100644
4152 --- a/arch/powerpc/kernel/exceptions-64e.S
4153 +++ b/arch/powerpc/kernel/exceptions-64e.S
4154 @@ -661,6 +661,7 @@ storage_fault_common:
4155 std r14,_DAR(r1)
4156 std r15,_DSISR(r1)
4157 addi r3,r1,STACK_FRAME_OVERHEAD
4158 + bl .save_nvgprs
4159 mr r4,r14
4160 mr r5,r15
4161 ld r14,PACA_EXGEN+EX_R14(r13)
4162 @@ -669,8 +670,7 @@ storage_fault_common:
4163 cmpdi r3,0
4164 bne- 1f
4165 b .ret_from_except_lite
4166 -1: bl .save_nvgprs
4167 - mr r5,r3
4168 +1: mr r5,r3
4169 addi r3,r1,STACK_FRAME_OVERHEAD
4170 ld r4,_DAR(r1)
4171 bl .bad_page_fault
4172 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
4173 index 8f880bc..c5bd2f3 100644
4174 --- a/arch/powerpc/kernel/exceptions-64s.S
4175 +++ b/arch/powerpc/kernel/exceptions-64s.S
4176 @@ -890,10 +890,10 @@ handle_page_fault:
4177 11: ld r4,_DAR(r1)
4178 ld r5,_DSISR(r1)
4179 addi r3,r1,STACK_FRAME_OVERHEAD
4180 + bl .save_nvgprs
4181 bl .do_page_fault
4182 cmpdi r3,0
4183 beq+ 12f
4184 - bl .save_nvgprs
4185 mr r5,r3
4186 addi r3,r1,STACK_FRAME_OVERHEAD
4187 lwz r4,_DAR(r1)
4188 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
4189 index 2e3200c..72095ce 100644
4190 --- a/arch/powerpc/kernel/module_32.c
4191 +++ b/arch/powerpc/kernel/module_32.c
4192 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
4193 me->arch.core_plt_section = i;
4194 }
4195 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4196 - printk("Module doesn't contain .plt or .init.plt sections.\n");
4197 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4198 return -ENOEXEC;
4199 }
4200
4201 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
4202
4203 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4204 /* Init, or core PLT? */
4205 - if (location >= mod->module_core
4206 - && location < mod->module_core + mod->core_size)
4207 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4208 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4209 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4210 - else
4211 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4212 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4213 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4214 + else {
4215 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4216 + return ~0UL;
4217 + }
4218
4219 /* Find this entry, or if that fails, the next avail. entry */
4220 while (entry->jump[0]) {
4221 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
4222 index 4937c96..70714b7 100644
4223 --- a/arch/powerpc/kernel/process.c
4224 +++ b/arch/powerpc/kernel/process.c
4225 @@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
4226 * Lookup NIP late so we have the best change of getting the
4227 * above info out without failing
4228 */
4229 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4230 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4231 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4232 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4233 #endif
4234 show_stack(current, (unsigned long *) regs->gpr[1]);
4235 if (!user_mode(regs))
4236 @@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4237 newsp = stack[0];
4238 ip = stack[STACK_FRAME_LR_SAVE];
4239 if (!firstframe || ip != lr) {
4240 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4241 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4242 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4243 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4244 - printk(" (%pS)",
4245 + printk(" (%pA)",
4246 (void *)current->ret_stack[curr_frame].ret);
4247 curr_frame--;
4248 }
4249 @@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
4250 struct pt_regs *regs = (struct pt_regs *)
4251 (sp + STACK_FRAME_OVERHEAD);
4252 lr = regs->link;
4253 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
4254 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
4255 regs->trap, (void *)regs->nip, (void *)lr);
4256 firstframe = 1;
4257 }
4258 @@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
4259 }
4260
4261 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4262 -
4263 -unsigned long arch_align_stack(unsigned long sp)
4264 -{
4265 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4266 - sp -= get_random_int() & ~PAGE_MASK;
4267 - return sp & ~0xf;
4268 -}
4269 -
4270 -static inline unsigned long brk_rnd(void)
4271 -{
4272 - unsigned long rnd = 0;
4273 -
4274 - /* 8MB for 32bit, 1GB for 64bit */
4275 - if (is_32bit_task())
4276 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4277 - else
4278 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4279 -
4280 - return rnd << PAGE_SHIFT;
4281 -}
4282 -
4283 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4284 -{
4285 - unsigned long base = mm->brk;
4286 - unsigned long ret;
4287 -
4288 -#ifdef CONFIG_PPC_STD_MMU_64
4289 - /*
4290 - * If we are using 1TB segments and we are allowed to randomise
4291 - * the heap, we can put it above 1TB so it is backed by a 1TB
4292 - * segment. Otherwise the heap will be in the bottom 1TB
4293 - * which always uses 256MB segments and this may result in a
4294 - * performance penalty.
4295 - */
4296 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4297 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4298 -#endif
4299 -
4300 - ret = PAGE_ALIGN(base + brk_rnd());
4301 -
4302 - if (ret < mm->brk)
4303 - return mm->brk;
4304 -
4305 - return ret;
4306 -}
4307 -
4308 -unsigned long randomize_et_dyn(unsigned long base)
4309 -{
4310 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4311 -
4312 - if (ret < base)
4313 - return base;
4314 -
4315 - return ret;
4316 -}
4317 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
4318 index 8d8e028..c2aeb50 100644
4319 --- a/arch/powerpc/kernel/ptrace.c
4320 +++ b/arch/powerpc/kernel/ptrace.c
4321 @@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4322 return ret;
4323 }
4324
4325 +#ifdef CONFIG_GRKERNSEC_SETXID
4326 +extern void gr_delayed_cred_worker(void);
4327 +#endif
4328 +
4329 /*
4330 * We must return the syscall number to actually look up in the table.
4331 * This can be -1L to skip running any syscall at all.
4332 @@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4333
4334 secure_computing(regs->gpr[0]);
4335
4336 +#ifdef CONFIG_GRKERNSEC_SETXID
4337 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4338 + gr_delayed_cred_worker();
4339 +#endif
4340 +
4341 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4342 tracehook_report_syscall_entry(regs))
4343 /*
4344 @@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4345 {
4346 int step;
4347
4348 +#ifdef CONFIG_GRKERNSEC_SETXID
4349 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4350 + gr_delayed_cred_worker();
4351 +#endif
4352 +
4353 audit_syscall_exit(regs);
4354
4355 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
4356 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
4357 index 45eb998..0cb36bc 100644
4358 --- a/arch/powerpc/kernel/signal_32.c
4359 +++ b/arch/powerpc/kernel/signal_32.c
4360 @@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
4361 /* Save user registers on the stack */
4362 frame = &rt_sf->uc.uc_mcontext;
4363 addr = frame;
4364 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4365 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4366 if (save_user_regs(regs, frame, 0, 1))
4367 goto badframe;
4368 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
4369 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
4370 index 2692efd..6673d2e 100644
4371 --- a/arch/powerpc/kernel/signal_64.c
4372 +++ b/arch/powerpc/kernel/signal_64.c
4373 @@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
4374 current->thread.fpscr.val = 0;
4375
4376 /* Set up to return from userspace. */
4377 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4378 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4379 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4380 } else {
4381 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
4382 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
4383 index 1589723..cefe690 100644
4384 --- a/arch/powerpc/kernel/traps.c
4385 +++ b/arch/powerpc/kernel/traps.c
4386 @@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
4387 return flags;
4388 }
4389
4390 +extern void gr_handle_kernel_exploit(void);
4391 +
4392 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4393 int signr)
4394 {
4395 @@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4396 panic("Fatal exception in interrupt");
4397 if (panic_on_oops)
4398 panic("Fatal exception");
4399 +
4400 + gr_handle_kernel_exploit();
4401 +
4402 do_exit(signr);
4403 }
4404
4405 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
4406 index 9eb5b9b..e45498a 100644
4407 --- a/arch/powerpc/kernel/vdso.c
4408 +++ b/arch/powerpc/kernel/vdso.c
4409 @@ -34,6 +34,7 @@
4410 #include <asm/firmware.h>
4411 #include <asm/vdso.h>
4412 #include <asm/vdso_datapage.h>
4413 +#include <asm/mman.h>
4414
4415 #include "setup.h"
4416
4417 @@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4418 vdso_base = VDSO32_MBASE;
4419 #endif
4420
4421 - current->mm->context.vdso_base = 0;
4422 + current->mm->context.vdso_base = ~0UL;
4423
4424 /* vDSO has a problem and was disabled, just don't "enable" it for the
4425 * process
4426 @@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
4427 vdso_base = get_unmapped_area(NULL, vdso_base,
4428 (vdso_pages << PAGE_SHIFT) +
4429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4430 - 0, 0);
4431 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
4432 if (IS_ERR_VALUE(vdso_base)) {
4433 rc = vdso_base;
4434 goto fail_mmapsem;
4435 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4436 index 5eea6f3..5d10396 100644
4437 --- a/arch/powerpc/lib/usercopy_64.c
4438 +++ b/arch/powerpc/lib/usercopy_64.c
4439 @@ -9,22 +9,6 @@
4440 #include <linux/module.h>
4441 #include <asm/uaccess.h>
4442
4443 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4444 -{
4445 - if (likely(access_ok(VERIFY_READ, from, n)))
4446 - n = __copy_from_user(to, from, n);
4447 - else
4448 - memset(to, 0, n);
4449 - return n;
4450 -}
4451 -
4452 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4453 -{
4454 - if (likely(access_ok(VERIFY_WRITE, to, n)))
4455 - n = __copy_to_user(to, from, n);
4456 - return n;
4457 -}
4458 -
4459 unsigned long copy_in_user(void __user *to, const void __user *from,
4460 unsigned long n)
4461 {
4462 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
4463 return n;
4464 }
4465
4466 -EXPORT_SYMBOL(copy_from_user);
4467 -EXPORT_SYMBOL(copy_to_user);
4468 EXPORT_SYMBOL(copy_in_user);
4469
4470 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
4471 index 08ffcf5..a0ab912 100644
4472 --- a/arch/powerpc/mm/fault.c
4473 +++ b/arch/powerpc/mm/fault.c
4474 @@ -32,6 +32,10 @@
4475 #include <linux/perf_event.h>
4476 #include <linux/magic.h>
4477 #include <linux/ratelimit.h>
4478 +#include <linux/slab.h>
4479 +#include <linux/pagemap.h>
4480 +#include <linux/compiler.h>
4481 +#include <linux/unistd.h>
4482
4483 #include <asm/firmware.h>
4484 #include <asm/page.h>
4485 @@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
4486 }
4487 #endif
4488
4489 +#ifdef CONFIG_PAX_PAGEEXEC
4490 +/*
4491 + * PaX: decide what to do with offenders (regs->nip = fault address)
4492 + *
4493 + * returns 1 when task should be killed
4494 + */
4495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4496 +{
4497 + return 1;
4498 +}
4499 +
4500 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4501 +{
4502 + unsigned long i;
4503 +
4504 + printk(KERN_ERR "PAX: bytes at PC: ");
4505 + for (i = 0; i < 5; i++) {
4506 + unsigned int c;
4507 + if (get_user(c, (unsigned int __user *)pc+i))
4508 + printk(KERN_CONT "???????? ");
4509 + else
4510 + printk(KERN_CONT "%08x ", c);
4511 + }
4512 + printk("\n");
4513 +}
4514 +#endif
4515 +
4516 /*
4517 * Check whether the instruction at regs->nip is a store using
4518 * an update addressing form which will update r1.
4519 @@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
4520 * indicate errors in DSISR but can validly be set in SRR1.
4521 */
4522 if (trap == 0x400)
4523 - error_code &= 0x48200000;
4524 + error_code &= 0x58200000;
4525 else
4526 is_write = error_code & DSISR_ISSTORE;
4527 #else
4528 @@ -366,7 +397,7 @@ good_area:
4529 * "undefined". Of those that can be set, this is the only
4530 * one which seems bad.
4531 */
4532 - if (error_code & 0x10000000)
4533 + if (error_code & DSISR_GUARDED)
4534 /* Guarded storage error. */
4535 goto bad_area;
4536 #endif /* CONFIG_8xx */
4537 @@ -381,7 +412,7 @@ good_area:
4538 * processors use the same I/D cache coherency mechanism
4539 * as embedded.
4540 */
4541 - if (error_code & DSISR_PROTFAULT)
4542 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4543 goto bad_area;
4544 #endif /* CONFIG_PPC_STD_MMU */
4545
4546 @@ -463,6 +494,23 @@ bad_area:
4547 bad_area_nosemaphore:
4548 /* User mode accesses cause a SIGSEGV */
4549 if (user_mode(regs)) {
4550 +
4551 +#ifdef CONFIG_PAX_PAGEEXEC
4552 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4553 +#ifdef CONFIG_PPC_STD_MMU
4554 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4555 +#else
4556 + if (is_exec && regs->nip == address) {
4557 +#endif
4558 + switch (pax_handle_fetch_fault(regs)) {
4559 + }
4560 +
4561 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4562 + do_group_exit(SIGKILL);
4563 + }
4564 + }
4565 +#endif
4566 +
4567 _exception(SIGSEGV, regs, code, address);
4568 return 0;
4569 }
4570 diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
4571 index 67a42ed..1c7210c 100644
4572 --- a/arch/powerpc/mm/mmap_64.c
4573 +++ b/arch/powerpc/mm/mmap_64.c
4574 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4575 */
4576 if (mmap_is_legacy()) {
4577 mm->mmap_base = TASK_UNMAPPED_BASE;
4578 +
4579 +#ifdef CONFIG_PAX_RANDMMAP
4580 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4581 + mm->mmap_base += mm->delta_mmap;
4582 +#endif
4583 +
4584 mm->get_unmapped_area = arch_get_unmapped_area;
4585 mm->unmap_area = arch_unmap_area;
4586 } else {
4587 mm->mmap_base = mmap_base();
4588 +
4589 +#ifdef CONFIG_PAX_RANDMMAP
4590 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4591 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4592 +#endif
4593 +
4594 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4595 mm->unmap_area = arch_unmap_area_topdown;
4596 }
4597 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4598 index 73709f7..6b90313 100644
4599 --- a/arch/powerpc/mm/slice.c
4600 +++ b/arch/powerpc/mm/slice.c
4601 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
4602 if ((mm->task_size - len) < addr)
4603 return 0;
4604 vma = find_vma(mm, addr);
4605 - return (!vma || (addr + len) <= vma->vm_start);
4606 + return check_heap_stack_gap(vma, addr, len);
4607 }
4608
4609 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4610 @@ -256,7 +256,7 @@ full_search:
4611 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4612 continue;
4613 }
4614 - if (!vma || addr + len <= vma->vm_start) {
4615 + if (check_heap_stack_gap(vma, addr, len)) {
4616 /*
4617 * Remember the place where we stopped the search:
4618 */
4619 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4620 }
4621 }
4622
4623 - addr = mm->mmap_base;
4624 - while (addr > len) {
4625 + if (mm->mmap_base < len)
4626 + addr = -ENOMEM;
4627 + else
4628 + addr = mm->mmap_base - len;
4629 +
4630 + while (!IS_ERR_VALUE(addr)) {
4631 /* Go down by chunk size */
4632 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4633 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
4634
4635 /* Check for hit with different page size */
4636 mask = slice_range_to_mask(addr, len);
4637 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4638 * return with success:
4639 */
4640 vma = find_vma(mm, addr);
4641 - if (!vma || (addr + len) <= vma->vm_start) {
4642 + if (check_heap_stack_gap(vma, addr, len)) {
4643 /* remember the address as a hint for next time */
4644 if (use_cache)
4645 mm->free_area_cache = addr;
4646 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
4647 mm->cached_hole_size = vma->vm_start - addr;
4648
4649 /* try just below the current vma->vm_start */
4650 - addr = vma->vm_start;
4651 + addr = skip_heap_stack_gap(vma, len);
4652 }
4653
4654 /*
4655 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
4656 if (fixed && addr > (mm->task_size - len))
4657 return -EINVAL;
4658
4659 +#ifdef CONFIG_PAX_RANDMMAP
4660 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4661 + addr = 0;
4662 +#endif
4663 +
4664 /* If hint, make sure it matches our alignment restrictions */
4665 if (!fixed && addr) {
4666 addr = _ALIGN_UP(addr, 1ul << pshift);
4667 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
4668 index 748347b..81bc6c7 100644
4669 --- a/arch/s390/include/asm/atomic.h
4670 +++ b/arch/s390/include/asm/atomic.h
4671 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4672 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4673 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4674
4675 +#define atomic64_read_unchecked(v) atomic64_read(v)
4676 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4677 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4678 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4679 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4680 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4681 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4682 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4683 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4684 +
4685 #define smp_mb__before_atomic_dec() smp_mb()
4686 #define smp_mb__after_atomic_dec() smp_mb()
4687 #define smp_mb__before_atomic_inc() smp_mb()
4688 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4689 index 2a30d5a..5e5586f 100644
4690 --- a/arch/s390/include/asm/cache.h
4691 +++ b/arch/s390/include/asm/cache.h
4692 @@ -11,8 +11,10 @@
4693 #ifndef __ARCH_S390_CACHE_H
4694 #define __ARCH_S390_CACHE_H
4695
4696 -#define L1_CACHE_BYTES 256
4697 +#include <linux/const.h>
4698 +
4699 #define L1_CACHE_SHIFT 8
4700 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4701 #define NET_SKB_PAD 32
4702
4703 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4704 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
4705 index c4ee39f..352881b 100644
4706 --- a/arch/s390/include/asm/elf.h
4707 +++ b/arch/s390/include/asm/elf.h
4708 @@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
4709 the loader. We need to make sure that it is out of the way of the program
4710 that it will "exec", and that there is sufficient room for the brk. */
4711
4712 -extern unsigned long randomize_et_dyn(unsigned long base);
4713 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4714 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4715 +
4716 +#ifdef CONFIG_PAX_ASLR
4717 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
4718 +
4719 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4720 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4721 +#endif
4722
4723 /* This yields a mask that user programs can use to figure out what
4724 instruction set this CPU supports. */
4725 @@ -210,7 +216,4 @@ struct linux_binprm;
4726 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4727 int arch_setup_additional_pages(struct linux_binprm *, int);
4728
4729 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4730 -#define arch_randomize_brk arch_randomize_brk
4731 -
4732 #endif
4733 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4734 index c4a93d6..4d2a9b4 100644
4735 --- a/arch/s390/include/asm/exec.h
4736 +++ b/arch/s390/include/asm/exec.h
4737 @@ -7,6 +7,6 @@
4738 #ifndef __ASM_EXEC_H
4739 #define __ASM_EXEC_H
4740
4741 -extern unsigned long arch_align_stack(unsigned long sp);
4742 +#define arch_align_stack(x) ((x) & ~0xfUL)
4743
4744 #endif /* __ASM_EXEC_H */
4745 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
4746 index 8f2cada..43072c1 100644
4747 --- a/arch/s390/include/asm/uaccess.h
4748 +++ b/arch/s390/include/asm/uaccess.h
4749 @@ -236,6 +236,10 @@ static inline unsigned long __must_check
4750 copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752 might_fault();
4753 +
4754 + if ((long)n < 0)
4755 + return n;
4756 +
4757 if (access_ok(VERIFY_WRITE, to, n))
4758 n = __copy_to_user(to, from, n);
4759 return n;
4760 @@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
4761 static inline unsigned long __must_check
4762 __copy_from_user(void *to, const void __user *from, unsigned long n)
4763 {
4764 + if ((long)n < 0)
4765 + return n;
4766 +
4767 if (__builtin_constant_p(n) && (n <= 256))
4768 return uaccess.copy_from_user_small(n, from, to);
4769 else
4770 @@ -292,10 +299,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4771 static inline unsigned long __must_check
4772 copy_from_user(void *to, const void __user *from, unsigned long n)
4773 {
4774 - unsigned int sz = __compiletime_object_size(to);
4775 + size_t sz = __compiletime_object_size(to);
4776
4777 might_fault();
4778 - if (unlikely(sz != -1 && sz < n)) {
4779 +
4780 + if ((long)n < 0)
4781 + return n;
4782 +
4783 + if (unlikely(sz != (size_t)-1 && sz < n)) {
4784 copy_from_user_overflow();
4785 return n;
4786 }
4787 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4788 index dfcb343..eda788a 100644
4789 --- a/arch/s390/kernel/module.c
4790 +++ b/arch/s390/kernel/module.c
4791 @@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
4792
4793 /* Increase core size by size of got & plt and set start
4794 offsets for got and plt. */
4795 - me->core_size = ALIGN(me->core_size, 4);
4796 - me->arch.got_offset = me->core_size;
4797 - me->core_size += me->arch.got_size;
4798 - me->arch.plt_offset = me->core_size;
4799 - me->core_size += me->arch.plt_size;
4800 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
4801 + me->arch.got_offset = me->core_size_rw;
4802 + me->core_size_rw += me->arch.got_size;
4803 + me->arch.plt_offset = me->core_size_rx;
4804 + me->core_size_rx += me->arch.plt_size;
4805 return 0;
4806 }
4807
4808 @@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4809 if (info->got_initialized == 0) {
4810 Elf_Addr *gotent;
4811
4812 - gotent = me->module_core + me->arch.got_offset +
4813 + gotent = me->module_core_rw + me->arch.got_offset +
4814 info->got_offset;
4815 *gotent = val;
4816 info->got_initialized = 1;
4817 @@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4818 else if (r_type == R_390_GOTENT ||
4819 r_type == R_390_GOTPLTENT)
4820 *(unsigned int *) loc =
4821 - (val + (Elf_Addr) me->module_core - loc) >> 1;
4822 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4823 else if (r_type == R_390_GOT64 ||
4824 r_type == R_390_GOTPLT64)
4825 *(unsigned long *) loc = val;
4826 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4827 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4828 if (info->plt_initialized == 0) {
4829 unsigned int *ip;
4830 - ip = me->module_core + me->arch.plt_offset +
4831 + ip = me->module_core_rx + me->arch.plt_offset +
4832 info->plt_offset;
4833 #ifndef CONFIG_64BIT
4834 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
4835 @@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4836 val - loc + 0xffffUL < 0x1ffffeUL) ||
4837 (r_type == R_390_PLT32DBL &&
4838 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4839 - val = (Elf_Addr) me->module_core +
4840 + val = (Elf_Addr) me->module_core_rx +
4841 me->arch.plt_offset +
4842 info->plt_offset;
4843 val += rela->r_addend - loc;
4844 @@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4845 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4846 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4847 val = val + rela->r_addend -
4848 - ((Elf_Addr) me->module_core + me->arch.got_offset);
4849 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4850 if (r_type == R_390_GOTOFF16)
4851 *(unsigned short *) loc = val;
4852 else if (r_type == R_390_GOTOFF32)
4853 @@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
4854 break;
4855 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4856 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4857 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
4858 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4859 rela->r_addend - loc;
4860 if (r_type == R_390_GOTPC)
4861 *(unsigned int *) loc = val;
4862 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
4863 index 60055ce..ee4b252 100644
4864 --- a/arch/s390/kernel/process.c
4865 +++ b/arch/s390/kernel/process.c
4866 @@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
4867 }
4868 return 0;
4869 }
4870 -
4871 -unsigned long arch_align_stack(unsigned long sp)
4872 -{
4873 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4874 - sp -= get_random_int() & ~PAGE_MASK;
4875 - return sp & ~0xf;
4876 -}
4877 -
4878 -static inline unsigned long brk_rnd(void)
4879 -{
4880 - /* 8MB for 32bit, 1GB for 64bit */
4881 - if (is_32bit_task())
4882 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4883 - else
4884 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4885 -}
4886 -
4887 -unsigned long arch_randomize_brk(struct mm_struct *mm)
4888 -{
4889 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4890 -
4891 - if (ret < mm->brk)
4892 - return mm->brk;
4893 - return ret;
4894 -}
4895 -
4896 -unsigned long randomize_et_dyn(unsigned long base)
4897 -{
4898 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4899 -
4900 - if (!(current->flags & PF_RANDOMIZE))
4901 - return base;
4902 - if (ret < base)
4903 - return base;
4904 - return ret;
4905 -}
4906 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4907 index 2857c48..d047481 100644
4908 --- a/arch/s390/mm/mmap.c
4909 +++ b/arch/s390/mm/mmap.c
4910 @@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4911 */
4912 if (mmap_is_legacy()) {
4913 mm->mmap_base = TASK_UNMAPPED_BASE;
4914 +
4915 +#ifdef CONFIG_PAX_RANDMMAP
4916 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4917 + mm->mmap_base += mm->delta_mmap;
4918 +#endif
4919 +
4920 mm->get_unmapped_area = arch_get_unmapped_area;
4921 mm->unmap_area = arch_unmap_area;
4922 } else {
4923 mm->mmap_base = mmap_base();
4924 +
4925 +#ifdef CONFIG_PAX_RANDMMAP
4926 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4927 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4928 +#endif
4929 +
4930 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4931 mm->unmap_area = arch_unmap_area_topdown;
4932 }
4933 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4934 */
4935 if (mmap_is_legacy()) {
4936 mm->mmap_base = TASK_UNMAPPED_BASE;
4937 +
4938 +#ifdef CONFIG_PAX_RANDMMAP
4939 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4940 + mm->mmap_base += mm->delta_mmap;
4941 +#endif
4942 +
4943 mm->get_unmapped_area = s390_get_unmapped_area;
4944 mm->unmap_area = arch_unmap_area;
4945 } else {
4946 mm->mmap_base = mmap_base();
4947 +
4948 +#ifdef CONFIG_PAX_RANDMMAP
4949 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4950 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4951 +#endif
4952 +
4953 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4954 mm->unmap_area = arch_unmap_area_topdown;
4955 }
4956 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4957 index ae3d59f..f65f075 100644
4958 --- a/arch/score/include/asm/cache.h
4959 +++ b/arch/score/include/asm/cache.h
4960 @@ -1,7 +1,9 @@
4961 #ifndef _ASM_SCORE_CACHE_H
4962 #define _ASM_SCORE_CACHE_H
4963
4964 +#include <linux/const.h>
4965 +
4966 #define L1_CACHE_SHIFT 4
4967 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4968 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4969
4970 #endif /* _ASM_SCORE_CACHE_H */
4971 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4972 index f9f3cd5..58ff438 100644
4973 --- a/arch/score/include/asm/exec.h
4974 +++ b/arch/score/include/asm/exec.h
4975 @@ -1,6 +1,6 @@
4976 #ifndef _ASM_SCORE_EXEC_H
4977 #define _ASM_SCORE_EXEC_H
4978
4979 -extern unsigned long arch_align_stack(unsigned long sp);
4980 +#define arch_align_stack(x) (x)
4981
4982 #endif /* _ASM_SCORE_EXEC_H */
4983 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4984 index 2707023..1c2a3b7 100644
4985 --- a/arch/score/kernel/process.c
4986 +++ b/arch/score/kernel/process.c
4987 @@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
4988
4989 return task_pt_regs(task)->cp0_epc;
4990 }
4991 -
4992 -unsigned long arch_align_stack(unsigned long sp)
4993 -{
4994 - return sp;
4995 -}
4996 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4997 index ef9e555..331bd29 100644
4998 --- a/arch/sh/include/asm/cache.h
4999 +++ b/arch/sh/include/asm/cache.h
5000 @@ -9,10 +9,11 @@
5001 #define __ASM_SH_CACHE_H
5002 #ifdef __KERNEL__
5003
5004 +#include <linux/const.h>
5005 #include <linux/init.h>
5006 #include <cpu/cache.h>
5007
5008 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5009 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5010
5011 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5012
5013 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5014 index afeb710..d1d1289 100644
5015 --- a/arch/sh/mm/mmap.c
5016 +++ b/arch/sh/mm/mmap.c
5017 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5018 addr = PAGE_ALIGN(addr);
5019
5020 vma = find_vma(mm, addr);
5021 - if (TASK_SIZE - len >= addr &&
5022 - (!vma || addr + len <= vma->vm_start))
5023 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5024 return addr;
5025 }
5026
5027 @@ -106,7 +105,7 @@ full_search:
5028 }
5029 return -ENOMEM;
5030 }
5031 - if (likely(!vma || addr + len <= vma->vm_start)) {
5032 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5033 /*
5034 * Remember the place where we stopped the search:
5035 */
5036 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5037 addr = PAGE_ALIGN(addr);
5038
5039 vma = find_vma(mm, addr);
5040 - if (TASK_SIZE - len >= addr &&
5041 - (!vma || addr + len <= vma->vm_start))
5042 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5043 return addr;
5044 }
5045
5046 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5047 /* make sure it can fit in the remaining address space */
5048 if (likely(addr > len)) {
5049 vma = find_vma(mm, addr-len);
5050 - if (!vma || addr <= vma->vm_start) {
5051 + if (check_heap_stack_gap(vma, addr - len, len)) {
5052 /* remember the address as a hint for next time */
5053 return (mm->free_area_cache = addr-len);
5054 }
5055 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5056 if (unlikely(mm->mmap_base < len))
5057 goto bottomup;
5058
5059 - addr = mm->mmap_base-len;
5060 - if (do_colour_align)
5061 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5062 + addr = mm->mmap_base - len;
5063
5064 do {
5065 + if (do_colour_align)
5066 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5067 /*
5068 * Lookup failure means no vma is above this address,
5069 * else if new region fits below vma->vm_start,
5070 * return with success:
5071 */
5072 vma = find_vma(mm, addr);
5073 - if (likely(!vma || addr+len <= vma->vm_start)) {
5074 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5075 /* remember the address as a hint for next time */
5076 return (mm->free_area_cache = addr);
5077 }
5078 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5079 mm->cached_hole_size = vma->vm_start - addr;
5080
5081 /* try just below the current vma->vm_start */
5082 - addr = vma->vm_start-len;
5083 - if (do_colour_align)
5084 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5085 - } while (likely(len < vma->vm_start));
5086 + addr = skip_heap_stack_gap(vma, len);
5087 + } while (!IS_ERR_VALUE(addr));
5088
5089 bottomup:
5090 /*
5091 diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
5092 index eddcfb3..b117d90 100644
5093 --- a/arch/sparc/Makefile
5094 +++ b/arch/sparc/Makefile
5095 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5096 # Export what is needed by arch/sparc/boot/Makefile
5097 export VMLINUX_INIT VMLINUX_MAIN
5098 VMLINUX_INIT := $(head-y) $(init-y)
5099 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5100 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5101 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5102 VMLINUX_MAIN += $(drivers-y) $(net-y)
5103
5104 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
5105 index ce35a1c..2e7b8f9 100644
5106 --- a/arch/sparc/include/asm/atomic_64.h
5107 +++ b/arch/sparc/include/asm/atomic_64.h
5108 @@ -14,18 +14,40 @@
5109 #define ATOMIC64_INIT(i) { (i) }
5110
5111 #define atomic_read(v) (*(volatile int *)&(v)->counter)
5112 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5113 +{
5114 + return v->counter;
5115 +}
5116 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
5117 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5118 +{
5119 + return v->counter;
5120 +}
5121
5122 #define atomic_set(v, i) (((v)->counter) = i)
5123 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5124 +{
5125 + v->counter = i;
5126 +}
5127 #define atomic64_set(v, i) (((v)->counter) = i)
5128 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5129 +{
5130 + v->counter = i;
5131 +}
5132
5133 extern void atomic_add(int, atomic_t *);
5134 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
5135 extern void atomic64_add(long, atomic64_t *);
5136 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
5137 extern void atomic_sub(int, atomic_t *);
5138 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
5139 extern void atomic64_sub(long, atomic64_t *);
5140 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
5141
5142 extern int atomic_add_ret(int, atomic_t *);
5143 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5144 extern long atomic64_add_ret(long, atomic64_t *);
5145 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
5146 extern int atomic_sub_ret(int, atomic_t *);
5147 extern long atomic64_sub_ret(long, atomic64_t *);
5148
5149 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5150 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
5151
5152 #define atomic_inc_return(v) atomic_add_ret(1, v)
5153 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5154 +{
5155 + return atomic_add_ret_unchecked(1, v);
5156 +}
5157 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
5158 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5159 +{
5160 + return atomic64_add_ret_unchecked(1, v);
5161 +}
5162
5163 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5164 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
5165
5166 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5167 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5168 +{
5169 + return atomic_add_ret_unchecked(i, v);
5170 +}
5171 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
5172 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5173 +{
5174 + return atomic64_add_ret_unchecked(i, v);
5175 +}
5176
5177 /*
5178 * atomic_inc_and_test - increment and test
5179 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5180 * other cases.
5181 */
5182 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5183 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5184 +{
5185 + return atomic_inc_return_unchecked(v) == 0;
5186 +}
5187 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5188
5189 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
5190 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
5191 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5192
5193 #define atomic_inc(v) atomic_add(1, v)
5194 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5195 +{
5196 + atomic_add_unchecked(1, v);
5197 +}
5198 #define atomic64_inc(v) atomic64_add(1, v)
5199 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5200 +{
5201 + atomic64_add_unchecked(1, v);
5202 +}
5203
5204 #define atomic_dec(v) atomic_sub(1, v)
5205 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5206 +{
5207 + atomic_sub_unchecked(1, v);
5208 +}
5209 #define atomic64_dec(v) atomic64_sub(1, v)
5210 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5211 +{
5212 + atomic64_sub_unchecked(1, v);
5213 +}
5214
5215 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5216 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
5217
5218 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5219 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5220 +{
5221 + return cmpxchg(&v->counter, old, new);
5222 +}
5223 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
5224 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5225 +{
5226 + return xchg(&v->counter, new);
5227 +}
5228
5229 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5230 {
5231 - int c, old;
5232 + int c, old, new;
5233 c = atomic_read(v);
5234 for (;;) {
5235 - if (unlikely(c == (u)))
5236 + if (unlikely(c == u))
5237 break;
5238 - old = atomic_cmpxchg((v), c, c + (a));
5239 +
5240 + asm volatile("addcc %2, %0, %0\n"
5241 +
5242 +#ifdef CONFIG_PAX_REFCOUNT
5243 + "tvs %%icc, 6\n"
5244 +#endif
5245 +
5246 + : "=r" (new)
5247 + : "0" (c), "ir" (a)
5248 + : "cc");
5249 +
5250 + old = atomic_cmpxchg(v, c, new);
5251 if (likely(old == c))
5252 break;
5253 c = old;
5254 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
5255 #define atomic64_cmpxchg(v, o, n) \
5256 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5257 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5258 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5259 +{
5260 + return xchg(&v->counter, new);
5261 +}
5262
5263 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
5264 {
5265 - long c, old;
5266 + long c, old, new;
5267 c = atomic64_read(v);
5268 for (;;) {
5269 - if (unlikely(c == (u)))
5270 + if (unlikely(c == u))
5271 break;
5272 - old = atomic64_cmpxchg((v), c, c + (a));
5273 +
5274 + asm volatile("addcc %2, %0, %0\n"
5275 +
5276 +#ifdef CONFIG_PAX_REFCOUNT
5277 + "tvs %%xcc, 6\n"
5278 +#endif
5279 +
5280 + : "=r" (new)
5281 + : "0" (c), "ir" (a)
5282 + : "cc");
5283 +
5284 + old = atomic64_cmpxchg(v, c, new);
5285 if (likely(old == c))
5286 break;
5287 c = old;
5288 }
5289 - return c != (u);
5290 + return c != u;
5291 }
5292
5293 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5294 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
5295 index 69358b5..9d0d492 100644
5296 --- a/arch/sparc/include/asm/cache.h
5297 +++ b/arch/sparc/include/asm/cache.h
5298 @@ -7,10 +7,12 @@
5299 #ifndef _SPARC_CACHE_H
5300 #define _SPARC_CACHE_H
5301
5302 +#include <linux/const.h>
5303 +
5304 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5305
5306 #define L1_CACHE_SHIFT 5
5307 -#define L1_CACHE_BYTES 32
5308 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5309
5310 #ifdef CONFIG_SPARC32
5311 #define SMP_CACHE_BYTES_SHIFT 5
5312 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5313 index 4269ca6..e3da77f 100644
5314 --- a/arch/sparc/include/asm/elf_32.h
5315 +++ b/arch/sparc/include/asm/elf_32.h
5316 @@ -114,6 +114,13 @@ typedef struct {
5317
5318 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5319
5320 +#ifdef CONFIG_PAX_ASLR
5321 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
5322 +
5323 +#define PAX_DELTA_MMAP_LEN 16
5324 +#define PAX_DELTA_STACK_LEN 16
5325 +#endif
5326 +
5327 /* This yields a mask that user programs can use to figure out what
5328 instruction set this cpu supports. This can NOT be done in userspace
5329 on Sparc. */
5330 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5331 index 7df8b7f..4946269 100644
5332 --- a/arch/sparc/include/asm/elf_64.h
5333 +++ b/arch/sparc/include/asm/elf_64.h
5334 @@ -180,6 +180,13 @@ typedef struct {
5335 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5336 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5337
5338 +#ifdef CONFIG_PAX_ASLR
5339 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5340 +
5341 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5342 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
5343 +#endif
5344 +
5345 extern unsigned long sparc64_elf_hwcap;
5346 #define ELF_HWCAP sparc64_elf_hwcap
5347
5348 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5349 index ca2b344..c6084f89 100644
5350 --- a/arch/sparc/include/asm/pgalloc_32.h
5351 +++ b/arch/sparc/include/asm/pgalloc_32.h
5352 @@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5353 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5354 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5355 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5356 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
5357
5358 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5359 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5360 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5361 index 40b2d7a..22a665b 100644
5362 --- a/arch/sparc/include/asm/pgalloc_64.h
5363 +++ b/arch/sparc/include/asm/pgalloc_64.h
5364 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5365 }
5366
5367 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5368 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5369
5370 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5371 {
5372 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
5373 index 3d71018..48a11c5 100644
5374 --- a/arch/sparc/include/asm/pgtable_32.h
5375 +++ b/arch/sparc/include/asm/pgtable_32.h
5376 @@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
5377 BTFIXUPDEF_INT(page_none)
5378 BTFIXUPDEF_INT(page_copy)
5379 BTFIXUPDEF_INT(page_readonly)
5380 +
5381 +#ifdef CONFIG_PAX_PAGEEXEC
5382 +BTFIXUPDEF_INT(page_shared_noexec)
5383 +BTFIXUPDEF_INT(page_copy_noexec)
5384 +BTFIXUPDEF_INT(page_readonly_noexec)
5385 +#endif
5386 +
5387 BTFIXUPDEF_INT(page_kernel)
5388
5389 #define PMD_SHIFT SUN4C_PMD_SHIFT
5390 @@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
5391 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5392 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5393
5394 +#ifdef CONFIG_PAX_PAGEEXEC
5395 +extern pgprot_t PAGE_SHARED_NOEXEC;
5396 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5397 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5398 +#else
5399 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
5400 +# define PAGE_COPY_NOEXEC PAGE_COPY
5401 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
5402 +#endif
5403 +
5404 extern unsigned long page_kernel;
5405
5406 #ifdef MODULE
5407 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5408 index f6ae2b2..b03ffc7 100644
5409 --- a/arch/sparc/include/asm/pgtsrmmu.h
5410 +++ b/arch/sparc/include/asm/pgtsrmmu.h
5411 @@ -115,6 +115,13 @@
5412 SRMMU_EXEC | SRMMU_REF)
5413 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5414 SRMMU_EXEC | SRMMU_REF)
5415 +
5416 +#ifdef CONFIG_PAX_PAGEEXEC
5417 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5418 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5419 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5420 +#endif
5421 +
5422 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5423 SRMMU_DIRTY | SRMMU_REF)
5424
5425 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5426 index 9689176..63c18ea 100644
5427 --- a/arch/sparc/include/asm/spinlock_64.h
5428 +++ b/arch/sparc/include/asm/spinlock_64.h
5429 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
5430
5431 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5432
5433 -static void inline arch_read_lock(arch_rwlock_t *lock)
5434 +static inline void arch_read_lock(arch_rwlock_t *lock)
5435 {
5436 unsigned long tmp1, tmp2;
5437
5438 __asm__ __volatile__ (
5439 "1: ldsw [%2], %0\n"
5440 " brlz,pn %0, 2f\n"
5441 -"4: add %0, 1, %1\n"
5442 +"4: addcc %0, 1, %1\n"
5443 +
5444 +#ifdef CONFIG_PAX_REFCOUNT
5445 +" tvs %%icc, 6\n"
5446 +#endif
5447 +
5448 " cas [%2], %0, %1\n"
5449 " cmp %0, %1\n"
5450 " bne,pn %%icc, 1b\n"
5451 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
5452 " .previous"
5453 : "=&r" (tmp1), "=&r" (tmp2)
5454 : "r" (lock)
5455 - : "memory");
5456 + : "memory", "cc");
5457 }
5458
5459 -static int inline arch_read_trylock(arch_rwlock_t *lock)
5460 +static inline int arch_read_trylock(arch_rwlock_t *lock)
5461 {
5462 int tmp1, tmp2;
5463
5464 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5465 "1: ldsw [%2], %0\n"
5466 " brlz,a,pn %0, 2f\n"
5467 " mov 0, %0\n"
5468 -" add %0, 1, %1\n"
5469 +" addcc %0, 1, %1\n"
5470 +
5471 +#ifdef CONFIG_PAX_REFCOUNT
5472 +" tvs %%icc, 6\n"
5473 +#endif
5474 +
5475 " cas [%2], %0, %1\n"
5476 " cmp %0, %1\n"
5477 " bne,pn %%icc, 1b\n"
5478 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
5479 return tmp1;
5480 }
5481
5482 -static void inline arch_read_unlock(arch_rwlock_t *lock)
5483 +static inline void arch_read_unlock(arch_rwlock_t *lock)
5484 {
5485 unsigned long tmp1, tmp2;
5486
5487 __asm__ __volatile__(
5488 "1: lduw [%2], %0\n"
5489 -" sub %0, 1, %1\n"
5490 +" subcc %0, 1, %1\n"
5491 +
5492 +#ifdef CONFIG_PAX_REFCOUNT
5493 +" tvs %%icc, 6\n"
5494 +#endif
5495 +
5496 " cas [%2], %0, %1\n"
5497 " cmp %0, %1\n"
5498 " bne,pn %%xcc, 1b\n"
5499 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
5500 : "memory");
5501 }
5502
5503 -static void inline arch_write_lock(arch_rwlock_t *lock)
5504 +static inline void arch_write_lock(arch_rwlock_t *lock)
5505 {
5506 unsigned long mask, tmp1, tmp2;
5507
5508 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
5509 : "memory");
5510 }
5511
5512 -static void inline arch_write_unlock(arch_rwlock_t *lock)
5513 +static inline void arch_write_unlock(arch_rwlock_t *lock)
5514 {
5515 __asm__ __volatile__(
5516 " stw %%g0, [%0]"
5517 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
5518 : "memory");
5519 }
5520
5521 -static int inline arch_write_trylock(arch_rwlock_t *lock)
5522 +static inline int arch_write_trylock(arch_rwlock_t *lock)
5523 {
5524 unsigned long mask, tmp1, tmp2, result;
5525
5526 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5527 index c2a1080..21ed218 100644
5528 --- a/arch/sparc/include/asm/thread_info_32.h
5529 +++ b/arch/sparc/include/asm/thread_info_32.h
5530 @@ -50,6 +50,8 @@ struct thread_info {
5531 unsigned long w_saved;
5532
5533 struct restart_block restart_block;
5534 +
5535 + unsigned long lowest_stack;
5536 };
5537
5538 /*
5539 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5540 index 01d057f..13a7d2f 100644
5541 --- a/arch/sparc/include/asm/thread_info_64.h
5542 +++ b/arch/sparc/include/asm/thread_info_64.h
5543 @@ -63,6 +63,8 @@ struct thread_info {
5544 struct pt_regs *kern_una_regs;
5545 unsigned int kern_una_insn;
5546
5547 + unsigned long lowest_stack;
5548 +
5549 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5550 };
5551
5552 @@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5553 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5554 /* flag bit 6 is available */
5555 #define TIF_32BIT 7 /* 32-bit binary */
5556 -/* flag bit 8 is available */
5557 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5558 #define TIF_SECCOMP 9 /* secure computing */
5559 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5560 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5561 +
5562 /* NOTE: Thread flags >= 12 should be ones we have no interest
5563 * in using in assembly, else we can't use the mask as
5564 * an immediate value in instructions such as andcc.
5565 @@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5566 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5567 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5568 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5569 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5570
5571 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5572 _TIF_DO_NOTIFY_RESUME_MASK | \
5573 _TIF_NEED_RESCHED)
5574 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5575
5576 +#define _TIF_WORK_SYSCALL \
5577 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5578 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5579 +
5580 +
5581 /*
5582 * Thread-synchronous status.
5583 *
5584 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5585 index e88fbe5..96b0ce5 100644
5586 --- a/arch/sparc/include/asm/uaccess.h
5587 +++ b/arch/sparc/include/asm/uaccess.h
5588 @@ -1,5 +1,13 @@
5589 #ifndef ___ASM_SPARC_UACCESS_H
5590 #define ___ASM_SPARC_UACCESS_H
5591 +
5592 +#ifdef __KERNEL__
5593 +#ifndef __ASSEMBLY__
5594 +#include <linux/types.h>
5595 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
5596 +#endif
5597 +#endif
5598 +
5599 #if defined(__sparc__) && defined(__arch64__)
5600 #include <asm/uaccess_64.h>
5601 #else
5602 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5603 index 8303ac4..07f333d 100644
5604 --- a/arch/sparc/include/asm/uaccess_32.h
5605 +++ b/arch/sparc/include/asm/uaccess_32.h
5606 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
5607
5608 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5609 {
5610 - if (n && __access_ok((unsigned long) to, n))
5611 + if ((long)n < 0)
5612 + return n;
5613 +
5614 + if (n && __access_ok((unsigned long) to, n)) {
5615 + if (!__builtin_constant_p(n))
5616 + check_object_size(from, n, true);
5617 return __copy_user(to, (__force void __user *) from, n);
5618 - else
5619 + } else
5620 return n;
5621 }
5622
5623 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5624 {
5625 + if ((long)n < 0)
5626 + return n;
5627 +
5628 + if (!__builtin_constant_p(n))
5629 + check_object_size(from, n, true);
5630 +
5631 return __copy_user(to, (__force void __user *) from, n);
5632 }
5633
5634 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
5635 {
5636 - if (n && __access_ok((unsigned long) from, n))
5637 + if ((long)n < 0)
5638 + return n;
5639 +
5640 + if (n && __access_ok((unsigned long) from, n)) {
5641 + if (!__builtin_constant_p(n))
5642 + check_object_size(to, n, false);
5643 return __copy_user((__force void __user *) to, from, n);
5644 - else
5645 + } else
5646 return n;
5647 }
5648
5649 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5650 {
5651 + if ((long)n < 0)
5652 + return n;
5653 +
5654 return __copy_user((__force void __user *) to, from, n);
5655 }
5656
5657 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
5658 index a1091afb..380228e 100644
5659 --- a/arch/sparc/include/asm/uaccess_64.h
5660 +++ b/arch/sparc/include/asm/uaccess_64.h
5661 @@ -10,6 +10,7 @@
5662 #include <linux/compiler.h>
5663 #include <linux/string.h>
5664 #include <linux/thread_info.h>
5665 +#include <linux/kernel.h>
5666 #include <asm/asi.h>
5667 #include <asm/spitfire.h>
5668 #include <asm-generic/uaccess-unaligned.h>
5669 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
5670 static inline unsigned long __must_check
5671 copy_from_user(void *to, const void __user *from, unsigned long size)
5672 {
5673 - unsigned long ret = ___copy_from_user(to, from, size);
5674 + unsigned long ret;
5675
5676 + if ((long)size < 0 || size > INT_MAX)
5677 + return size;
5678 +
5679 + if (!__builtin_constant_p(size))
5680 + check_object_size(to, size, false);
5681 +
5682 + ret = ___copy_from_user(to, from, size);
5683 if (unlikely(ret))
5684 ret = copy_from_user_fixup(to, from, size);
5685
5686 @@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
5687 static inline unsigned long __must_check
5688 copy_to_user(void __user *to, const void *from, unsigned long size)
5689 {
5690 - unsigned long ret = ___copy_to_user(to, from, size);
5691 + unsigned long ret;
5692
5693 + if ((long)size < 0 || size > INT_MAX)
5694 + return size;
5695 +
5696 + if (!__builtin_constant_p(size))
5697 + check_object_size(from, size, true);
5698 +
5699 + ret = ___copy_to_user(to, from, size);
5700 if (unlikely(ret))
5701 ret = copy_to_user_fixup(to, from, size);
5702 return ret;
5703 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5704 index cb85458..e063f17 100644
5705 --- a/arch/sparc/kernel/Makefile
5706 +++ b/arch/sparc/kernel/Makefile
5707 @@ -3,7 +3,7 @@
5708 #
5709
5710 asflags-y := -ansi
5711 -ccflags-y := -Werror
5712 +#ccflags-y := -Werror
5713
5714 extra-y := head_$(BITS).o
5715 extra-y += init_task.o
5716 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
5717 index efa0754..74b03fe 100644
5718 --- a/arch/sparc/kernel/process_32.c
5719 +++ b/arch/sparc/kernel/process_32.c
5720 @@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
5721 rw->ins[4], rw->ins[5],
5722 rw->ins[6],
5723 rw->ins[7]);
5724 - printk("%pS\n", (void *) rw->ins[7]);
5725 + printk("%pA\n", (void *) rw->ins[7]);
5726 rw = (struct reg_window32 *) rw->ins[6];
5727 }
5728 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
5729 @@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
5730
5731 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5732 r->psr, r->pc, r->npc, r->y, print_tainted());
5733 - printk("PC: <%pS>\n", (void *) r->pc);
5734 + printk("PC: <%pA>\n", (void *) r->pc);
5735 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5736 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5737 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5738 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5739 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5740 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5741 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5742 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5743
5744 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5745 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
5746 @@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5747 rw = (struct reg_window32 *) fp;
5748 pc = rw->ins[7];
5749 printk("[%08lx : ", pc);
5750 - printk("%pS ] ", (void *) pc);
5751 + printk("%pA ] ", (void *) pc);
5752 fp = rw->ins[6];
5753 } while (++count < 16);
5754 printk("\n");
5755 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
5756 index aff0c72..9067b39 100644
5757 --- a/arch/sparc/kernel/process_64.c
5758 +++ b/arch/sparc/kernel/process_64.c
5759 @@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
5760 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5761 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5762 if (regs->tstate & TSTATE_PRIV)
5763 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5764 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5765 }
5766
5767 void show_regs(struct pt_regs *regs)
5768 {
5769 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5770 regs->tpc, regs->tnpc, regs->y, print_tainted());
5771 - printk("TPC: <%pS>\n", (void *) regs->tpc);
5772 + printk("TPC: <%pA>\n", (void *) regs->tpc);
5773 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5774 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5775 regs->u_regs[3]);
5776 @@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
5777 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5778 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5779 regs->u_regs[15]);
5780 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5781 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5782 show_regwindow(regs);
5783 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5784 }
5785 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5786 ((tp && tp->task) ? tp->task->pid : -1));
5787
5788 if (gp->tstate & TSTATE_PRIV) {
5789 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5790 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5791 (void *) gp->tpc,
5792 (void *) gp->o7,
5793 (void *) gp->i7,
5794 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
5795 index 6f97c07..b1300ec 100644
5796 --- a/arch/sparc/kernel/ptrace_64.c
5797 +++ b/arch/sparc/kernel/ptrace_64.c
5798 @@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5799 return ret;
5800 }
5801
5802 +#ifdef CONFIG_GRKERNSEC_SETXID
5803 +extern void gr_delayed_cred_worker(void);
5804 +#endif
5805 +
5806 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5807 {
5808 int ret = 0;
5809 @@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5810 /* do the secure computing check first */
5811 secure_computing(regs->u_regs[UREG_G1]);
5812
5813 +#ifdef CONFIG_GRKERNSEC_SETXID
5814 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5815 + gr_delayed_cred_worker();
5816 +#endif
5817 +
5818 if (test_thread_flag(TIF_SYSCALL_TRACE))
5819 ret = tracehook_report_syscall_entry(regs);
5820
5821 @@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5822
5823 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5824 {
5825 +#ifdef CONFIG_GRKERNSEC_SETXID
5826 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5827 + gr_delayed_cred_worker();
5828 +#endif
5829 +
5830 audit_syscall_exit(regs);
5831
5832 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
5833 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5834 index 42b282f..28ce9f2 100644
5835 --- a/arch/sparc/kernel/sys_sparc_32.c
5836 +++ b/arch/sparc/kernel/sys_sparc_32.c
5837 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5838 if (ARCH_SUN4C && len > 0x20000000)
5839 return -ENOMEM;
5840 if (!addr)
5841 - addr = TASK_UNMAPPED_BASE;
5842 + addr = current->mm->mmap_base;
5843
5844 if (flags & MAP_SHARED)
5845 addr = COLOUR_ALIGN(addr);
5846 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5847 }
5848 if (TASK_SIZE - PAGE_SIZE - len < addr)
5849 return -ENOMEM;
5850 - if (!vmm || addr + len <= vmm->vm_start)
5851 + if (check_heap_stack_gap(vmm, addr, len))
5852 return addr;
5853 addr = vmm->vm_end;
5854 if (flags & MAP_SHARED)
5855 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5856 index 3ee51f1..2ba4913 100644
5857 --- a/arch/sparc/kernel/sys_sparc_64.c
5858 +++ b/arch/sparc/kernel/sys_sparc_64.c
5859 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5860 /* We do not accept a shared mapping if it would violate
5861 * cache aliasing constraints.
5862 */
5863 - if ((flags & MAP_SHARED) &&
5864 + if ((filp || (flags & MAP_SHARED)) &&
5865 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5866 return -EINVAL;
5867 return addr;
5868 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5869 if (filp || (flags & MAP_SHARED))
5870 do_color_align = 1;
5871
5872 +#ifdef CONFIG_PAX_RANDMMAP
5873 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5874 +#endif
5875 +
5876 if (addr) {
5877 if (do_color_align)
5878 addr = COLOUR_ALIGN(addr, pgoff);
5879 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5880 addr = PAGE_ALIGN(addr);
5881
5882 vma = find_vma(mm, addr);
5883 - if (task_size - len >= addr &&
5884 - (!vma || addr + len <= vma->vm_start))
5885 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5886 return addr;
5887 }
5888
5889 if (len > mm->cached_hole_size) {
5890 - start_addr = addr = mm->free_area_cache;
5891 + start_addr = addr = mm->free_area_cache;
5892 } else {
5893 - start_addr = addr = TASK_UNMAPPED_BASE;
5894 + start_addr = addr = mm->mmap_base;
5895 mm->cached_hole_size = 0;
5896 }
5897
5898 @@ -174,14 +177,14 @@ full_search:
5899 vma = find_vma(mm, VA_EXCLUDE_END);
5900 }
5901 if (unlikely(task_size < addr)) {
5902 - if (start_addr != TASK_UNMAPPED_BASE) {
5903 - start_addr = addr = TASK_UNMAPPED_BASE;
5904 + if (start_addr != mm->mmap_base) {
5905 + start_addr = addr = mm->mmap_base;
5906 mm->cached_hole_size = 0;
5907 goto full_search;
5908 }
5909 return -ENOMEM;
5910 }
5911 - if (likely(!vma || addr + len <= vma->vm_start)) {
5912 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5913 /*
5914 * Remember the place where we stopped the search:
5915 */
5916 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5917 /* We do not accept a shared mapping if it would violate
5918 * cache aliasing constraints.
5919 */
5920 - if ((flags & MAP_SHARED) &&
5921 + if ((filp || (flags & MAP_SHARED)) &&
5922 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5923 return -EINVAL;
5924 return addr;
5925 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5926 addr = PAGE_ALIGN(addr);
5927
5928 vma = find_vma(mm, addr);
5929 - if (task_size - len >= addr &&
5930 - (!vma || addr + len <= vma->vm_start))
5931 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5932 return addr;
5933 }
5934
5935 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5936 /* make sure it can fit in the remaining address space */
5937 if (likely(addr > len)) {
5938 vma = find_vma(mm, addr-len);
5939 - if (!vma || addr <= vma->vm_start) {
5940 + if (check_heap_stack_gap(vma, addr - len, len)) {
5941 /* remember the address as a hint for next time */
5942 return (mm->free_area_cache = addr-len);
5943 }
5944 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5945 if (unlikely(mm->mmap_base < len))
5946 goto bottomup;
5947
5948 - addr = mm->mmap_base-len;
5949 - if (do_color_align)
5950 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5951 + addr = mm->mmap_base - len;
5952
5953 do {
5954 + if (do_color_align)
5955 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5956 /*
5957 * Lookup failure means no vma is above this address,
5958 * else if new region fits below vma->vm_start,
5959 * return with success:
5960 */
5961 vma = find_vma(mm, addr);
5962 - if (likely(!vma || addr+len <= vma->vm_start)) {
5963 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5964 /* remember the address as a hint for next time */
5965 return (mm->free_area_cache = addr);
5966 }
5967 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5968 mm->cached_hole_size = vma->vm_start - addr;
5969
5970 /* try just below the current vma->vm_start */
5971 - addr = vma->vm_start-len;
5972 - if (do_color_align)
5973 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5974 - } while (likely(len < vma->vm_start));
5975 + addr = skip_heap_stack_gap(vma, len);
5976 + } while (!IS_ERR_VALUE(addr));
5977
5978 bottomup:
5979 /*
5980 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5981 gap == RLIM_INFINITY ||
5982 sysctl_legacy_va_layout) {
5983 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5984 +
5985 +#ifdef CONFIG_PAX_RANDMMAP
5986 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5987 + mm->mmap_base += mm->delta_mmap;
5988 +#endif
5989 +
5990 mm->get_unmapped_area = arch_get_unmapped_area;
5991 mm->unmap_area = arch_unmap_area;
5992 } else {
5993 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5994 gap = (task_size / 6 * 5);
5995
5996 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5997 +
5998 +#ifdef CONFIG_PAX_RANDMMAP
5999 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6000 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6001 +#endif
6002 +
6003 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6004 mm->unmap_area = arch_unmap_area_topdown;
6005 }
6006 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6007 index 1d7e274..b39c527 100644
6008 --- a/arch/sparc/kernel/syscalls.S
6009 +++ b/arch/sparc/kernel/syscalls.S
6010 @@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6011 #endif
6012 .align 32
6013 1: ldx [%g6 + TI_FLAGS], %l5
6014 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6015 + andcc %l5, _TIF_WORK_SYSCALL, %g0
6016 be,pt %icc, rtrap
6017 nop
6018 call syscall_trace_leave
6019 @@ -179,7 +179,7 @@ linux_sparc_syscall32:
6020
6021 srl %i5, 0, %o5 ! IEU1
6022 srl %i2, 0, %o2 ! IEU0 Group
6023 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6024 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6025 bne,pn %icc, linux_syscall_trace32 ! CTI
6026 mov %i0, %l5 ! IEU1
6027 call %l7 ! CTI Group brk forced
6028 @@ -202,7 +202,7 @@ linux_sparc_syscall:
6029
6030 mov %i3, %o3 ! IEU1
6031 mov %i4, %o4 ! IEU0 Group
6032 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6033 + andcc %l0, _TIF_WORK_SYSCALL, %g0
6034 bne,pn %icc, linux_syscall_trace ! CTI Group
6035 mov %i0, %l5 ! IEU0
6036 2: call %l7 ! CTI Group brk forced
6037 @@ -226,7 +226,7 @@ ret_sys_call:
6038
6039 cmp %o0, -ERESTART_RESTARTBLOCK
6040 bgeu,pn %xcc, 1f
6041 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6042 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6043 80:
6044 /* System call success, clear Carry condition code. */
6045 andn %g3, %g2, %g3
6046 @@ -241,7 +241,7 @@ ret_sys_call:
6047 /* System call failure, set Carry condition code.
6048 * Also, get abs(errno) to return to the process.
6049 */
6050 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6051 + andcc %l0, _TIF_WORK_SYSCALL, %l6
6052 sub %g0, %o0, %o0
6053 or %g3, %g2, %g3
6054 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
6055 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
6056 index d2de213..6b22bc3 100644
6057 --- a/arch/sparc/kernel/traps_32.c
6058 +++ b/arch/sparc/kernel/traps_32.c
6059 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
6060 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6061 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6062
6063 +extern void gr_handle_kernel_exploit(void);
6064 +
6065 void die_if_kernel(char *str, struct pt_regs *regs)
6066 {
6067 static int die_counter;
6068 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6069 count++ < 30 &&
6070 (((unsigned long) rw) >= PAGE_OFFSET) &&
6071 !(((unsigned long) rw) & 0x7)) {
6072 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
6073 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
6074 (void *) rw->ins[7]);
6075 rw = (struct reg_window32 *)rw->ins[6];
6076 }
6077 }
6078 printk("Instruction DUMP:");
6079 instruction_dump ((unsigned long *) regs->pc);
6080 - if(regs->psr & PSR_PS)
6081 + if(regs->psr & PSR_PS) {
6082 + gr_handle_kernel_exploit();
6083 do_exit(SIGKILL);
6084 + }
6085 do_exit(SIGSEGV);
6086 }
6087
6088 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
6089 index c72fdf5..743a344 100644
6090 --- a/arch/sparc/kernel/traps_64.c
6091 +++ b/arch/sparc/kernel/traps_64.c
6092 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
6093 i + 1,
6094 p->trapstack[i].tstate, p->trapstack[i].tpc,
6095 p->trapstack[i].tnpc, p->trapstack[i].tt);
6096 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6097 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6098 }
6099 }
6100
6101 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
6102
6103 lvl -= 0x100;
6104 if (regs->tstate & TSTATE_PRIV) {
6105 +
6106 +#ifdef CONFIG_PAX_REFCOUNT
6107 + if (lvl == 6)
6108 + pax_report_refcount_overflow(regs);
6109 +#endif
6110 +
6111 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6112 die_if_kernel(buffer, regs);
6113 }
6114 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
6115 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6116 {
6117 char buffer[32];
6118 -
6119 +
6120 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6121 0, lvl, SIGTRAP) == NOTIFY_STOP)
6122 return;
6123
6124 +#ifdef CONFIG_PAX_REFCOUNT
6125 + if (lvl == 6)
6126 + pax_report_refcount_overflow(regs);
6127 +#endif
6128 +
6129 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6130
6131 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
6132 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
6133 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6134 printk("%s" "ERROR(%d): ",
6135 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6136 - printk("TPC<%pS>\n", (void *) regs->tpc);
6137 + printk("TPC<%pA>\n", (void *) regs->tpc);
6138 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6139 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6140 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
6141 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6142 smp_processor_id(),
6143 (type & 0x1) ? 'I' : 'D',
6144 regs->tpc);
6145 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6146 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6147 panic("Irrecoverable Cheetah+ parity error.");
6148 }
6149
6150 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
6151 smp_processor_id(),
6152 (type & 0x1) ? 'I' : 'D',
6153 regs->tpc);
6154 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6155 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6156 }
6157
6158 struct sun4v_error_entry {
6159 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
6160
6161 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6162 regs->tpc, tl);
6163 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6164 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6165 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6166 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6167 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6168 (void *) regs->u_regs[UREG_I7]);
6169 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6170 "pte[%lx] error[%lx]\n",
6171 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
6172
6173 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6174 regs->tpc, tl);
6175 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6176 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6177 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6178 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6179 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6180 (void *) regs->u_regs[UREG_I7]);
6181 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6182 "pte[%lx] error[%lx]\n",
6183 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6184 fp = (unsigned long)sf->fp + STACK_BIAS;
6185 }
6186
6187 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6188 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6189 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6190 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6191 int index = tsk->curr_ret_stack;
6192 if (tsk->ret_stack && index >= graph) {
6193 pc = tsk->ret_stack[index - graph].ret;
6194 - printk(" [%016lx] %pS\n", pc, (void *) pc);
6195 + printk(" [%016lx] %pA\n", pc, (void *) pc);
6196 graph++;
6197 }
6198 }
6199 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6200 return (struct reg_window *) (fp + STACK_BIAS);
6201 }
6202
6203 +extern void gr_handle_kernel_exploit(void);
6204 +
6205 void die_if_kernel(char *str, struct pt_regs *regs)
6206 {
6207 static int die_counter;
6208 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6209 while (rw &&
6210 count++ < 30 &&
6211 kstack_valid(tp, (unsigned long) rw)) {
6212 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
6213 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
6214 (void *) rw->ins[7]);
6215
6216 rw = kernel_stack_up(rw);
6217 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6218 }
6219 user_instruction_dump ((unsigned int __user *) regs->tpc);
6220 }
6221 - if (regs->tstate & TSTATE_PRIV)
6222 + if (regs->tstate & TSTATE_PRIV) {
6223 + gr_handle_kernel_exploit();
6224 do_exit(SIGKILL);
6225 + }
6226 do_exit(SIGSEGV);
6227 }
6228 EXPORT_SYMBOL(die_if_kernel);
6229 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
6230 index dae85bc..af1e19d 100644
6231 --- a/arch/sparc/kernel/unaligned_64.c
6232 +++ b/arch/sparc/kernel/unaligned_64.c
6233 @@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6234 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6235
6236 if (__ratelimit(&ratelimit)) {
6237 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
6238 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
6239 regs->tpc, (void *) regs->tpc);
6240 }
6241 }
6242 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6243 index a3fc437..fea9957 100644
6244 --- a/arch/sparc/lib/Makefile
6245 +++ b/arch/sparc/lib/Makefile
6246 @@ -2,7 +2,7 @@
6247 #
6248
6249 asflags-y := -ansi -DST_DIV0=0x02
6250 -ccflags-y := -Werror
6251 +#ccflags-y := -Werror
6252
6253 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6254 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6255 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6256 index 59186e0..f747d7a 100644
6257 --- a/arch/sparc/lib/atomic_64.S
6258 +++ b/arch/sparc/lib/atomic_64.S
6259 @@ -18,7 +18,12 @@
6260 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6261 BACKOFF_SETUP(%o2)
6262 1: lduw [%o1], %g1
6263 - add %g1, %o0, %g7
6264 + addcc %g1, %o0, %g7
6265 +
6266 +#ifdef CONFIG_PAX_REFCOUNT
6267 + tvs %icc, 6
6268 +#endif
6269 +
6270 cas [%o1], %g1, %g7
6271 cmp %g1, %g7
6272 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6273 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6274 2: BACKOFF_SPIN(%o2, %o3, 1b)
6275 .size atomic_add, .-atomic_add
6276
6277 + .globl atomic_add_unchecked
6278 + .type atomic_add_unchecked,#function
6279 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6280 + BACKOFF_SETUP(%o2)
6281 +1: lduw [%o1], %g1
6282 + add %g1, %o0, %g7
6283 + cas [%o1], %g1, %g7
6284 + cmp %g1, %g7
6285 + bne,pn %icc, 2f
6286 + nop
6287 + retl
6288 + nop
6289 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6290 + .size atomic_add_unchecked, .-atomic_add_unchecked
6291 +
6292 .globl atomic_sub
6293 .type atomic_sub,#function
6294 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6295 BACKOFF_SETUP(%o2)
6296 1: lduw [%o1], %g1
6297 - sub %g1, %o0, %g7
6298 + subcc %g1, %o0, %g7
6299 +
6300 +#ifdef CONFIG_PAX_REFCOUNT
6301 + tvs %icc, 6
6302 +#endif
6303 +
6304 cas [%o1], %g1, %g7
6305 cmp %g1, %g7
6306 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6307 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6308 2: BACKOFF_SPIN(%o2, %o3, 1b)
6309 .size atomic_sub, .-atomic_sub
6310
6311 + .globl atomic_sub_unchecked
6312 + .type atomic_sub_unchecked,#function
6313 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6314 + BACKOFF_SETUP(%o2)
6315 +1: lduw [%o1], %g1
6316 + sub %g1, %o0, %g7
6317 + cas [%o1], %g1, %g7
6318 + cmp %g1, %g7
6319 + bne,pn %icc, 2f
6320 + nop
6321 + retl
6322 + nop
6323 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6324 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
6325 +
6326 .globl atomic_add_ret
6327 .type atomic_add_ret,#function
6328 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6329 BACKOFF_SETUP(%o2)
6330 1: lduw [%o1], %g1
6331 - add %g1, %o0, %g7
6332 + addcc %g1, %o0, %g7
6333 +
6334 +#ifdef CONFIG_PAX_REFCOUNT
6335 + tvs %icc, 6
6336 +#endif
6337 +
6338 cas [%o1], %g1, %g7
6339 cmp %g1, %g7
6340 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6341 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6342 2: BACKOFF_SPIN(%o2, %o3, 1b)
6343 .size atomic_add_ret, .-atomic_add_ret
6344
6345 + .globl atomic_add_ret_unchecked
6346 + .type atomic_add_ret_unchecked,#function
6347 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6348 + BACKOFF_SETUP(%o2)
6349 +1: lduw [%o1], %g1
6350 + addcc %g1, %o0, %g7
6351 + cas [%o1], %g1, %g7
6352 + cmp %g1, %g7
6353 + bne,pn %icc, 2f
6354 + add %g7, %o0, %g7
6355 + sra %g7, 0, %o0
6356 + retl
6357 + nop
6358 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6359 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6360 +
6361 .globl atomic_sub_ret
6362 .type atomic_sub_ret,#function
6363 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6364 BACKOFF_SETUP(%o2)
6365 1: lduw [%o1], %g1
6366 - sub %g1, %o0, %g7
6367 + subcc %g1, %o0, %g7
6368 +
6369 +#ifdef CONFIG_PAX_REFCOUNT
6370 + tvs %icc, 6
6371 +#endif
6372 +
6373 cas [%o1], %g1, %g7
6374 cmp %g1, %g7
6375 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
6376 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6377 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6378 BACKOFF_SETUP(%o2)
6379 1: ldx [%o1], %g1
6380 - add %g1, %o0, %g7
6381 + addcc %g1, %o0, %g7
6382 +
6383 +#ifdef CONFIG_PAX_REFCOUNT
6384 + tvs %xcc, 6
6385 +#endif
6386 +
6387 casx [%o1], %g1, %g7
6388 cmp %g1, %g7
6389 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6390 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6391 2: BACKOFF_SPIN(%o2, %o3, 1b)
6392 .size atomic64_add, .-atomic64_add
6393
6394 + .globl atomic64_add_unchecked
6395 + .type atomic64_add_unchecked,#function
6396 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6397 + BACKOFF_SETUP(%o2)
6398 +1: ldx [%o1], %g1
6399 + addcc %g1, %o0, %g7
6400 + casx [%o1], %g1, %g7
6401 + cmp %g1, %g7
6402 + bne,pn %xcc, 2f
6403 + nop
6404 + retl
6405 + nop
6406 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6407 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
6408 +
6409 .globl atomic64_sub
6410 .type atomic64_sub,#function
6411 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6412 BACKOFF_SETUP(%o2)
6413 1: ldx [%o1], %g1
6414 - sub %g1, %o0, %g7
6415 + subcc %g1, %o0, %g7
6416 +
6417 +#ifdef CONFIG_PAX_REFCOUNT
6418 + tvs %xcc, 6
6419 +#endif
6420 +
6421 casx [%o1], %g1, %g7
6422 cmp %g1, %g7
6423 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6424 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6425 2: BACKOFF_SPIN(%o2, %o3, 1b)
6426 .size atomic64_sub, .-atomic64_sub
6427
6428 + .globl atomic64_sub_unchecked
6429 + .type atomic64_sub_unchecked,#function
6430 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6431 + BACKOFF_SETUP(%o2)
6432 +1: ldx [%o1], %g1
6433 + subcc %g1, %o0, %g7
6434 + casx [%o1], %g1, %g7
6435 + cmp %g1, %g7
6436 + bne,pn %xcc, 2f
6437 + nop
6438 + retl
6439 + nop
6440 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6441 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6442 +
6443 .globl atomic64_add_ret
6444 .type atomic64_add_ret,#function
6445 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6446 BACKOFF_SETUP(%o2)
6447 1: ldx [%o1], %g1
6448 - add %g1, %o0, %g7
6449 + addcc %g1, %o0, %g7
6450 +
6451 +#ifdef CONFIG_PAX_REFCOUNT
6452 + tvs %xcc, 6
6453 +#endif
6454 +
6455 casx [%o1], %g1, %g7
6456 cmp %g1, %g7
6457 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6458 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6459 2: BACKOFF_SPIN(%o2, %o3, 1b)
6460 .size atomic64_add_ret, .-atomic64_add_ret
6461
6462 + .globl atomic64_add_ret_unchecked
6463 + .type atomic64_add_ret_unchecked,#function
6464 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6465 + BACKOFF_SETUP(%o2)
6466 +1: ldx [%o1], %g1
6467 + addcc %g1, %o0, %g7
6468 + casx [%o1], %g1, %g7
6469 + cmp %g1, %g7
6470 + bne,pn %xcc, 2f
6471 + add %g7, %o0, %g7
6472 + mov %g7, %o0
6473 + retl
6474 + nop
6475 +2: BACKOFF_SPIN(%o2, %o3, 1b)
6476 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6477 +
6478 .globl atomic64_sub_ret
6479 .type atomic64_sub_ret,#function
6480 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: ldx [%o1], %g1
6483 - sub %g1, %o0, %g7
6484 + subcc %g1, %o0, %g7
6485 +
6486 +#ifdef CONFIG_PAX_REFCOUNT
6487 + tvs %xcc, 6
6488 +#endif
6489 +
6490 casx [%o1], %g1, %g7
6491 cmp %g1, %g7
6492 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
6493 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
6494 index f73c224..662af10 100644
6495 --- a/arch/sparc/lib/ksyms.c
6496 +++ b/arch/sparc/lib/ksyms.c
6497 @@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6498
6499 /* Atomic counter implementation. */
6500 EXPORT_SYMBOL(atomic_add);
6501 +EXPORT_SYMBOL(atomic_add_unchecked);
6502 EXPORT_SYMBOL(atomic_add_ret);
6503 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
6504 EXPORT_SYMBOL(atomic_sub);
6505 +EXPORT_SYMBOL(atomic_sub_unchecked);
6506 EXPORT_SYMBOL(atomic_sub_ret);
6507 EXPORT_SYMBOL(atomic64_add);
6508 +EXPORT_SYMBOL(atomic64_add_unchecked);
6509 EXPORT_SYMBOL(atomic64_add_ret);
6510 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6511 EXPORT_SYMBOL(atomic64_sub);
6512 +EXPORT_SYMBOL(atomic64_sub_unchecked);
6513 EXPORT_SYMBOL(atomic64_sub_ret);
6514
6515 /* Atomic bit operations. */
6516 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
6517 index 301421c..e2535d1 100644
6518 --- a/arch/sparc/mm/Makefile
6519 +++ b/arch/sparc/mm/Makefile
6520 @@ -2,7 +2,7 @@
6521 #
6522
6523 asflags-y := -ansi
6524 -ccflags-y := -Werror
6525 +#ccflags-y := -Werror
6526
6527 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6528 obj-y += fault_$(BITS).o
6529 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
6530 index df3155a..eb708b8 100644
6531 --- a/arch/sparc/mm/fault_32.c
6532 +++ b/arch/sparc/mm/fault_32.c
6533 @@ -21,6 +21,9 @@
6534 #include <linux/perf_event.h>
6535 #include <linux/interrupt.h>
6536 #include <linux/kdebug.h>
6537 +#include <linux/slab.h>
6538 +#include <linux/pagemap.h>
6539 +#include <linux/compiler.h>
6540
6541 #include <asm/page.h>
6542 #include <asm/pgtable.h>
6543 @@ -207,6 +210,268 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6544 return safe_compute_effective_address(regs, insn);
6545 }
6546
6547 +#ifdef CONFIG_PAX_PAGEEXEC
6548 +#ifdef CONFIG_PAX_DLRESOLVE
6549 +static void pax_emuplt_close(struct vm_area_struct *vma)
6550 +{
6551 + vma->vm_mm->call_dl_resolve = 0UL;
6552 +}
6553 +
6554 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6555 +{
6556 + unsigned int *kaddr;
6557 +
6558 + vmf->page = alloc_page(GFP_HIGHUSER);
6559 + if (!vmf->page)
6560 + return VM_FAULT_OOM;
6561 +
6562 + kaddr = kmap(vmf->page);
6563 + memset(kaddr, 0, PAGE_SIZE);
6564 + kaddr[0] = 0x9DE3BFA8U; /* save */
6565 + flush_dcache_page(vmf->page);
6566 + kunmap(vmf->page);
6567 + return VM_FAULT_MAJOR;
6568 +}
6569 +
6570 +static const struct vm_operations_struct pax_vm_ops = {
6571 + .close = pax_emuplt_close,
6572 + .fault = pax_emuplt_fault
6573 +};
6574 +
6575 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6576 +{
6577 + int ret;
6578 +
6579 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6580 + vma->vm_mm = current->mm;
6581 + vma->vm_start = addr;
6582 + vma->vm_end = addr + PAGE_SIZE;
6583 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6584 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6585 + vma->vm_ops = &pax_vm_ops;
6586 +
6587 + ret = insert_vm_struct(current->mm, vma);
6588 + if (ret)
6589 + return ret;
6590 +
6591 + ++current->mm->total_vm;
6592 + return 0;
6593 +}
6594 +#endif
6595 +
6596 +/*
6597 + * PaX: decide what to do with offenders (regs->pc = fault address)
6598 + *
6599 + * returns 1 when task should be killed
6600 + * 2 when patched PLT trampoline was detected
6601 + * 3 when unpatched PLT trampoline was detected
6602 + */
6603 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6604 +{
6605 +
6606 +#ifdef CONFIG_PAX_EMUPLT
6607 + int err;
6608 +
6609 + do { /* PaX: patched PLT emulation #1 */
6610 + unsigned int sethi1, sethi2, jmpl;
6611 +
6612 + err = get_user(sethi1, (unsigned int *)regs->pc);
6613 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6614 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6615 +
6616 + if (err)
6617 + break;
6618 +
6619 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6620 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6621 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6622 + {
6623 + unsigned int addr;
6624 +
6625 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6626 + addr = regs->u_regs[UREG_G1];
6627 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6628 + regs->pc = addr;
6629 + regs->npc = addr+4;
6630 + return 2;
6631 + }
6632 + } while (0);
6633 +
6634 + { /* PaX: patched PLT emulation #2 */
6635 + unsigned int ba;
6636 +
6637 + err = get_user(ba, (unsigned int *)regs->pc);
6638 +
6639 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6640 + unsigned int addr;
6641 +
6642 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6643 + regs->pc = addr;
6644 + regs->npc = addr+4;
6645 + return 2;
6646 + }
6647 + }
6648 +
6649 + do { /* PaX: patched PLT emulation #3 */
6650 + unsigned int sethi, jmpl, nop;
6651 +
6652 + err = get_user(sethi, (unsigned int *)regs->pc);
6653 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
6654 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6655 +
6656 + if (err)
6657 + break;
6658 +
6659 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6660 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6661 + nop == 0x01000000U)
6662 + {
6663 + unsigned int addr;
6664 +
6665 + addr = (sethi & 0x003FFFFFU) << 10;
6666 + regs->u_regs[UREG_G1] = addr;
6667 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6668 + regs->pc = addr;
6669 + regs->npc = addr+4;
6670 + return 2;
6671 + }
6672 + } while (0);
6673 +
6674 + do { /* PaX: unpatched PLT emulation step 1 */
6675 + unsigned int sethi, ba, nop;
6676 +
6677 + err = get_user(sethi, (unsigned int *)regs->pc);
6678 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
6679 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
6680 +
6681 + if (err)
6682 + break;
6683 +
6684 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6685 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6686 + nop == 0x01000000U)
6687 + {
6688 + unsigned int addr, save, call;
6689 +
6690 + if ((ba & 0xFFC00000U) == 0x30800000U)
6691 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6692 + else
6693 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6694 +
6695 + err = get_user(save, (unsigned int *)addr);
6696 + err |= get_user(call, (unsigned int *)(addr+4));
6697 + err |= get_user(nop, (unsigned int *)(addr+8));
6698 + if (err)
6699 + break;
6700 +
6701 +#ifdef CONFIG_PAX_DLRESOLVE
6702 + if (save == 0x9DE3BFA8U &&
6703 + (call & 0xC0000000U) == 0x40000000U &&
6704 + nop == 0x01000000U)
6705 + {
6706 + struct vm_area_struct *vma;
6707 + unsigned long call_dl_resolve;
6708 +
6709 + down_read(&current->mm->mmap_sem);
6710 + call_dl_resolve = current->mm->call_dl_resolve;
6711 + up_read(&current->mm->mmap_sem);
6712 + if (likely(call_dl_resolve))
6713 + goto emulate;
6714 +
6715 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6716 +
6717 + down_write(&current->mm->mmap_sem);
6718 + if (current->mm->call_dl_resolve) {
6719 + call_dl_resolve = current->mm->call_dl_resolve;
6720 + up_write(&current->mm->mmap_sem);
6721 + if (vma)
6722 + kmem_cache_free(vm_area_cachep, vma);
6723 + goto emulate;
6724 + }
6725 +
6726 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6727 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6728 + up_write(&current->mm->mmap_sem);
6729 + if (vma)
6730 + kmem_cache_free(vm_area_cachep, vma);
6731 + return 1;
6732 + }
6733 +
6734 + if (pax_insert_vma(vma, call_dl_resolve)) {
6735 + up_write(&current->mm->mmap_sem);
6736 + kmem_cache_free(vm_area_cachep, vma);
6737 + return 1;
6738 + }
6739 +
6740 + current->mm->call_dl_resolve = call_dl_resolve;
6741 + up_write(&current->mm->mmap_sem);
6742 +
6743 +emulate:
6744 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6745 + regs->pc = call_dl_resolve;
6746 + regs->npc = addr+4;
6747 + return 3;
6748 + }
6749 +#endif
6750 +
6751 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6752 + if ((save & 0xFFC00000U) == 0x05000000U &&
6753 + (call & 0xFFFFE000U) == 0x85C0A000U &&
6754 + nop == 0x01000000U)
6755 + {
6756 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6757 + regs->u_regs[UREG_G2] = addr + 4;
6758 + addr = (save & 0x003FFFFFU) << 10;
6759 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6760 + regs->pc = addr;
6761 + regs->npc = addr+4;
6762 + return 3;
6763 + }
6764 + }
6765 + } while (0);
6766 +
6767 + do { /* PaX: unpatched PLT emulation step 2 */
6768 + unsigned int save, call, nop;
6769 +
6770 + err = get_user(save, (unsigned int *)(regs->pc-4));
6771 + err |= get_user(call, (unsigned int *)regs->pc);
6772 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
6773 + if (err)
6774 + break;
6775 +
6776 + if (save == 0x9DE3BFA8U &&
6777 + (call & 0xC0000000U) == 0x40000000U &&
6778 + nop == 0x01000000U)
6779 + {
6780 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6781 +
6782 + regs->u_regs[UREG_RETPC] = regs->pc;
6783 + regs->pc = dl_resolve;
6784 + regs->npc = dl_resolve+4;
6785 + return 3;
6786 + }
6787 + } while (0);
6788 +#endif
6789 +
6790 + return 1;
6791 +}
6792 +
6793 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6794 +{
6795 + unsigned long i;
6796 +
6797 + printk(KERN_ERR "PAX: bytes at PC: ");
6798 + for (i = 0; i < 8; i++) {
6799 + unsigned int c;
6800 + if (get_user(c, (unsigned int *)pc+i))
6801 + printk(KERN_CONT "???????? ");
6802 + else
6803 + printk(KERN_CONT "%08x ", c);
6804 + }
6805 + printk("\n");
6806 +}
6807 +#endif
6808 +
6809 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6810 int text_fault)
6811 {
6812 @@ -282,6 +547,24 @@ good_area:
6813 if(!(vma->vm_flags & VM_WRITE))
6814 goto bad_area;
6815 } else {
6816 +
6817 +#ifdef CONFIG_PAX_PAGEEXEC
6818 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6819 + up_read(&mm->mmap_sem);
6820 + switch (pax_handle_fetch_fault(regs)) {
6821 +
6822 +#ifdef CONFIG_PAX_EMUPLT
6823 + case 2:
6824 + case 3:
6825 + return;
6826 +#endif
6827 +
6828 + }
6829 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6830 + do_group_exit(SIGKILL);
6831 + }
6832 +#endif
6833 +
6834 /* Allow reads even for write-only mappings */
6835 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6836 goto bad_area;
6837 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6838 index 1fe0429..aee2e87 100644
6839 --- a/arch/sparc/mm/fault_64.c
6840 +++ b/arch/sparc/mm/fault_64.c
6841 @@ -21,6 +21,9 @@
6842 #include <linux/kprobes.h>
6843 #include <linux/kdebug.h>
6844 #include <linux/percpu.h>
6845 +#include <linux/slab.h>
6846 +#include <linux/pagemap.h>
6847 +#include <linux/compiler.h>
6848
6849 #include <asm/page.h>
6850 #include <asm/pgtable.h>
6851 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6852 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6853 regs->tpc);
6854 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6855 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6856 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6857 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6858 dump_stack();
6859 unhandled_fault(regs->tpc, current, regs);
6860 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6861 show_regs(regs);
6862 }
6863
6864 +#ifdef CONFIG_PAX_PAGEEXEC
6865 +#ifdef CONFIG_PAX_DLRESOLVE
6866 +static void pax_emuplt_close(struct vm_area_struct *vma)
6867 +{
6868 + vma->vm_mm->call_dl_resolve = 0UL;
6869 +}
6870 +
6871 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6872 +{
6873 + unsigned int *kaddr;
6874 +
6875 + vmf->page = alloc_page(GFP_HIGHUSER);
6876 + if (!vmf->page)
6877 + return VM_FAULT_OOM;
6878 +
6879 + kaddr = kmap(vmf->page);
6880 + memset(kaddr, 0, PAGE_SIZE);
6881 + kaddr[0] = 0x9DE3BFA8U; /* save */
6882 + flush_dcache_page(vmf->page);
6883 + kunmap(vmf->page);
6884 + return VM_FAULT_MAJOR;
6885 +}
6886 +
6887 +static const struct vm_operations_struct pax_vm_ops = {
6888 + .close = pax_emuplt_close,
6889 + .fault = pax_emuplt_fault
6890 +};
6891 +
6892 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6893 +{
6894 + int ret;
6895 +
6896 + INIT_LIST_HEAD(&vma->anon_vma_chain);
6897 + vma->vm_mm = current->mm;
6898 + vma->vm_start = addr;
6899 + vma->vm_end = addr + PAGE_SIZE;
6900 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6901 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6902 + vma->vm_ops = &pax_vm_ops;
6903 +
6904 + ret = insert_vm_struct(current->mm, vma);
6905 + if (ret)
6906 + return ret;
6907 +
6908 + ++current->mm->total_vm;
6909 + return 0;
6910 +}
6911 +#endif
6912 +
6913 +/*
6914 + * PaX: decide what to do with offenders (regs->tpc = fault address)
6915 + *
6916 + * returns 1 when task should be killed
6917 + * 2 when patched PLT trampoline was detected
6918 + * 3 when unpatched PLT trampoline was detected
6919 + */
6920 +static int pax_handle_fetch_fault(struct pt_regs *regs)
6921 +{
6922 +
6923 +#ifdef CONFIG_PAX_EMUPLT
6924 + int err;
6925 +
6926 + do { /* PaX: patched PLT emulation #1 */
6927 + unsigned int sethi1, sethi2, jmpl;
6928 +
6929 + err = get_user(sethi1, (unsigned int *)regs->tpc);
6930 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6931 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6932 +
6933 + if (err)
6934 + break;
6935 +
6936 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6937 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
6938 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
6939 + {
6940 + unsigned long addr;
6941 +
6942 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6943 + addr = regs->u_regs[UREG_G1];
6944 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6945 +
6946 + if (test_thread_flag(TIF_32BIT))
6947 + addr &= 0xFFFFFFFFUL;
6948 +
6949 + regs->tpc = addr;
6950 + regs->tnpc = addr+4;
6951 + return 2;
6952 + }
6953 + } while (0);
6954 +
6955 + { /* PaX: patched PLT emulation #2 */
6956 + unsigned int ba;
6957 +
6958 + err = get_user(ba, (unsigned int *)regs->tpc);
6959 +
6960 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6961 + unsigned long addr;
6962 +
6963 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6964 +
6965 + if (test_thread_flag(TIF_32BIT))
6966 + addr &= 0xFFFFFFFFUL;
6967 +
6968 + regs->tpc = addr;
6969 + regs->tnpc = addr+4;
6970 + return 2;
6971 + }
6972 + }
6973 +
6974 + do { /* PaX: patched PLT emulation #3 */
6975 + unsigned int sethi, jmpl, nop;
6976 +
6977 + err = get_user(sethi, (unsigned int *)regs->tpc);
6978 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6979 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6980 +
6981 + if (err)
6982 + break;
6983 +
6984 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
6985 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6986 + nop == 0x01000000U)
6987 + {
6988 + unsigned long addr;
6989 +
6990 + addr = (sethi & 0x003FFFFFU) << 10;
6991 + regs->u_regs[UREG_G1] = addr;
6992 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6993 +
6994 + if (test_thread_flag(TIF_32BIT))
6995 + addr &= 0xFFFFFFFFUL;
6996 +
6997 + regs->tpc = addr;
6998 + regs->tnpc = addr+4;
6999 + return 2;
7000 + }
7001 + } while (0);
7002 +
7003 + do { /* PaX: patched PLT emulation #4 */
7004 + unsigned int sethi, mov1, call, mov2;
7005 +
7006 + err = get_user(sethi, (unsigned int *)regs->tpc);
7007 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7008 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
7009 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7010 +
7011 + if (err)
7012 + break;
7013 +
7014 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7015 + mov1 == 0x8210000FU &&
7016 + (call & 0xC0000000U) == 0x40000000U &&
7017 + mov2 == 0x9E100001U)
7018 + {
7019 + unsigned long addr;
7020 +
7021 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7022 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7023 +
7024 + if (test_thread_flag(TIF_32BIT))
7025 + addr &= 0xFFFFFFFFUL;
7026 +
7027 + regs->tpc = addr;
7028 + regs->tnpc = addr+4;
7029 + return 2;
7030 + }
7031 + } while (0);
7032 +
7033 + do { /* PaX: patched PLT emulation #5 */
7034 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
7035 +
7036 + err = get_user(sethi, (unsigned int *)regs->tpc);
7037 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7038 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7039 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7040 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7041 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7042 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7043 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7044 +
7045 + if (err)
7046 + break;
7047 +
7048 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7049 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7050 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7051 + (or1 & 0xFFFFE000U) == 0x82106000U &&
7052 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7053 + sllx == 0x83287020U &&
7054 + jmpl == 0x81C04005U &&
7055 + nop == 0x01000000U)
7056 + {
7057 + unsigned long addr;
7058 +
7059 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7060 + regs->u_regs[UREG_G1] <<= 32;
7061 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7062 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7063 + regs->tpc = addr;
7064 + regs->tnpc = addr+4;
7065 + return 2;
7066 + }
7067 + } while (0);
7068 +
7069 + do { /* PaX: patched PLT emulation #6 */
7070 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
7071 +
7072 + err = get_user(sethi, (unsigned int *)regs->tpc);
7073 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7074 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7075 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7076 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
7077 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7078 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7079 +
7080 + if (err)
7081 + break;
7082 +
7083 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7084 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
7085 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7086 + sllx == 0x83287020U &&
7087 + (or & 0xFFFFE000U) == 0x8A116000U &&
7088 + jmpl == 0x81C04005U &&
7089 + nop == 0x01000000U)
7090 + {
7091 + unsigned long addr;
7092 +
7093 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7094 + regs->u_regs[UREG_G1] <<= 32;
7095 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7096 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7097 + regs->tpc = addr;
7098 + regs->tnpc = addr+4;
7099 + return 2;
7100 + }
7101 + } while (0);
7102 +
7103 + do { /* PaX: unpatched PLT emulation step 1 */
7104 + unsigned int sethi, ba, nop;
7105 +
7106 + err = get_user(sethi, (unsigned int *)regs->tpc);
7107 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7108 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7109 +
7110 + if (err)
7111 + break;
7112 +
7113 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7114 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7115 + nop == 0x01000000U)
7116 + {
7117 + unsigned long addr;
7118 + unsigned int save, call;
7119 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7120 +
7121 + if ((ba & 0xFFC00000U) == 0x30800000U)
7122 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7123 + else
7124 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7125 +
7126 + if (test_thread_flag(TIF_32BIT))
7127 + addr &= 0xFFFFFFFFUL;
7128 +
7129 + err = get_user(save, (unsigned int *)addr);
7130 + err |= get_user(call, (unsigned int *)(addr+4));
7131 + err |= get_user(nop, (unsigned int *)(addr+8));
7132 + if (err)
7133 + break;
7134 +
7135 +#ifdef CONFIG_PAX_DLRESOLVE
7136 + if (save == 0x9DE3BFA8U &&
7137 + (call & 0xC0000000U) == 0x40000000U &&
7138 + nop == 0x01000000U)
7139 + {
7140 + struct vm_area_struct *vma;
7141 + unsigned long call_dl_resolve;
7142 +
7143 + down_read(&current->mm->mmap_sem);
7144 + call_dl_resolve = current->mm->call_dl_resolve;
7145 + up_read(&current->mm->mmap_sem);
7146 + if (likely(call_dl_resolve))
7147 + goto emulate;
7148 +
7149 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7150 +
7151 + down_write(&current->mm->mmap_sem);
7152 + if (current->mm->call_dl_resolve) {
7153 + call_dl_resolve = current->mm->call_dl_resolve;
7154 + up_write(&current->mm->mmap_sem);
7155 + if (vma)
7156 + kmem_cache_free(vm_area_cachep, vma);
7157 + goto emulate;
7158 + }
7159 +
7160 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7161 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7162 + up_write(&current->mm->mmap_sem);
7163 + if (vma)
7164 + kmem_cache_free(vm_area_cachep, vma);
7165 + return 1;
7166 + }
7167 +
7168 + if (pax_insert_vma(vma, call_dl_resolve)) {
7169 + up_write(&current->mm->mmap_sem);
7170 + kmem_cache_free(vm_area_cachep, vma);
7171 + return 1;
7172 + }
7173 +
7174 + current->mm->call_dl_resolve = call_dl_resolve;
7175 + up_write(&current->mm->mmap_sem);
7176 +
7177 +emulate:
7178 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7179 + regs->tpc = call_dl_resolve;
7180 + regs->tnpc = addr+4;
7181 + return 3;
7182 + }
7183 +#endif
7184 +
7185 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7186 + if ((save & 0xFFC00000U) == 0x05000000U &&
7187 + (call & 0xFFFFE000U) == 0x85C0A000U &&
7188 + nop == 0x01000000U)
7189 + {
7190 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7191 + regs->u_regs[UREG_G2] = addr + 4;
7192 + addr = (save & 0x003FFFFFU) << 10;
7193 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7194 +
7195 + if (test_thread_flag(TIF_32BIT))
7196 + addr &= 0xFFFFFFFFUL;
7197 +
7198 + regs->tpc = addr;
7199 + regs->tnpc = addr+4;
7200 + return 3;
7201 + }
7202 +
7203 + /* PaX: 64-bit PLT stub */
7204 + err = get_user(sethi1, (unsigned int *)addr);
7205 + err |= get_user(sethi2, (unsigned int *)(addr+4));
7206 + err |= get_user(or1, (unsigned int *)(addr+8));
7207 + err |= get_user(or2, (unsigned int *)(addr+12));
7208 + err |= get_user(sllx, (unsigned int *)(addr+16));
7209 + err |= get_user(add, (unsigned int *)(addr+20));
7210 + err |= get_user(jmpl, (unsigned int *)(addr+24));
7211 + err |= get_user(nop, (unsigned int *)(addr+28));
7212 + if (err)
7213 + break;
7214 +
7215 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7216 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7217 + (or1 & 0xFFFFE000U) == 0x88112000U &&
7218 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
7219 + sllx == 0x89293020U &&
7220 + add == 0x8A010005U &&
7221 + jmpl == 0x89C14000U &&
7222 + nop == 0x01000000U)
7223 + {
7224 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7225 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7226 + regs->u_regs[UREG_G4] <<= 32;
7227 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7228 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7229 + regs->u_regs[UREG_G4] = addr + 24;
7230 + addr = regs->u_regs[UREG_G5];
7231 + regs->tpc = addr;
7232 + regs->tnpc = addr+4;
7233 + return 3;
7234 + }
7235 + }
7236 + } while (0);
7237 +
7238 +#ifdef CONFIG_PAX_DLRESOLVE
7239 + do { /* PaX: unpatched PLT emulation step 2 */
7240 + unsigned int save, call, nop;
7241 +
7242 + err = get_user(save, (unsigned int *)(regs->tpc-4));
7243 + err |= get_user(call, (unsigned int *)regs->tpc);
7244 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7245 + if (err)
7246 + break;
7247 +
7248 + if (save == 0x9DE3BFA8U &&
7249 + (call & 0xC0000000U) == 0x40000000U &&
7250 + nop == 0x01000000U)
7251 + {
7252 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7253 +
7254 + if (test_thread_flag(TIF_32BIT))
7255 + dl_resolve &= 0xFFFFFFFFUL;
7256 +
7257 + regs->u_regs[UREG_RETPC] = regs->tpc;
7258 + regs->tpc = dl_resolve;
7259 + regs->tnpc = dl_resolve+4;
7260 + return 3;
7261 + }
7262 + } while (0);
7263 +#endif
7264 +
7265 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7266 + unsigned int sethi, ba, nop;
7267 +
7268 + err = get_user(sethi, (unsigned int *)regs->tpc);
7269 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7270 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7271 +
7272 + if (err)
7273 + break;
7274 +
7275 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
7276 + (ba & 0xFFF00000U) == 0x30600000U &&
7277 + nop == 0x01000000U)
7278 + {
7279 + unsigned long addr;
7280 +
7281 + addr = (sethi & 0x003FFFFFU) << 10;
7282 + regs->u_regs[UREG_G1] = addr;
7283 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7284 +
7285 + if (test_thread_flag(TIF_32BIT))
7286 + addr &= 0xFFFFFFFFUL;
7287 +
7288 + regs->tpc = addr;
7289 + regs->tnpc = addr+4;
7290 + return 2;
7291 + }
7292 + } while (0);
7293 +
7294 +#endif
7295 +
7296 + return 1;
7297 +}
7298 +
7299 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7300 +{
7301 + unsigned long i;
7302 +
7303 + printk(KERN_ERR "PAX: bytes at PC: ");
7304 + for (i = 0; i < 8; i++) {
7305 + unsigned int c;
7306 + if (get_user(c, (unsigned int *)pc+i))
7307 + printk(KERN_CONT "???????? ");
7308 + else
7309 + printk(KERN_CONT "%08x ", c);
7310 + }
7311 + printk("\n");
7312 +}
7313 +#endif
7314 +
7315 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7316 {
7317 struct mm_struct *mm = current->mm;
7318 @@ -343,6 +797,29 @@ retry:
7319 if (!vma)
7320 goto bad_area;
7321
7322 +#ifdef CONFIG_PAX_PAGEEXEC
7323 + /* PaX: detect ITLB misses on non-exec pages */
7324 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7325 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7326 + {
7327 + if (address != regs->tpc)
7328 + goto good_area;
7329 +
7330 + up_read(&mm->mmap_sem);
7331 + switch (pax_handle_fetch_fault(regs)) {
7332 +
7333 +#ifdef CONFIG_PAX_EMUPLT
7334 + case 2:
7335 + case 3:
7336 + return;
7337 +#endif
7338 +
7339 + }
7340 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7341 + do_group_exit(SIGKILL);
7342 + }
7343 +#endif
7344 +
7345 /* Pure DTLB misses do not tell us whether the fault causing
7346 * load/store/atomic was a write or not, it only says that there
7347 * was no match. So in such a case we (carefully) read the
7348 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
7349 index 07e1453..0a7d9e9 100644
7350 --- a/arch/sparc/mm/hugetlbpage.c
7351 +++ b/arch/sparc/mm/hugetlbpage.c
7352 @@ -67,7 +67,7 @@ full_search:
7353 }
7354 return -ENOMEM;
7355 }
7356 - if (likely(!vma || addr + len <= vma->vm_start)) {
7357 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7358 /*
7359 * Remember the place where we stopped the search:
7360 */
7361 @@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7362 /* make sure it can fit in the remaining address space */
7363 if (likely(addr > len)) {
7364 vma = find_vma(mm, addr-len);
7365 - if (!vma || addr <= vma->vm_start) {
7366 + if (check_heap_stack_gap(vma, addr - len, len)) {
7367 /* remember the address as a hint for next time */
7368 return (mm->free_area_cache = addr-len);
7369 }
7370 @@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7371 if (unlikely(mm->mmap_base < len))
7372 goto bottomup;
7373
7374 - addr = (mm->mmap_base-len) & HPAGE_MASK;
7375 + addr = mm->mmap_base - len;
7376
7377 do {
7378 + addr &= HPAGE_MASK;
7379 /*
7380 * Lookup failure means no vma is above this address,
7381 * else if new region fits below vma->vm_start,
7382 * return with success:
7383 */
7384 vma = find_vma(mm, addr);
7385 - if (likely(!vma || addr+len <= vma->vm_start)) {
7386 + if (likely(check_heap_stack_gap(vma, addr, len))) {
7387 /* remember the address as a hint for next time */
7388 return (mm->free_area_cache = addr);
7389 }
7390 @@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7391 mm->cached_hole_size = vma->vm_start - addr;
7392
7393 /* try just below the current vma->vm_start */
7394 - addr = (vma->vm_start-len) & HPAGE_MASK;
7395 - } while (likely(len < vma->vm_start));
7396 + addr = skip_heap_stack_gap(vma, len);
7397 + } while (!IS_ERR_VALUE(addr));
7398
7399 bottomup:
7400 /*
7401 @@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7402 if (addr) {
7403 addr = ALIGN(addr, HPAGE_SIZE);
7404 vma = find_vma(mm, addr);
7405 - if (task_size - len >= addr &&
7406 - (!vma || addr + len <= vma->vm_start))
7407 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7408 return addr;
7409 }
7410 if (mm->get_unmapped_area == arch_get_unmapped_area)
7411 diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
7412 index c5f9021..7591bae 100644
7413 --- a/arch/sparc/mm/init_32.c
7414 +++ b/arch/sparc/mm/init_32.c
7415 @@ -315,6 +315,9 @@ extern void device_scan(void);
7416 pgprot_t PAGE_SHARED __read_mostly;
7417 EXPORT_SYMBOL(PAGE_SHARED);
7418
7419 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7420 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7421 +
7422 void __init paging_init(void)
7423 {
7424 switch(sparc_cpu_model) {
7425 @@ -343,17 +346,17 @@ void __init paging_init(void)
7426
7427 /* Initialize the protection map with non-constant, MMU dependent values. */
7428 protection_map[0] = PAGE_NONE;
7429 - protection_map[1] = PAGE_READONLY;
7430 - protection_map[2] = PAGE_COPY;
7431 - protection_map[3] = PAGE_COPY;
7432 + protection_map[1] = PAGE_READONLY_NOEXEC;
7433 + protection_map[2] = PAGE_COPY_NOEXEC;
7434 + protection_map[3] = PAGE_COPY_NOEXEC;
7435 protection_map[4] = PAGE_READONLY;
7436 protection_map[5] = PAGE_READONLY;
7437 protection_map[6] = PAGE_COPY;
7438 protection_map[7] = PAGE_COPY;
7439 protection_map[8] = PAGE_NONE;
7440 - protection_map[9] = PAGE_READONLY;
7441 - protection_map[10] = PAGE_SHARED;
7442 - protection_map[11] = PAGE_SHARED;
7443 + protection_map[9] = PAGE_READONLY_NOEXEC;
7444 + protection_map[10] = PAGE_SHARED_NOEXEC;
7445 + protection_map[11] = PAGE_SHARED_NOEXEC;
7446 protection_map[12] = PAGE_READONLY;
7447 protection_map[13] = PAGE_READONLY;
7448 protection_map[14] = PAGE_SHARED;
7449 diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7450 index cbef74e..c38fead 100644
7451 --- a/arch/sparc/mm/srmmu.c
7452 +++ b/arch/sparc/mm/srmmu.c
7453 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7454 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7455 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7456 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7457 +
7458 +#ifdef CONFIG_PAX_PAGEEXEC
7459 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7460 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7461 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7462 +#endif
7463 +
7464 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7465 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7466
7467 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
7468 index f4500c6..889656c 100644
7469 --- a/arch/tile/include/asm/atomic_64.h
7470 +++ b/arch/tile/include/asm/atomic_64.h
7471 @@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7472
7473 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7474
7475 +#define atomic64_read_unchecked(v) atomic64_read(v)
7476 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7477 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7478 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7479 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7480 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7481 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7482 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7483 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7484 +
7485 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7486 #define smp_mb__before_atomic_dec() smp_mb()
7487 #define smp_mb__after_atomic_dec() smp_mb()
7488 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7489 index 392e533..536b092 100644
7490 --- a/arch/tile/include/asm/cache.h
7491 +++ b/arch/tile/include/asm/cache.h
7492 @@ -15,11 +15,12 @@
7493 #ifndef _ASM_TILE_CACHE_H
7494 #define _ASM_TILE_CACHE_H
7495
7496 +#include <linux/const.h>
7497 #include <arch/chip.h>
7498
7499 /* bytes per L1 data cache line */
7500 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7501 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7502 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7503
7504 /* bytes per L2 cache line */
7505 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7506 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7507 index ef34d2c..d6ce60c 100644
7508 --- a/arch/tile/include/asm/uaccess.h
7509 +++ b/arch/tile/include/asm/uaccess.h
7510 @@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7511 const void __user *from,
7512 unsigned long n)
7513 {
7514 - int sz = __compiletime_object_size(to);
7515 + size_t sz = __compiletime_object_size(to);
7516
7517 - if (likely(sz == -1 || sz >= n))
7518 + if (likely(sz == (size_t)-1 || sz >= n))
7519 n = _copy_from_user(to, from, n);
7520 else
7521 copy_from_user_overflow();
7522 diff --git a/arch/um/Makefile b/arch/um/Makefile
7523 index 55c0661..86ad413 100644
7524 --- a/arch/um/Makefile
7525 +++ b/arch/um/Makefile
7526 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
7527 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
7528 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7529
7530 +ifdef CONSTIFY_PLUGIN
7531 +USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7532 +endif
7533 +
7534 #This will adjust *FLAGS accordingly to the platform.
7535 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7536
7537 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7538 index 19e1bdd..3665b77 100644
7539 --- a/arch/um/include/asm/cache.h
7540 +++ b/arch/um/include/asm/cache.h
7541 @@ -1,6 +1,7 @@
7542 #ifndef __UM_CACHE_H
7543 #define __UM_CACHE_H
7544
7545 +#include <linux/const.h>
7546
7547 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7548 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7549 @@ -12,6 +13,6 @@
7550 # define L1_CACHE_SHIFT 5
7551 #endif
7552
7553 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7554 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7555
7556 #endif
7557 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7558 index 6c03acd..a5e0215 100644
7559 --- a/arch/um/include/asm/kmap_types.h
7560 +++ b/arch/um/include/asm/kmap_types.h
7561 @@ -23,6 +23,7 @@ enum km_type {
7562 KM_IRQ1,
7563 KM_SOFTIRQ0,
7564 KM_SOFTIRQ1,
7565 + KM_CLEARPAGE,
7566 KM_TYPE_NR
7567 };
7568
7569 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
7570 index 7cfc3ce..cbd1a58 100644
7571 --- a/arch/um/include/asm/page.h
7572 +++ b/arch/um/include/asm/page.h
7573 @@ -14,6 +14,9 @@
7574 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7575 #define PAGE_MASK (~(PAGE_SIZE-1))
7576
7577 +#define ktla_ktva(addr) (addr)
7578 +#define ktva_ktla(addr) (addr)
7579 +
7580 #ifndef __ASSEMBLY__
7581
7582 struct page;
7583 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7584 index 0032f92..cd151e0 100644
7585 --- a/arch/um/include/asm/pgtable-3level.h
7586 +++ b/arch/um/include/asm/pgtable-3level.h
7587 @@ -58,6 +58,7 @@
7588 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7589 #define pud_populate(mm, pud, pmd) \
7590 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7591 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7592
7593 #ifdef CONFIG_64BIT
7594 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
7595 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
7596 index 2b73ded..804f540 100644
7597 --- a/arch/um/kernel/process.c
7598 +++ b/arch/um/kernel/process.c
7599 @@ -404,22 +404,6 @@ int singlestepping(void * t)
7600 return 2;
7601 }
7602
7603 -/*
7604 - * Only x86 and x86_64 have an arch_align_stack().
7605 - * All other arches have "#define arch_align_stack(x) (x)"
7606 - * in their asm/system.h
7607 - * As this is included in UML from asm-um/system-generic.h,
7608 - * we can use it to behave as the subarch does.
7609 - */
7610 -#ifndef arch_align_stack
7611 -unsigned long arch_align_stack(unsigned long sp)
7612 -{
7613 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7614 - sp -= get_random_int() % 8192;
7615 - return sp & ~0xf;
7616 -}
7617 -#endif
7618 -
7619 unsigned long get_wchan(struct task_struct *p)
7620 {
7621 unsigned long stack_page, sp, ip;
7622 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7623 index ad8f795..2c7eec6 100644
7624 --- a/arch/unicore32/include/asm/cache.h
7625 +++ b/arch/unicore32/include/asm/cache.h
7626 @@ -12,8 +12,10 @@
7627 #ifndef __UNICORE_CACHE_H__
7628 #define __UNICORE_CACHE_H__
7629
7630 -#define L1_CACHE_SHIFT (5)
7631 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7632 +#include <linux/const.h>
7633 +
7634 +#define L1_CACHE_SHIFT 5
7635 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7636
7637 /*
7638 * Memory returned by kmalloc() may be used for DMA, so we must make
7639 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
7640 index c9866b0..fe53aef 100644
7641 --- a/arch/x86/Kconfig
7642 +++ b/arch/x86/Kconfig
7643 @@ -229,7 +229,7 @@ config X86_HT
7644
7645 config X86_32_LAZY_GS
7646 def_bool y
7647 - depends on X86_32 && !CC_STACKPROTECTOR
7648 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7649
7650 config ARCH_HWEIGHT_CFLAGS
7651 string
7652 @@ -1042,7 +1042,7 @@ choice
7653
7654 config NOHIGHMEM
7655 bool "off"
7656 - depends on !X86_NUMAQ
7657 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7658 ---help---
7659 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7660 However, the address space of 32-bit x86 processors is only 4
7661 @@ -1079,7 +1079,7 @@ config NOHIGHMEM
7662
7663 config HIGHMEM4G
7664 bool "4GB"
7665 - depends on !X86_NUMAQ
7666 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7667 ---help---
7668 Select this if you have a 32-bit processor and between 1 and 4
7669 gigabytes of physical RAM.
7670 @@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7671 hex
7672 default 0xB0000000 if VMSPLIT_3G_OPT
7673 default 0x80000000 if VMSPLIT_2G
7674 - default 0x78000000 if VMSPLIT_2G_OPT
7675 + default 0x70000000 if VMSPLIT_2G_OPT
7676 default 0x40000000 if VMSPLIT_1G
7677 default 0xC0000000
7678 depends on X86_32
7679 @@ -1523,6 +1523,7 @@ config SECCOMP
7680
7681 config CC_STACKPROTECTOR
7682 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7683 + depends on X86_64 || !PAX_MEMORY_UDEREF
7684 ---help---
7685 This option turns on the -fstack-protector GCC feature. This
7686 feature puts, at the beginning of functions, a canary value on
7687 @@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7688 config PHYSICAL_START
7689 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7690 default "0x1000000"
7691 + range 0x400000 0x40000000
7692 ---help---
7693 This gives the physical address where the kernel is loaded.
7694
7695 @@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7696 config PHYSICAL_ALIGN
7697 hex "Alignment value to which kernel should be aligned" if X86_32
7698 default "0x1000000"
7699 + range 0x400000 0x1000000 if PAX_KERNEXEC
7700 range 0x2000 0x1000000
7701 ---help---
7702 This value puts the alignment restrictions on physical address
7703 @@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7704 Say N if you want to disable CPU hotplug.
7705
7706 config COMPAT_VDSO
7707 - def_bool y
7708 + def_bool n
7709 prompt "Compat VDSO support"
7710 depends on X86_32 || IA32_EMULATION
7711 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7712 ---help---
7713 Map the 32-bit VDSO to the predictable old-style address too.
7714
7715 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
7716 index 706e12e..62e4feb 100644
7717 --- a/arch/x86/Kconfig.cpu
7718 +++ b/arch/x86/Kconfig.cpu
7719 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7720
7721 config X86_F00F_BUG
7722 def_bool y
7723 - depends on M586MMX || M586TSC || M586 || M486 || M386
7724 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7725
7726 config X86_INVD_BUG
7727 def_bool y
7728 @@ -358,7 +358,7 @@ config X86_POPAD_OK
7729
7730 config X86_ALIGNMENT_16
7731 def_bool y
7732 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7733 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7734
7735 config X86_INTEL_USERCOPY
7736 def_bool y
7737 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
7738 # generates cmov.
7739 config X86_CMOV
7740 def_bool y
7741 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7742 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7743
7744 config X86_MINIMUM_CPU_FAMILY
7745 int
7746 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
7747 index e46c214..ab62fd1 100644
7748 --- a/arch/x86/Kconfig.debug
7749 +++ b/arch/x86/Kconfig.debug
7750 @@ -84,7 +84,7 @@ config X86_PTDUMP
7751 config DEBUG_RODATA
7752 bool "Write protect kernel read-only data structures"
7753 default y
7754 - depends on DEBUG_KERNEL
7755 + depends on DEBUG_KERNEL && BROKEN
7756 ---help---
7757 Mark the kernel read-only data as write-protected in the pagetables,
7758 in order to catch accidental (and incorrect) writes to such const
7759 @@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7760
7761 config DEBUG_SET_MODULE_RONX
7762 bool "Set loadable kernel module data as NX and text as RO"
7763 - depends on MODULES
7764 + depends on MODULES && BROKEN
7765 ---help---
7766 This option helps catch unintended modifications to loadable
7767 kernel module's text and read-only data. It also prevents execution
7768 @@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
7769
7770 config DEBUG_STRICT_USER_COPY_CHECKS
7771 bool "Strict copy size checks"
7772 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
7773 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
7774 ---help---
7775 Enabling this option turns a certain set of sanity checks for user
7776 copy operations into compile time failures.
7777 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
7778 index b1c611e..2c1a823 100644
7779 --- a/arch/x86/Makefile
7780 +++ b/arch/x86/Makefile
7781 @@ -46,6 +46,7 @@ else
7782 UTS_MACHINE := x86_64
7783 CHECKFLAGS += -D__x86_64__ -m64
7784
7785 + biarch := $(call cc-option,-m64)
7786 KBUILD_AFLAGS += -m64
7787 KBUILD_CFLAGS += -m64
7788
7789 @@ -222,3 +223,12 @@ define archhelp
7790 echo ' FDARGS="..." arguments for the booted kernel'
7791 echo ' FDINITRD=file initrd for the booted kernel'
7792 endef
7793 +
7794 +define OLD_LD
7795 +
7796 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7797 +*** Please upgrade your binutils to 2.18 or newer
7798 +endef
7799 +
7800 +archprepare:
7801 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7802 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
7803 index 5a747dd..ff7b12c 100644
7804 --- a/arch/x86/boot/Makefile
7805 +++ b/arch/x86/boot/Makefile
7806 @@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7807 $(call cc-option, -fno-stack-protector) \
7808 $(call cc-option, -mpreferred-stack-boundary=2)
7809 KBUILD_CFLAGS += $(call cc-option, -m32)
7810 +ifdef CONSTIFY_PLUGIN
7811 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7812 +endif
7813 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7814 GCOV_PROFILE := n
7815
7816 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7817 index 878e4b9..20537ab 100644
7818 --- a/arch/x86/boot/bitops.h
7819 +++ b/arch/x86/boot/bitops.h
7820 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7821 u8 v;
7822 const u32 *p = (const u32 *)addr;
7823
7824 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7825 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7826 return v;
7827 }
7828
7829 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7830
7831 static inline void set_bit(int nr, void *addr)
7832 {
7833 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7834 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7835 }
7836
7837 #endif /* BOOT_BITOPS_H */
7838 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7839 index 18997e5..83d9c67 100644
7840 --- a/arch/x86/boot/boot.h
7841 +++ b/arch/x86/boot/boot.h
7842 @@ -85,7 +85,7 @@ static inline void io_delay(void)
7843 static inline u16 ds(void)
7844 {
7845 u16 seg;
7846 - asm("movw %%ds,%0" : "=rm" (seg));
7847 + asm volatile("movw %%ds,%0" : "=rm" (seg));
7848 return seg;
7849 }
7850
7851 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7852 static inline int memcmp(const void *s1, const void *s2, size_t len)
7853 {
7854 u8 diff;
7855 - asm("repe; cmpsb; setnz %0"
7856 + asm volatile("repe; cmpsb; setnz %0"
7857 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7858 return diff;
7859 }
7860 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7861 index e398bb5..3a382ca 100644
7862 --- a/arch/x86/boot/compressed/Makefile
7863 +++ b/arch/x86/boot/compressed/Makefile
7864 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7865 KBUILD_CFLAGS += $(cflags-y)
7866 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7867 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7868 +ifdef CONSTIFY_PLUGIN
7869 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7870 +endif
7871
7872 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7873 GCOV_PROFILE := n
7874 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7875 index 0cdfc0d..6e79437 100644
7876 --- a/arch/x86/boot/compressed/eboot.c
7877 +++ b/arch/x86/boot/compressed/eboot.c
7878 @@ -122,7 +122,6 @@ again:
7879 *addr = max_addr;
7880 }
7881
7882 -free_pool:
7883 efi_call_phys1(sys_table->boottime->free_pool, map);
7884
7885 fail:
7886 @@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7887 if (i == map_size / desc_size)
7888 status = EFI_NOT_FOUND;
7889
7890 -free_pool:
7891 efi_call_phys1(sys_table->boottime->free_pool, map);
7892 fail:
7893 return status;
7894 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7895 index c85e3ac..6f5aa80 100644
7896 --- a/arch/x86/boot/compressed/head_32.S
7897 +++ b/arch/x86/boot/compressed/head_32.S
7898 @@ -106,7 +106,7 @@ preferred_addr:
7899 notl %eax
7900 andl %eax, %ebx
7901 #else
7902 - movl $LOAD_PHYSICAL_ADDR, %ebx
7903 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7904 #endif
7905
7906 /* Target address to relocate to for decompression */
7907 @@ -192,7 +192,7 @@ relocated:
7908 * and where it was actually loaded.
7909 */
7910 movl %ebp, %ebx
7911 - subl $LOAD_PHYSICAL_ADDR, %ebx
7912 + subl $____LOAD_PHYSICAL_ADDR, %ebx
7913 jz 2f /* Nothing to be done if loaded at compiled addr. */
7914 /*
7915 * Process relocations.
7916 @@ -200,8 +200,7 @@ relocated:
7917
7918 1: subl $4, %edi
7919 movl (%edi), %ecx
7920 - testl %ecx, %ecx
7921 - jz 2f
7922 + jecxz 2f
7923 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7924 jmp 1b
7925 2:
7926 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7927 index 87e03a1..0d94c76 100644
7928 --- a/arch/x86/boot/compressed/head_64.S
7929 +++ b/arch/x86/boot/compressed/head_64.S
7930 @@ -91,7 +91,7 @@ ENTRY(startup_32)
7931 notl %eax
7932 andl %eax, %ebx
7933 #else
7934 - movl $LOAD_PHYSICAL_ADDR, %ebx
7935 + movl $____LOAD_PHYSICAL_ADDR, %ebx
7936 #endif
7937
7938 /* Target address to relocate to for decompression */
7939 @@ -263,7 +263,7 @@ preferred_addr:
7940 notq %rax
7941 andq %rax, %rbp
7942 #else
7943 - movq $LOAD_PHYSICAL_ADDR, %rbp
7944 + movq $____LOAD_PHYSICAL_ADDR, %rbp
7945 #endif
7946
7947 /* Target address to relocate to for decompression */
7948 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7949 index 7116dcb..d9ae1d7 100644
7950 --- a/arch/x86/boot/compressed/misc.c
7951 +++ b/arch/x86/boot/compressed/misc.c
7952 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
7953 case PT_LOAD:
7954 #ifdef CONFIG_RELOCATABLE
7955 dest = output;
7956 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7957 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7958 #else
7959 dest = (void *)(phdr->p_paddr);
7960 #endif
7961 @@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7962 error("Destination address too large");
7963 #endif
7964 #ifndef CONFIG_RELOCATABLE
7965 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7966 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7967 error("Wrong destination address");
7968 #endif
7969
7970 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7971 index 4d3ff03..e4972ff 100644
7972 --- a/arch/x86/boot/cpucheck.c
7973 +++ b/arch/x86/boot/cpucheck.c
7974 @@ -74,7 +74,7 @@ static int has_fpu(void)
7975 u16 fcw = -1, fsw = -1;
7976 u32 cr0;
7977
7978 - asm("movl %%cr0,%0" : "=r" (cr0));
7979 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
7980 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7981 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7982 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7983 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7984 {
7985 u32 f0, f1;
7986
7987 - asm("pushfl ; "
7988 + asm volatile("pushfl ; "
7989 "pushfl ; "
7990 "popl %0 ; "
7991 "movl %0,%1 ; "
7992 @@ -115,7 +115,7 @@ static void get_flags(void)
7993 set_bit(X86_FEATURE_FPU, cpu.flags);
7994
7995 if (has_eflag(X86_EFLAGS_ID)) {
7996 - asm("cpuid"
7997 + asm volatile("cpuid"
7998 : "=a" (max_intel_level),
7999 "=b" (cpu_vendor[0]),
8000 "=d" (cpu_vendor[1]),
8001 @@ -124,7 +124,7 @@ static void get_flags(void)
8002
8003 if (max_intel_level >= 0x00000001 &&
8004 max_intel_level <= 0x0000ffff) {
8005 - asm("cpuid"
8006 + asm volatile("cpuid"
8007 : "=a" (tfms),
8008 "=c" (cpu.flags[4]),
8009 "=d" (cpu.flags[0])
8010 @@ -136,7 +136,7 @@ static void get_flags(void)
8011 cpu.model += ((tfms >> 16) & 0xf) << 4;
8012 }
8013
8014 - asm("cpuid"
8015 + asm volatile("cpuid"
8016 : "=a" (max_amd_level)
8017 : "a" (0x80000000)
8018 : "ebx", "ecx", "edx");
8019 @@ -144,7 +144,7 @@ static void get_flags(void)
8020 if (max_amd_level >= 0x80000001 &&
8021 max_amd_level <= 0x8000ffff) {
8022 u32 eax = 0x80000001;
8023 - asm("cpuid"
8024 + asm volatile("cpuid"
8025 : "+a" (eax),
8026 "=c" (cpu.flags[6]),
8027 "=d" (cpu.flags[1])
8028 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8029 u32 ecx = MSR_K7_HWCR;
8030 u32 eax, edx;
8031
8032 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8033 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8034 eax &= ~(1 << 15);
8035 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8036 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8037
8038 get_flags(); /* Make sure it really did something */
8039 err = check_flags();
8040 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8041 u32 ecx = MSR_VIA_FCR;
8042 u32 eax, edx;
8043
8044 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8045 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8046 eax |= (1<<1)|(1<<7);
8047 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8048 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8049
8050 set_bit(X86_FEATURE_CX8, cpu.flags);
8051 err = check_flags();
8052 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8053 u32 eax, edx;
8054 u32 level = 1;
8055
8056 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8057 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8058 - asm("cpuid"
8059 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8060 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8061 + asm volatile("cpuid"
8062 : "+a" (level), "=d" (cpu.flags[0])
8063 : : "ecx", "ebx");
8064 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8065 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8066
8067 err = check_flags();
8068 }
8069 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
8070 index f1bbeeb..aff09cb 100644
8071 --- a/arch/x86/boot/header.S
8072 +++ b/arch/x86/boot/header.S
8073 @@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8074 # single linked list of
8075 # struct setup_data
8076
8077 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8078 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8079
8080 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8081 #define VO_INIT_SIZE (VO__end - VO__text)
8082 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8083 index db75d07..8e6d0af 100644
8084 --- a/arch/x86/boot/memory.c
8085 +++ b/arch/x86/boot/memory.c
8086 @@ -19,7 +19,7 @@
8087
8088 static int detect_memory_e820(void)
8089 {
8090 - int count = 0;
8091 + unsigned int count = 0;
8092 struct biosregs ireg, oreg;
8093 struct e820entry *desc = boot_params.e820_map;
8094 static struct e820entry buf; /* static so it is zeroed */
8095 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8096 index 11e8c6e..fdbb1ed 100644
8097 --- a/arch/x86/boot/video-vesa.c
8098 +++ b/arch/x86/boot/video-vesa.c
8099 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8100
8101 boot_params.screen_info.vesapm_seg = oreg.es;
8102 boot_params.screen_info.vesapm_off = oreg.di;
8103 + boot_params.screen_info.vesapm_size = oreg.cx;
8104 }
8105
8106 /*
8107 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8108 index 43eda28..5ab5fdb 100644
8109 --- a/arch/x86/boot/video.c
8110 +++ b/arch/x86/boot/video.c
8111 @@ -96,7 +96,7 @@ static void store_mode_params(void)
8112 static unsigned int get_entry(void)
8113 {
8114 char entry_buf[4];
8115 - int i, len = 0;
8116 + unsigned int i, len = 0;
8117 int key;
8118 unsigned int v;
8119
8120 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8121 index 5b577d5..3c1fed4 100644
8122 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
8123 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8124 @@ -8,6 +8,8 @@
8125 * including this sentence is retained in full.
8126 */
8127
8128 +#include <asm/alternative-asm.h>
8129 +
8130 .extern crypto_ft_tab
8131 .extern crypto_it_tab
8132 .extern crypto_fl_tab
8133 @@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8134 je B192; \
8135 leaq 32(r9),r9;
8136
8137 +#define ret pax_force_retaddr 0, 1; ret
8138 +
8139 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8140 movq r1,r2; \
8141 movq r3,r4; \
8142 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
8143 index 3470624..201259d 100644
8144 --- a/arch/x86/crypto/aesni-intel_asm.S
8145 +++ b/arch/x86/crypto/aesni-intel_asm.S
8146 @@ -31,6 +31,7 @@
8147
8148 #include <linux/linkage.h>
8149 #include <asm/inst.h>
8150 +#include <asm/alternative-asm.h>
8151
8152 #ifdef __x86_64__
8153 .data
8154 @@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8155 pop %r14
8156 pop %r13
8157 pop %r12
8158 + pax_force_retaddr 0, 1
8159 ret
8160 +ENDPROC(aesni_gcm_dec)
8161
8162
8163 /*****************************************************************************
8164 @@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8165 pop %r14
8166 pop %r13
8167 pop %r12
8168 + pax_force_retaddr 0, 1
8169 ret
8170 +ENDPROC(aesni_gcm_enc)
8171
8172 #endif
8173
8174 @@ -1714,6 +1719,7 @@ _key_expansion_256a:
8175 pxor %xmm1, %xmm0
8176 movaps %xmm0, (TKEYP)
8177 add $0x10, TKEYP
8178 + pax_force_retaddr_bts
8179 ret
8180
8181 .align 4
8182 @@ -1738,6 +1744,7 @@ _key_expansion_192a:
8183 shufps $0b01001110, %xmm2, %xmm1
8184 movaps %xmm1, 0x10(TKEYP)
8185 add $0x20, TKEYP
8186 + pax_force_retaddr_bts
8187 ret
8188
8189 .align 4
8190 @@ -1757,6 +1764,7 @@ _key_expansion_192b:
8191
8192 movaps %xmm0, (TKEYP)
8193 add $0x10, TKEYP
8194 + pax_force_retaddr_bts
8195 ret
8196
8197 .align 4
8198 @@ -1769,6 +1777,7 @@ _key_expansion_256b:
8199 pxor %xmm1, %xmm2
8200 movaps %xmm2, (TKEYP)
8201 add $0x10, TKEYP
8202 + pax_force_retaddr_bts
8203 ret
8204
8205 /*
8206 @@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8207 #ifndef __x86_64__
8208 popl KEYP
8209 #endif
8210 + pax_force_retaddr 0, 1
8211 ret
8212 +ENDPROC(aesni_set_key)
8213
8214 /*
8215 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8216 @@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8217 popl KLEN
8218 popl KEYP
8219 #endif
8220 + pax_force_retaddr 0, 1
8221 ret
8222 +ENDPROC(aesni_enc)
8223
8224 /*
8225 * _aesni_enc1: internal ABI
8226 @@ -1959,6 +1972,7 @@ _aesni_enc1:
8227 AESENC KEY STATE
8228 movaps 0x70(TKEYP), KEY
8229 AESENCLAST KEY STATE
8230 + pax_force_retaddr_bts
8231 ret
8232
8233 /*
8234 @@ -2067,6 +2081,7 @@ _aesni_enc4:
8235 AESENCLAST KEY STATE2
8236 AESENCLAST KEY STATE3
8237 AESENCLAST KEY STATE4
8238 + pax_force_retaddr_bts
8239 ret
8240
8241 /*
8242 @@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8243 popl KLEN
8244 popl KEYP
8245 #endif
8246 + pax_force_retaddr 0, 1
8247 ret
8248 +ENDPROC(aesni_dec)
8249
8250 /*
8251 * _aesni_dec1: internal ABI
8252 @@ -2146,6 +2163,7 @@ _aesni_dec1:
8253 AESDEC KEY STATE
8254 movaps 0x70(TKEYP), KEY
8255 AESDECLAST KEY STATE
8256 + pax_force_retaddr_bts
8257 ret
8258
8259 /*
8260 @@ -2254,6 +2272,7 @@ _aesni_dec4:
8261 AESDECLAST KEY STATE2
8262 AESDECLAST KEY STATE3
8263 AESDECLAST KEY STATE4
8264 + pax_force_retaddr_bts
8265 ret
8266
8267 /*
8268 @@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8269 popl KEYP
8270 popl LEN
8271 #endif
8272 + pax_force_retaddr 0, 1
8273 ret
8274 +ENDPROC(aesni_ecb_enc)
8275
8276 /*
8277 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8278 @@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8279 popl KEYP
8280 popl LEN
8281 #endif
8282 + pax_force_retaddr 0, 1
8283 ret
8284 +ENDPROC(aesni_ecb_dec)
8285
8286 /*
8287 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8288 @@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8289 popl LEN
8290 popl IVP
8291 #endif
8292 + pax_force_retaddr 0, 1
8293 ret
8294 +ENDPROC(aesni_cbc_enc)
8295
8296 /*
8297 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8298 @@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8299 popl LEN
8300 popl IVP
8301 #endif
8302 + pax_force_retaddr 0, 1
8303 ret
8304 +ENDPROC(aesni_cbc_dec)
8305
8306 #ifdef __x86_64__
8307 .align 16
8308 @@ -2526,6 +2553,7 @@ _aesni_inc_init:
8309 mov $1, TCTR_LOW
8310 MOVQ_R64_XMM TCTR_LOW INC
8311 MOVQ_R64_XMM CTR TCTR_LOW
8312 + pax_force_retaddr_bts
8313 ret
8314
8315 /*
8316 @@ -2554,6 +2582,7 @@ _aesni_inc:
8317 .Linc_low:
8318 movaps CTR, IV
8319 PSHUFB_XMM BSWAP_MASK IV
8320 + pax_force_retaddr_bts
8321 ret
8322
8323 /*
8324 @@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8325 .Lctr_enc_ret:
8326 movups IV, (IVP)
8327 .Lctr_enc_just_ret:
8328 + pax_force_retaddr 0, 1
8329 ret
8330 +ENDPROC(aesni_ctr_enc)
8331 #endif
8332 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8333 index 391d245..67f35c2 100644
8334 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8335 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8336 @@ -20,6 +20,8 @@
8337 *
8338 */
8339
8340 +#include <asm/alternative-asm.h>
8341 +
8342 .file "blowfish-x86_64-asm.S"
8343 .text
8344
8345 @@ -151,9 +153,11 @@ __blowfish_enc_blk:
8346 jnz __enc_xor;
8347
8348 write_block();
8349 + pax_force_retaddr 0, 1
8350 ret;
8351 __enc_xor:
8352 xor_block();
8353 + pax_force_retaddr 0, 1
8354 ret;
8355
8356 .align 8
8357 @@ -188,6 +192,7 @@ blowfish_dec_blk:
8358
8359 movq %r11, %rbp;
8360
8361 + pax_force_retaddr 0, 1
8362 ret;
8363
8364 /**********************************************************************
8365 @@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8366
8367 popq %rbx;
8368 popq %rbp;
8369 + pax_force_retaddr 0, 1
8370 ret;
8371
8372 __enc_xor4:
8373 @@ -349,6 +355,7 @@ __enc_xor4:
8374
8375 popq %rbx;
8376 popq %rbp;
8377 + pax_force_retaddr 0, 1
8378 ret;
8379
8380 .align 8
8381 @@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8382 popq %rbx;
8383 popq %rbp;
8384
8385 + pax_force_retaddr 0, 1
8386 ret;
8387
8388 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8389 index 0b33743..7a56206 100644
8390 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8391 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8392 @@ -20,6 +20,8 @@
8393 *
8394 */
8395
8396 +#include <asm/alternative-asm.h>
8397 +
8398 .file "camellia-x86_64-asm_64.S"
8399 .text
8400
8401 @@ -229,12 +231,14 @@ __enc_done:
8402 enc_outunpack(mov, RT1);
8403
8404 movq RRBP, %rbp;
8405 + pax_force_retaddr 0, 1
8406 ret;
8407
8408 __enc_xor:
8409 enc_outunpack(xor, RT1);
8410
8411 movq RRBP, %rbp;
8412 + pax_force_retaddr 0, 1
8413 ret;
8414
8415 .global camellia_dec_blk;
8416 @@ -275,6 +279,7 @@ __dec_rounds16:
8417 dec_outunpack();
8418
8419 movq RRBP, %rbp;
8420 + pax_force_retaddr 0, 1
8421 ret;
8422
8423 /**********************************************************************
8424 @@ -468,6 +473,7 @@ __enc2_done:
8425
8426 movq RRBP, %rbp;
8427 popq %rbx;
8428 + pax_force_retaddr 0, 1
8429 ret;
8430
8431 __enc2_xor:
8432 @@ -475,6 +481,7 @@ __enc2_xor:
8433
8434 movq RRBP, %rbp;
8435 popq %rbx;
8436 + pax_force_retaddr 0, 1
8437 ret;
8438
8439 .global camellia_dec_blk_2way;
8440 @@ -517,4 +524,5 @@ __dec2_rounds16:
8441
8442 movq RRBP, %rbp;
8443 movq RXOR, %rbx;
8444 + pax_force_retaddr 0, 1
8445 ret;
8446 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8447 index 6214a9b..1f4fc9a 100644
8448 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8449 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8450 @@ -1,3 +1,5 @@
8451 +#include <asm/alternative-asm.h>
8452 +
8453 # enter ECRYPT_encrypt_bytes
8454 .text
8455 .p2align 5
8456 @@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8457 add %r11,%rsp
8458 mov %rdi,%rax
8459 mov %rsi,%rdx
8460 + pax_force_retaddr 0, 1
8461 ret
8462 # bytesatleast65:
8463 ._bytesatleast65:
8464 @@ -891,6 +894,7 @@ ECRYPT_keysetup:
8465 add %r11,%rsp
8466 mov %rdi,%rax
8467 mov %rsi,%rdx
8468 + pax_force_retaddr
8469 ret
8470 # enter ECRYPT_ivsetup
8471 .text
8472 @@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8473 add %r11,%rsp
8474 mov %rdi,%rax
8475 mov %rsi,%rdx
8476 + pax_force_retaddr
8477 ret
8478 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8479 index 3ee1ff0..cbc568b 100644
8480 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8481 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8482 @@ -24,6 +24,8 @@
8483 *
8484 */
8485
8486 +#include <asm/alternative-asm.h>
8487 +
8488 .file "serpent-sse2-x86_64-asm_64.S"
8489 .text
8490
8491 @@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8492 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8493 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8494
8495 + pax_force_retaddr
8496 ret;
8497
8498 __enc_xor8:
8499 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8500 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8501
8502 + pax_force_retaddr
8503 ret;
8504
8505 .align 8
8506 @@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8507 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8508 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8509
8510 + pax_force_retaddr
8511 ret;
8512 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8513 index b2c2f57..8470cab 100644
8514 --- a/arch/x86/crypto/sha1_ssse3_asm.S
8515 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
8516 @@ -28,6 +28,8 @@
8517 * (at your option) any later version.
8518 */
8519
8520 +#include <asm/alternative-asm.h>
8521 +
8522 #define CTX %rdi // arg1
8523 #define BUF %rsi // arg2
8524 #define CNT %rdx // arg3
8525 @@ -104,6 +106,7 @@
8526 pop %r12
8527 pop %rbp
8528 pop %rbx
8529 + pax_force_retaddr 0, 1
8530 ret
8531
8532 .size \name, .-\name
8533 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8534 index 5b012a2..36d5364 100644
8535 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8536 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8537 @@ -20,6 +20,8 @@
8538 *
8539 */
8540
8541 +#include <asm/alternative-asm.h>
8542 +
8543 .file "twofish-x86_64-asm-3way.S"
8544 .text
8545
8546 @@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8547 popq %r13;
8548 popq %r14;
8549 popq %r15;
8550 + pax_force_retaddr 0, 1
8551 ret;
8552
8553 __enc_xor3:
8554 @@ -271,6 +274,7 @@ __enc_xor3:
8555 popq %r13;
8556 popq %r14;
8557 popq %r15;
8558 + pax_force_retaddr 0, 1
8559 ret;
8560
8561 .global twofish_dec_blk_3way
8562 @@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8563 popq %r13;
8564 popq %r14;
8565 popq %r15;
8566 + pax_force_retaddr 0, 1
8567 ret;
8568
8569 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
8570 index 7bcf3fc..f53832f 100644
8571 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8572 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8573 @@ -21,6 +21,7 @@
8574 .text
8575
8576 #include <asm/asm-offsets.h>
8577 +#include <asm/alternative-asm.h>
8578
8579 #define a_offset 0
8580 #define b_offset 4
8581 @@ -268,6 +269,7 @@ twofish_enc_blk:
8582
8583 popq R1
8584 movq $1,%rax
8585 + pax_force_retaddr 0, 1
8586 ret
8587
8588 twofish_dec_blk:
8589 @@ -319,4 +321,5 @@ twofish_dec_blk:
8590
8591 popq R1
8592 movq $1,%rax
8593 + pax_force_retaddr 0, 1
8594 ret
8595 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
8596 index 07b3a68..bd2a388 100644
8597 --- a/arch/x86/ia32/ia32_aout.c
8598 +++ b/arch/x86/ia32/ia32_aout.c
8599 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8600 unsigned long dump_start, dump_size;
8601 struct user32 dump;
8602
8603 + memset(&dump, 0, sizeof(dump));
8604 +
8605 fs = get_fs();
8606 set_fs(KERNEL_DS);
8607 has_dumped = 1;
8608 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
8609 index 4f5bfac..e1ef0d3 100644
8610 --- a/arch/x86/ia32/ia32_signal.c
8611 +++ b/arch/x86/ia32/ia32_signal.c
8612 @@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8613 }
8614 seg = get_fs();
8615 set_fs(KERNEL_DS);
8616 - ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8617 + ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8618 set_fs(seg);
8619 if (ret >= 0 && uoss_ptr) {
8620 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
8621 @@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8622 */
8623 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8624 size_t frame_size,
8625 - void **fpstate)
8626 + void __user **fpstate)
8627 {
8628 unsigned long sp;
8629
8630 @@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8631
8632 if (used_math()) {
8633 sp = sp - sig_xstate_ia32_size;
8634 - *fpstate = (struct _fpstate_ia32 *) sp;
8635 + *fpstate = (struct _fpstate_ia32 __user *) sp;
8636 if (save_i387_xstate_ia32(*fpstate) < 0)
8637 return (void __user *) -1L;
8638 }
8639 @@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8640 sp -= frame_size;
8641 /* Align the stack pointer according to the i386 ABI,
8642 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8643 - sp = ((sp + 4) & -16ul) - 4;
8644 + sp = ((sp - 12) & -16ul) - 4;
8645 return (void __user *) sp;
8646 }
8647
8648 @@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8649 * These are actually not used anymore, but left because some
8650 * gdb versions depend on them as a marker.
8651 */
8652 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8653 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8654 } put_user_catch(err);
8655
8656 if (err)
8657 @@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8658 0xb8,
8659 __NR_ia32_rt_sigreturn,
8660 0x80cd,
8661 - 0,
8662 + 0
8663 };
8664
8665 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
8666 @@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8667
8668 if (ka->sa.sa_flags & SA_RESTORER)
8669 restorer = ka->sa.sa_restorer;
8670 + else if (current->mm->context.vdso)
8671 + /* Return stub is in 32bit vsyscall page */
8672 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8673 else
8674 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8675 - rt_sigreturn);
8676 + restorer = &frame->retcode;
8677 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8678
8679 /*
8680 * Not actually used anymore, but left because some gdb
8681 * versions need it.
8682 */
8683 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
8684 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8685 } put_user_catch(err);
8686
8687 if (err)
8688 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
8689 index e3e7340..05ed805 100644
8690 --- a/arch/x86/ia32/ia32entry.S
8691 +++ b/arch/x86/ia32/ia32entry.S
8692 @@ -13,8 +13,10 @@
8693 #include <asm/thread_info.h>
8694 #include <asm/segment.h>
8695 #include <asm/irqflags.h>
8696 +#include <asm/pgtable.h>
8697 #include <linux/linkage.h>
8698 #include <linux/err.h>
8699 +#include <asm/alternative-asm.h>
8700
8701 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8702 #include <linux/elf-em.h>
8703 @@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8704 ENDPROC(native_irq_enable_sysexit)
8705 #endif
8706
8707 + .macro pax_enter_kernel_user
8708 + pax_set_fptr_mask
8709 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8710 + call pax_enter_kernel_user
8711 +#endif
8712 + .endm
8713 +
8714 + .macro pax_exit_kernel_user
8715 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8716 + call pax_exit_kernel_user
8717 +#endif
8718 +#ifdef CONFIG_PAX_RANDKSTACK
8719 + pushq %rax
8720 + pushq %r11
8721 + call pax_randomize_kstack
8722 + popq %r11
8723 + popq %rax
8724 +#endif
8725 + .endm
8726 +
8727 +.macro pax_erase_kstack
8728 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8729 + call pax_erase_kstack
8730 +#endif
8731 +.endm
8732 +
8733 /*
8734 * 32bit SYSENTER instruction entry.
8735 *
8736 @@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8737 CFI_REGISTER rsp,rbp
8738 SWAPGS_UNSAFE_STACK
8739 movq PER_CPU_VAR(kernel_stack), %rsp
8740 - addq $(KERNEL_STACK_OFFSET),%rsp
8741 - /*
8742 - * No need to follow this irqs on/off section: the syscall
8743 - * disabled irqs, here we enable it straight after entry:
8744 - */
8745 - ENABLE_INTERRUPTS(CLBR_NONE)
8746 movl %ebp,%ebp /* zero extension */
8747 pushq_cfi $__USER32_DS
8748 /*CFI_REL_OFFSET ss,0*/
8749 @@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8750 CFI_REL_OFFSET rsp,0
8751 pushfq_cfi
8752 /*CFI_REL_OFFSET rflags,0*/
8753 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
8754 - CFI_REGISTER rip,r10
8755 + orl $X86_EFLAGS_IF,(%rsp)
8756 + GET_THREAD_INFO(%r11)
8757 + movl TI_sysenter_return(%r11), %r11d
8758 + CFI_REGISTER rip,r11
8759 pushq_cfi $__USER32_CS
8760 /*CFI_REL_OFFSET cs,0*/
8761 movl %eax, %eax
8762 - pushq_cfi %r10
8763 + pushq_cfi %r11
8764 CFI_REL_OFFSET rip,0
8765 pushq_cfi %rax
8766 cld
8767 SAVE_ARGS 0,1,0
8768 + pax_enter_kernel_user
8769 + /*
8770 + * No need to follow this irqs on/off section: the syscall
8771 + * disabled irqs, here we enable it straight after entry:
8772 + */
8773 + ENABLE_INTERRUPTS(CLBR_NONE)
8774 /* no need to do an access_ok check here because rbp has been
8775 32bit zero extended */
8776 +
8777 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8778 + mov $PAX_USER_SHADOW_BASE,%r11
8779 + add %r11,%rbp
8780 +#endif
8781 +
8782 1: movl (%rbp),%ebp
8783 .section __ex_table,"a"
8784 .quad 1b,ia32_badarg
8785 .previous
8786 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8787 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8788 + GET_THREAD_INFO(%r11)
8789 + orl $TS_COMPAT,TI_status(%r11)
8790 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8791 CFI_REMEMBER_STATE
8792 jnz sysenter_tracesys
8793 cmpq $(IA32_NR_syscalls-1),%rax
8794 @@ -160,12 +197,15 @@ sysenter_do_call:
8795 sysenter_dispatch:
8796 call *ia32_sys_call_table(,%rax,8)
8797 movq %rax,RAX-ARGOFFSET(%rsp)
8798 + GET_THREAD_INFO(%r11)
8799 DISABLE_INTERRUPTS(CLBR_NONE)
8800 TRACE_IRQS_OFF
8801 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8802 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8803 jnz sysexit_audit
8804 sysexit_from_sys_call:
8805 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8806 + pax_exit_kernel_user
8807 + pax_erase_kstack
8808 + andl $~TS_COMPAT,TI_status(%r11)
8809 /* clear IF, that popfq doesn't enable interrupts early */
8810 andl $~0x200,EFLAGS-R11(%rsp)
8811 movl RIP-R11(%rsp),%edx /* User %eip */
8812 @@ -191,6 +231,9 @@ sysexit_from_sys_call:
8813 movl %eax,%esi /* 2nd arg: syscall number */
8814 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
8815 call __audit_syscall_entry
8816 +
8817 + pax_erase_kstack
8818 +
8819 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8820 cmpq $(IA32_NR_syscalls-1),%rax
8821 ja ia32_badsys
8822 @@ -202,7 +245,7 @@ sysexit_from_sys_call:
8823 .endm
8824
8825 .macro auditsys_exit exit
8826 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8827 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8828 jnz ia32_ret_from_sys_call
8829 TRACE_IRQS_ON
8830 sti
8831 @@ -213,11 +256,12 @@ sysexit_from_sys_call:
8832 1: setbe %al /* 1 if error, 0 if not */
8833 movzbl %al,%edi /* zero-extend that into %edi */
8834 call __audit_syscall_exit
8835 + GET_THREAD_INFO(%r11)
8836 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8837 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8838 cli
8839 TRACE_IRQS_OFF
8840 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8841 + testl %edi,TI_flags(%r11)
8842 jz \exit
8843 CLEAR_RREGS -ARGOFFSET
8844 jmp int_with_check
8845 @@ -235,7 +279,7 @@ sysexit_audit:
8846
8847 sysenter_tracesys:
8848 #ifdef CONFIG_AUDITSYSCALL
8849 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8850 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8851 jz sysenter_auditsys
8852 #endif
8853 SAVE_REST
8854 @@ -243,6 +287,9 @@ sysenter_tracesys:
8855 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8856 movq %rsp,%rdi /* &pt_regs -> arg1 */
8857 call syscall_trace_enter
8858 +
8859 + pax_erase_kstack
8860 +
8861 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8862 RESTORE_REST
8863 cmpq $(IA32_NR_syscalls-1),%rax
8864 @@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8865 ENTRY(ia32_cstar_target)
8866 CFI_STARTPROC32 simple
8867 CFI_SIGNAL_FRAME
8868 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8869 + CFI_DEF_CFA rsp,0
8870 CFI_REGISTER rip,rcx
8871 /*CFI_REGISTER rflags,r11*/
8872 SWAPGS_UNSAFE_STACK
8873 movl %esp,%r8d
8874 CFI_REGISTER rsp,r8
8875 movq PER_CPU_VAR(kernel_stack),%rsp
8876 + SAVE_ARGS 8*6,0,0
8877 + pax_enter_kernel_user
8878 /*
8879 * No need to follow this irqs on/off section: the syscall
8880 * disabled irqs and here we enable it straight after entry:
8881 */
8882 ENABLE_INTERRUPTS(CLBR_NONE)
8883 - SAVE_ARGS 8,0,0
8884 movl %eax,%eax /* zero extension */
8885 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8886 movq %rcx,RIP-ARGOFFSET(%rsp)
8887 @@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8888 /* no need to do an access_ok check here because r8 has been
8889 32bit zero extended */
8890 /* hardware stack frame is complete now */
8891 +
8892 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8893 + mov $PAX_USER_SHADOW_BASE,%r11
8894 + add %r11,%r8
8895 +#endif
8896 +
8897 1: movl (%r8),%r9d
8898 .section __ex_table,"a"
8899 .quad 1b,ia32_badarg
8900 .previous
8901 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8902 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8903 + GET_THREAD_INFO(%r11)
8904 + orl $TS_COMPAT,TI_status(%r11)
8905 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8906 CFI_REMEMBER_STATE
8907 jnz cstar_tracesys
8908 cmpq $IA32_NR_syscalls-1,%rax
8909 @@ -317,12 +372,15 @@ cstar_do_call:
8910 cstar_dispatch:
8911 call *ia32_sys_call_table(,%rax,8)
8912 movq %rax,RAX-ARGOFFSET(%rsp)
8913 + GET_THREAD_INFO(%r11)
8914 DISABLE_INTERRUPTS(CLBR_NONE)
8915 TRACE_IRQS_OFF
8916 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8917 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8918 jnz sysretl_audit
8919 sysretl_from_sys_call:
8920 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8921 + pax_exit_kernel_user
8922 + pax_erase_kstack
8923 + andl $~TS_COMPAT,TI_status(%r11)
8924 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8925 movl RIP-ARGOFFSET(%rsp),%ecx
8926 CFI_REGISTER rip,rcx
8927 @@ -350,7 +408,7 @@ sysretl_audit:
8928
8929 cstar_tracesys:
8930 #ifdef CONFIG_AUDITSYSCALL
8931 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8932 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8933 jz cstar_auditsys
8934 #endif
8935 xchgl %r9d,%ebp
8936 @@ -359,6 +417,9 @@ cstar_tracesys:
8937 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8938 movq %rsp,%rdi /* &pt_regs -> arg1 */
8939 call syscall_trace_enter
8940 +
8941 + pax_erase_kstack
8942 +
8943 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8944 RESTORE_REST
8945 xchgl %ebp,%r9d
8946 @@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8947 CFI_REL_OFFSET rip,RIP-RIP
8948 PARAVIRT_ADJUST_EXCEPTION_FRAME
8949 SWAPGS
8950 - /*
8951 - * No need to follow this irqs on/off section: the syscall
8952 - * disabled irqs and here we enable it straight after entry:
8953 - */
8954 - ENABLE_INTERRUPTS(CLBR_NONE)
8955 movl %eax,%eax
8956 pushq_cfi %rax
8957 cld
8958 /* note the registers are not zero extended to the sf.
8959 this could be a problem. */
8960 SAVE_ARGS 0,1,0
8961 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8962 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8963 + pax_enter_kernel_user
8964 + /*
8965 + * No need to follow this irqs on/off section: the syscall
8966 + * disabled irqs and here we enable it straight after entry:
8967 + */
8968 + ENABLE_INTERRUPTS(CLBR_NONE)
8969 + GET_THREAD_INFO(%r11)
8970 + orl $TS_COMPAT,TI_status(%r11)
8971 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8972 jnz ia32_tracesys
8973 cmpq $(IA32_NR_syscalls-1),%rax
8974 ja ia32_badsys
8975 @@ -435,6 +498,9 @@ ia32_tracesys:
8976 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8977 movq %rsp,%rdi /* &pt_regs -> arg1 */
8978 call syscall_trace_enter
8979 +
8980 + pax_erase_kstack
8981 +
8982 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8983 RESTORE_REST
8984 cmpq $(IA32_NR_syscalls-1),%rax
8985 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8986 index aec2202..f76174e 100644
8987 --- a/arch/x86/ia32/sys_ia32.c
8988 +++ b/arch/x86/ia32/sys_ia32.c
8989 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8990 */
8991 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8992 {
8993 - typeof(ubuf->st_uid) uid = 0;
8994 - typeof(ubuf->st_gid) gid = 0;
8995 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
8996 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
8997 SET_UID(uid, stat->uid);
8998 SET_GID(gid, stat->gid);
8999 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
9000 @@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
9001 return alarm_setitimer(seconds);
9002 }
9003
9004 -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
9005 +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
9006 int options)
9007 {
9008 return compat_sys_wait4(pid, stat_addr, options, NULL);
9009 @@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9010 mm_segment_t old_fs = get_fs();
9011
9012 set_fs(KERNEL_DS);
9013 - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9014 + ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9015 set_fs(old_fs);
9016 if (put_compat_timespec(&t, interval))
9017 return -EFAULT;
9018 @@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9019 mm_segment_t old_fs = get_fs();
9020
9021 set_fs(KERNEL_DS);
9022 - ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9023 + ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9024 set_fs(old_fs);
9025 if (!ret) {
9026 switch (_NSIG_WORDS) {
9027 @@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9028 if (copy_siginfo_from_user32(&info, uinfo))
9029 return -EFAULT;
9030 set_fs(KERNEL_DS);
9031 - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9032 + ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9033 set_fs(old_fs);
9034 return ret;
9035 }
9036 @@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9037 return -EFAULT;
9038
9039 set_fs(KERNEL_DS);
9040 - ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9041 + ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9042 count);
9043 set_fs(old_fs);
9044
9045 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
9046 index 952bd01..7692c6f 100644
9047 --- a/arch/x86/include/asm/alternative-asm.h
9048 +++ b/arch/x86/include/asm/alternative-asm.h
9049 @@ -15,6 +15,45 @@
9050 .endm
9051 #endif
9052
9053 +#ifdef KERNEXEC_PLUGIN
9054 + .macro pax_force_retaddr_bts rip=0
9055 + btsq $63,\rip(%rsp)
9056 + .endm
9057 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9058 + .macro pax_force_retaddr rip=0, reload=0
9059 + btsq $63,\rip(%rsp)
9060 + .endm
9061 + .macro pax_force_fptr ptr
9062 + btsq $63,\ptr
9063 + .endm
9064 + .macro pax_set_fptr_mask
9065 + .endm
9066 +#endif
9067 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9068 + .macro pax_force_retaddr rip=0, reload=0
9069 + .if \reload
9070 + pax_set_fptr_mask
9071 + .endif
9072 + orq %r10,\rip(%rsp)
9073 + .endm
9074 + .macro pax_force_fptr ptr
9075 + orq %r10,\ptr
9076 + .endm
9077 + .macro pax_set_fptr_mask
9078 + movabs $0x8000000000000000,%r10
9079 + .endm
9080 +#endif
9081 +#else
9082 + .macro pax_force_retaddr rip=0, reload=0
9083 + .endm
9084 + .macro pax_force_fptr ptr
9085 + .endm
9086 + .macro pax_force_retaddr_bts rip=0
9087 + .endm
9088 + .macro pax_set_fptr_mask
9089 + .endm
9090 +#endif
9091 +
9092 .macro altinstruction_entry orig alt feature orig_len alt_len
9093 .long \orig - .
9094 .long \alt - .
9095 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
9096 index 49331be..9706065 100644
9097 --- a/arch/x86/include/asm/alternative.h
9098 +++ b/arch/x86/include/asm/alternative.h
9099 @@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
9100 ".section .discard,\"aw\",@progbits\n" \
9101 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9102 ".previous\n" \
9103 - ".section .altinstr_replacement, \"ax\"\n" \
9104 + ".section .altinstr_replacement, \"a\"\n" \
9105 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9106 ".previous"
9107
9108 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
9109 index d854101..f6ea947 100644
9110 --- a/arch/x86/include/asm/apic.h
9111 +++ b/arch/x86/include/asm/apic.h
9112 @@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9113
9114 #ifdef CONFIG_X86_LOCAL_APIC
9115
9116 -extern unsigned int apic_verbosity;
9117 +extern int apic_verbosity;
9118 extern int local_apic_timer_c2_ok;
9119
9120 extern int disable_apic;
9121 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9122 index 20370c6..a2eb9b0 100644
9123 --- a/arch/x86/include/asm/apm.h
9124 +++ b/arch/x86/include/asm/apm.h
9125 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9126 __asm__ __volatile__(APM_DO_ZERO_SEGS
9127 "pushl %%edi\n\t"
9128 "pushl %%ebp\n\t"
9129 - "lcall *%%cs:apm_bios_entry\n\t"
9130 + "lcall *%%ss:apm_bios_entry\n\t"
9131 "setc %%al\n\t"
9132 "popl %%ebp\n\t"
9133 "popl %%edi\n\t"
9134 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9135 __asm__ __volatile__(APM_DO_ZERO_SEGS
9136 "pushl %%edi\n\t"
9137 "pushl %%ebp\n\t"
9138 - "lcall *%%cs:apm_bios_entry\n\t"
9139 + "lcall *%%ss:apm_bios_entry\n\t"
9140 "setc %%bl\n\t"
9141 "popl %%ebp\n\t"
9142 "popl %%edi\n\t"
9143 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
9144 index 58cb6d4..a4b806c 100644
9145 --- a/arch/x86/include/asm/atomic.h
9146 +++ b/arch/x86/include/asm/atomic.h
9147 @@ -22,7 +22,18 @@
9148 */
9149 static inline int atomic_read(const atomic_t *v)
9150 {
9151 - return (*(volatile int *)&(v)->counter);
9152 + return (*(volatile const int *)&(v)->counter);
9153 +}
9154 +
9155 +/**
9156 + * atomic_read_unchecked - read atomic variable
9157 + * @v: pointer of type atomic_unchecked_t
9158 + *
9159 + * Atomically reads the value of @v.
9160 + */
9161 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
9162 +{
9163 + return (*(volatile const int *)&(v)->counter);
9164 }
9165
9166 /**
9167 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9168 }
9169
9170 /**
9171 + * atomic_set_unchecked - set atomic variable
9172 + * @v: pointer of type atomic_unchecked_t
9173 + * @i: required value
9174 + *
9175 + * Atomically sets the value of @v to @i.
9176 + */
9177 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9178 +{
9179 + v->counter = i;
9180 +}
9181 +
9182 +/**
9183 * atomic_add - add integer to atomic variable
9184 * @i: integer value to add
9185 * @v: pointer of type atomic_t
9186 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
9187 */
9188 static inline void atomic_add(int i, atomic_t *v)
9189 {
9190 - asm volatile(LOCK_PREFIX "addl %1,%0"
9191 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9192 +
9193 +#ifdef CONFIG_PAX_REFCOUNT
9194 + "jno 0f\n"
9195 + LOCK_PREFIX "subl %1,%0\n"
9196 + "int $4\n0:\n"
9197 + _ASM_EXTABLE(0b, 0b)
9198 +#endif
9199 +
9200 + : "+m" (v->counter)
9201 + : "ir" (i));
9202 +}
9203 +
9204 +/**
9205 + * atomic_add_unchecked - add integer to atomic variable
9206 + * @i: integer value to add
9207 + * @v: pointer of type atomic_unchecked_t
9208 + *
9209 + * Atomically adds @i to @v.
9210 + */
9211 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
9212 +{
9213 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
9214 : "+m" (v->counter)
9215 : "ir" (i));
9216 }
9217 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
9218 */
9219 static inline void atomic_sub(int i, atomic_t *v)
9220 {
9221 - asm volatile(LOCK_PREFIX "subl %1,%0"
9222 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9223 +
9224 +#ifdef CONFIG_PAX_REFCOUNT
9225 + "jno 0f\n"
9226 + LOCK_PREFIX "addl %1,%0\n"
9227 + "int $4\n0:\n"
9228 + _ASM_EXTABLE(0b, 0b)
9229 +#endif
9230 +
9231 + : "+m" (v->counter)
9232 + : "ir" (i));
9233 +}
9234 +
9235 +/**
9236 + * atomic_sub_unchecked - subtract integer from atomic variable
9237 + * @i: integer value to subtract
9238 + * @v: pointer of type atomic_unchecked_t
9239 + *
9240 + * Atomically subtracts @i from @v.
9241 + */
9242 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
9243 +{
9244 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
9245 : "+m" (v->counter)
9246 : "ir" (i));
9247 }
9248 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9249 {
9250 unsigned char c;
9251
9252 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9253 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
9254 +
9255 +#ifdef CONFIG_PAX_REFCOUNT
9256 + "jno 0f\n"
9257 + LOCK_PREFIX "addl %2,%0\n"
9258 + "int $4\n0:\n"
9259 + _ASM_EXTABLE(0b, 0b)
9260 +#endif
9261 +
9262 + "sete %1\n"
9263 : "+m" (v->counter), "=qm" (c)
9264 : "ir" (i) : "memory");
9265 return c;
9266 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9267 */
9268 static inline void atomic_inc(atomic_t *v)
9269 {
9270 - asm volatile(LOCK_PREFIX "incl %0"
9271 + asm volatile(LOCK_PREFIX "incl %0\n"
9272 +
9273 +#ifdef CONFIG_PAX_REFCOUNT
9274 + "jno 0f\n"
9275 + LOCK_PREFIX "decl %0\n"
9276 + "int $4\n0:\n"
9277 + _ASM_EXTABLE(0b, 0b)
9278 +#endif
9279 +
9280 + : "+m" (v->counter));
9281 +}
9282 +
9283 +/**
9284 + * atomic_inc_unchecked - increment atomic variable
9285 + * @v: pointer of type atomic_unchecked_t
9286 + *
9287 + * Atomically increments @v by 1.
9288 + */
9289 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9290 +{
9291 + asm volatile(LOCK_PREFIX "incl %0\n"
9292 : "+m" (v->counter));
9293 }
9294
9295 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
9296 */
9297 static inline void atomic_dec(atomic_t *v)
9298 {
9299 - asm volatile(LOCK_PREFIX "decl %0"
9300 + asm volatile(LOCK_PREFIX "decl %0\n"
9301 +
9302 +#ifdef CONFIG_PAX_REFCOUNT
9303 + "jno 0f\n"
9304 + LOCK_PREFIX "incl %0\n"
9305 + "int $4\n0:\n"
9306 + _ASM_EXTABLE(0b, 0b)
9307 +#endif
9308 +
9309 + : "+m" (v->counter));
9310 +}
9311 +
9312 +/**
9313 + * atomic_dec_unchecked - decrement atomic variable
9314 + * @v: pointer of type atomic_unchecked_t
9315 + *
9316 + * Atomically decrements @v by 1.
9317 + */
9318 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9319 +{
9320 + asm volatile(LOCK_PREFIX "decl %0\n"
9321 : "+m" (v->counter));
9322 }
9323
9324 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9325 {
9326 unsigned char c;
9327
9328 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
9329 + asm volatile(LOCK_PREFIX "decl %0\n"
9330 +
9331 +#ifdef CONFIG_PAX_REFCOUNT
9332 + "jno 0f\n"
9333 + LOCK_PREFIX "incl %0\n"
9334 + "int $4\n0:\n"
9335 + _ASM_EXTABLE(0b, 0b)
9336 +#endif
9337 +
9338 + "sete %1\n"
9339 : "+m" (v->counter), "=qm" (c)
9340 : : "memory");
9341 return c != 0;
9342 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9343 {
9344 unsigned char c;
9345
9346 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
9347 + asm volatile(LOCK_PREFIX "incl %0\n"
9348 +
9349 +#ifdef CONFIG_PAX_REFCOUNT
9350 + "jno 0f\n"
9351 + LOCK_PREFIX "decl %0\n"
9352 + "int $4\n0:\n"
9353 + _ASM_EXTABLE(0b, 0b)
9354 +#endif
9355 +
9356 + "sete %1\n"
9357 + : "+m" (v->counter), "=qm" (c)
9358 + : : "memory");
9359 + return c != 0;
9360 +}
9361 +
9362 +/**
9363 + * atomic_inc_and_test_unchecked - increment and test
9364 + * @v: pointer of type atomic_unchecked_t
9365 + *
9366 + * Atomically increments @v by 1
9367 + * and returns true if the result is zero, or false for all
9368 + * other cases.
9369 + */
9370 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9371 +{
9372 + unsigned char c;
9373 +
9374 + asm volatile(LOCK_PREFIX "incl %0\n"
9375 + "sete %1\n"
9376 : "+m" (v->counter), "=qm" (c)
9377 : : "memory");
9378 return c != 0;
9379 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9380 {
9381 unsigned char c;
9382
9383 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9384 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
9385 +
9386 +#ifdef CONFIG_PAX_REFCOUNT
9387 + "jno 0f\n"
9388 + LOCK_PREFIX "subl %2,%0\n"
9389 + "int $4\n0:\n"
9390 + _ASM_EXTABLE(0b, 0b)
9391 +#endif
9392 +
9393 + "sets %1\n"
9394 : "+m" (v->counter), "=qm" (c)
9395 : "ir" (i) : "memory");
9396 return c;
9397 @@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9398 goto no_xadd;
9399 #endif
9400 /* Modern 486+ processor */
9401 - return i + xadd(&v->counter, i);
9402 + return i + xadd_check_overflow(&v->counter, i);
9403
9404 #ifdef CONFIG_M386
9405 no_xadd: /* Legacy 386 processor */
9406 @@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9407 }
9408
9409 /**
9410 + * atomic_add_return_unchecked - add integer and return
9411 + * @i: integer value to add
9412 + * @v: pointer of type atomic_unchecked_t
9413 + *
9414 + * Atomically adds @i to @v and returns @i + @v
9415 + */
9416 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9417 +{
9418 +#ifdef CONFIG_M386
9419 + int __i;
9420 + unsigned long flags;
9421 + if (unlikely(boot_cpu_data.x86 <= 3))
9422 + goto no_xadd;
9423 +#endif
9424 + /* Modern 486+ processor */
9425 + return i + xadd(&v->counter, i);
9426 +
9427 +#ifdef CONFIG_M386
9428 +no_xadd: /* Legacy 386 processor */
9429 + raw_local_irq_save(flags);
9430 + __i = atomic_read_unchecked(v);
9431 + atomic_set_unchecked(v, i + __i);
9432 + raw_local_irq_restore(flags);
9433 + return i + __i;
9434 +#endif
9435 +}
9436 +
9437 +/**
9438 * atomic_sub_return - subtract integer and return
9439 * @v: pointer of type atomic_t
9440 * @i: integer value to subtract
9441 @@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9442 }
9443
9444 #define atomic_inc_return(v) (atomic_add_return(1, v))
9445 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9446 +{
9447 + return atomic_add_return_unchecked(1, v);
9448 +}
9449 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9450
9451 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9452 @@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
9453 return cmpxchg(&v->counter, old, new);
9454 }
9455
9456 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9457 +{
9458 + return cmpxchg(&v->counter, old, new);
9459 +}
9460 +
9461 static inline int atomic_xchg(atomic_t *v, int new)
9462 {
9463 return xchg(&v->counter, new);
9464 }
9465
9466 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9467 +{
9468 + return xchg(&v->counter, new);
9469 +}
9470 +
9471 /**
9472 * __atomic_add_unless - add unless the number is already a given value
9473 * @v: pointer of type atomic_t
9474 @@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
9475 */
9476 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9477 {
9478 - int c, old;
9479 + int c, old, new;
9480 c = atomic_read(v);
9481 for (;;) {
9482 - if (unlikely(c == (u)))
9483 + if (unlikely(c == u))
9484 break;
9485 - old = atomic_cmpxchg((v), c, c + (a));
9486 +
9487 + asm volatile("addl %2,%0\n"
9488 +
9489 +#ifdef CONFIG_PAX_REFCOUNT
9490 + "jno 0f\n"
9491 + "subl %2,%0\n"
9492 + "int $4\n0:\n"
9493 + _ASM_EXTABLE(0b, 0b)
9494 +#endif
9495 +
9496 + : "=r" (new)
9497 + : "0" (c), "ir" (a));
9498 +
9499 + old = atomic_cmpxchg(v, c, new);
9500 if (likely(old == c))
9501 break;
9502 c = old;
9503 @@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9504 return c;
9505 }
9506
9507 +/**
9508 + * atomic_inc_not_zero_hint - increment if not null
9509 + * @v: pointer of type atomic_t
9510 + * @hint: probable value of the atomic before the increment
9511 + *
9512 + * This version of atomic_inc_not_zero() gives a hint of probable
9513 + * value of the atomic. This helps processor to not read the memory
9514 + * before doing the atomic read/modify/write cycle, lowering
9515 + * number of bus transactions on some arches.
9516 + *
9517 + * Returns: 0 if increment was not done, 1 otherwise.
9518 + */
9519 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9520 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9521 +{
9522 + int val, c = hint, new;
9523 +
9524 + /* sanity test, should be removed by compiler if hint is a constant */
9525 + if (!hint)
9526 + return __atomic_add_unless(v, 1, 0);
9527 +
9528 + do {
9529 + asm volatile("incl %0\n"
9530 +
9531 +#ifdef CONFIG_PAX_REFCOUNT
9532 + "jno 0f\n"
9533 + "decl %0\n"
9534 + "int $4\n0:\n"
9535 + _ASM_EXTABLE(0b, 0b)
9536 +#endif
9537 +
9538 + : "=r" (new)
9539 + : "0" (c));
9540 +
9541 + val = atomic_cmpxchg(v, c, new);
9542 + if (val == c)
9543 + return 1;
9544 + c = val;
9545 + } while (c);
9546 +
9547 + return 0;
9548 +}
9549
9550 /*
9551 * atomic_dec_if_positive - decrement by 1 if old value positive
9552 @@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
9553 #endif
9554
9555 /* These are x86-specific, used by some header files */
9556 -#define atomic_clear_mask(mask, addr) \
9557 - asm volatile(LOCK_PREFIX "andl %0,%1" \
9558 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
9559 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9560 +{
9561 + asm volatile(LOCK_PREFIX "andl %1,%0"
9562 + : "+m" (v->counter)
9563 + : "r" (~(mask))
9564 + : "memory");
9565 +}
9566
9567 -#define atomic_set_mask(mask, addr) \
9568 - asm volatile(LOCK_PREFIX "orl %0,%1" \
9569 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
9570 - : "memory")
9571 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9572 +{
9573 + asm volatile(LOCK_PREFIX "andl %1,%0"
9574 + : "+m" (v->counter)
9575 + : "r" (~(mask))
9576 + : "memory");
9577 +}
9578 +
9579 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
9580 +{
9581 + asm volatile(LOCK_PREFIX "orl %1,%0"
9582 + : "+m" (v->counter)
9583 + : "r" (mask)
9584 + : "memory");
9585 +}
9586 +
9587 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9588 +{
9589 + asm volatile(LOCK_PREFIX "orl %1,%0"
9590 + : "+m" (v->counter)
9591 + : "r" (mask)
9592 + : "memory");
9593 +}
9594
9595 /* Atomic operations are already serializing on x86 */
9596 #define smp_mb__before_atomic_dec() barrier()
9597 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
9598 index 1981199..36b9dfb 100644
9599 --- a/arch/x86/include/asm/atomic64_32.h
9600 +++ b/arch/x86/include/asm/atomic64_32.h
9601 @@ -12,6 +12,14 @@ typedef struct {
9602 u64 __aligned(8) counter;
9603 } atomic64_t;
9604
9605 +#ifdef CONFIG_PAX_REFCOUNT
9606 +typedef struct {
9607 + u64 __aligned(8) counter;
9608 +} atomic64_unchecked_t;
9609 +#else
9610 +typedef atomic64_t atomic64_unchecked_t;
9611 +#endif
9612 +
9613 #define ATOMIC64_INIT(val) { (val) }
9614
9615 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9616 @@ -37,21 +45,31 @@ typedef struct {
9617 ATOMIC64_DECL_ONE(sym##_386)
9618
9619 ATOMIC64_DECL_ONE(add_386);
9620 +ATOMIC64_DECL_ONE(add_unchecked_386);
9621 ATOMIC64_DECL_ONE(sub_386);
9622 +ATOMIC64_DECL_ONE(sub_unchecked_386);
9623 ATOMIC64_DECL_ONE(inc_386);
9624 +ATOMIC64_DECL_ONE(inc_unchecked_386);
9625 ATOMIC64_DECL_ONE(dec_386);
9626 +ATOMIC64_DECL_ONE(dec_unchecked_386);
9627 #endif
9628
9629 #define alternative_atomic64(f, out, in...) \
9630 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9631
9632 ATOMIC64_DECL(read);
9633 +ATOMIC64_DECL(read_unchecked);
9634 ATOMIC64_DECL(set);
9635 +ATOMIC64_DECL(set_unchecked);
9636 ATOMIC64_DECL(xchg);
9637 ATOMIC64_DECL(add_return);
9638 +ATOMIC64_DECL(add_return_unchecked);
9639 ATOMIC64_DECL(sub_return);
9640 +ATOMIC64_DECL(sub_return_unchecked);
9641 ATOMIC64_DECL(inc_return);
9642 +ATOMIC64_DECL(inc_return_unchecked);
9643 ATOMIC64_DECL(dec_return);
9644 +ATOMIC64_DECL(dec_return_unchecked);
9645 ATOMIC64_DECL(dec_if_positive);
9646 ATOMIC64_DECL(inc_not_zero);
9647 ATOMIC64_DECL(add_unless);
9648 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
9649 }
9650
9651 /**
9652 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9653 + * @p: pointer to type atomic64_unchecked_t
9654 + * @o: expected value
9655 + * @n: new value
9656 + *
9657 + * Atomically sets @v to @n if it was equal to @o and returns
9658 + * the old value.
9659 + */
9660 +
9661 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9662 +{
9663 + return cmpxchg64(&v->counter, o, n);
9664 +}
9665 +
9666 +/**
9667 * atomic64_xchg - xchg atomic64 variable
9668 * @v: pointer to type atomic64_t
9669 * @n: value to assign
9670 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
9671 }
9672
9673 /**
9674 + * atomic64_set_unchecked - set atomic64 variable
9675 + * @v: pointer to type atomic64_unchecked_t
9676 + * @n: value to assign
9677 + *
9678 + * Atomically sets the value of @v to @n.
9679 + */
9680 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9681 +{
9682 + unsigned high = (unsigned)(i >> 32);
9683 + unsigned low = (unsigned)i;
9684 + alternative_atomic64(set, /* no output */,
9685 + "S" (v), "b" (low), "c" (high)
9686 + : "eax", "edx", "memory");
9687 +}
9688 +
9689 +/**
9690 * atomic64_read - read atomic64 variable
9691 * @v: pointer to type atomic64_t
9692 *
9693 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
9694 }
9695
9696 /**
9697 + * atomic64_read_unchecked - read atomic64 variable
9698 + * @v: pointer to type atomic64_unchecked_t
9699 + *
9700 + * Atomically reads the value of @v and returns it.
9701 + */
9702 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9703 +{
9704 + long long r;
9705 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
9706 + return r;
9707 + }
9708 +
9709 +/**
9710 * atomic64_add_return - add and return
9711 * @i: integer value to add
9712 * @v: pointer to type atomic64_t
9713 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
9714 return i;
9715 }
9716
9717 +/**
9718 + * atomic64_add_return_unchecked - add and return
9719 + * @i: integer value to add
9720 + * @v: pointer to type atomic64_unchecked_t
9721 + *
9722 + * Atomically adds @i to @v and returns @i + *@v
9723 + */
9724 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9725 +{
9726 + alternative_atomic64(add_return_unchecked,
9727 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9728 + ASM_NO_INPUT_CLOBBER("memory"));
9729 + return i;
9730 +}
9731 +
9732 /*
9733 * Other variants with different arithmetic operators:
9734 */
9735 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
9736 return a;
9737 }
9738
9739 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9740 +{
9741 + long long a;
9742 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
9743 + "S" (v) : "memory", "ecx");
9744 + return a;
9745 +}
9746 +
9747 static inline long long atomic64_dec_return(atomic64_t *v)
9748 {
9749 long long a;
9750 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
9751 }
9752
9753 /**
9754 + * atomic64_add_unchecked - add integer to atomic64 variable
9755 + * @i: integer value to add
9756 + * @v: pointer to type atomic64_unchecked_t
9757 + *
9758 + * Atomically adds @i to @v.
9759 + */
9760 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9761 +{
9762 + __alternative_atomic64(add_unchecked, add_return_unchecked,
9763 + ASM_OUTPUT2("+A" (i), "+c" (v)),
9764 + ASM_NO_INPUT_CLOBBER("memory"));
9765 + return i;
9766 +}
9767 +
9768 +/**
9769 * atomic64_sub - subtract the atomic64 variable
9770 * @i: integer value to subtract
9771 * @v: pointer to type atomic64_t
9772 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
9773 index 0e1cbfc..5623683 100644
9774 --- a/arch/x86/include/asm/atomic64_64.h
9775 +++ b/arch/x86/include/asm/atomic64_64.h
9776 @@ -18,7 +18,19 @@
9777 */
9778 static inline long atomic64_read(const atomic64_t *v)
9779 {
9780 - return (*(volatile long *)&(v)->counter);
9781 + return (*(volatile const long *)&(v)->counter);
9782 +}
9783 +
9784 +/**
9785 + * atomic64_read_unchecked - read atomic64 variable
9786 + * @v: pointer of type atomic64_unchecked_t
9787 + *
9788 + * Atomically reads the value of @v.
9789 + * Doesn't imply a read memory barrier.
9790 + */
9791 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9792 +{
9793 + return (*(volatile const long *)&(v)->counter);
9794 }
9795
9796 /**
9797 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9798 }
9799
9800 /**
9801 + * atomic64_set_unchecked - set atomic64 variable
9802 + * @v: pointer to type atomic64_unchecked_t
9803 + * @i: required value
9804 + *
9805 + * Atomically sets the value of @v to @i.
9806 + */
9807 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9808 +{
9809 + v->counter = i;
9810 +}
9811 +
9812 +/**
9813 * atomic64_add - add integer to atomic64 variable
9814 * @i: integer value to add
9815 * @v: pointer to type atomic64_t
9816 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9817 */
9818 static inline void atomic64_add(long i, atomic64_t *v)
9819 {
9820 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
9821 +
9822 +#ifdef CONFIG_PAX_REFCOUNT
9823 + "jno 0f\n"
9824 + LOCK_PREFIX "subq %1,%0\n"
9825 + "int $4\n0:\n"
9826 + _ASM_EXTABLE(0b, 0b)
9827 +#endif
9828 +
9829 + : "=m" (v->counter)
9830 + : "er" (i), "m" (v->counter));
9831 +}
9832 +
9833 +/**
9834 + * atomic64_add_unchecked - add integer to atomic64 variable
9835 + * @i: integer value to add
9836 + * @v: pointer to type atomic64_unchecked_t
9837 + *
9838 + * Atomically adds @i to @v.
9839 + */
9840 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9841 +{
9842 asm volatile(LOCK_PREFIX "addq %1,%0"
9843 : "=m" (v->counter)
9844 : "er" (i), "m" (v->counter));
9845 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
9846 */
9847 static inline void atomic64_sub(long i, atomic64_t *v)
9848 {
9849 - asm volatile(LOCK_PREFIX "subq %1,%0"
9850 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9851 +
9852 +#ifdef CONFIG_PAX_REFCOUNT
9853 + "jno 0f\n"
9854 + LOCK_PREFIX "addq %1,%0\n"
9855 + "int $4\n0:\n"
9856 + _ASM_EXTABLE(0b, 0b)
9857 +#endif
9858 +
9859 + : "=m" (v->counter)
9860 + : "er" (i), "m" (v->counter));
9861 +}
9862 +
9863 +/**
9864 + * atomic64_sub_unchecked - subtract the atomic64 variable
9865 + * @i: integer value to subtract
9866 + * @v: pointer to type atomic64_unchecked_t
9867 + *
9868 + * Atomically subtracts @i from @v.
9869 + */
9870 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
9871 +{
9872 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
9873 : "=m" (v->counter)
9874 : "er" (i), "m" (v->counter));
9875 }
9876 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9877 {
9878 unsigned char c;
9879
9880 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9881 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
9882 +
9883 +#ifdef CONFIG_PAX_REFCOUNT
9884 + "jno 0f\n"
9885 + LOCK_PREFIX "addq %2,%0\n"
9886 + "int $4\n0:\n"
9887 + _ASM_EXTABLE(0b, 0b)
9888 +#endif
9889 +
9890 + "sete %1\n"
9891 : "=m" (v->counter), "=qm" (c)
9892 : "er" (i), "m" (v->counter) : "memory");
9893 return c;
9894 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9895 */
9896 static inline void atomic64_inc(atomic64_t *v)
9897 {
9898 + asm volatile(LOCK_PREFIX "incq %0\n"
9899 +
9900 +#ifdef CONFIG_PAX_REFCOUNT
9901 + "jno 0f\n"
9902 + LOCK_PREFIX "decq %0\n"
9903 + "int $4\n0:\n"
9904 + _ASM_EXTABLE(0b, 0b)
9905 +#endif
9906 +
9907 + : "=m" (v->counter)
9908 + : "m" (v->counter));
9909 +}
9910 +
9911 +/**
9912 + * atomic64_inc_unchecked - increment atomic64 variable
9913 + * @v: pointer to type atomic64_unchecked_t
9914 + *
9915 + * Atomically increments @v by 1.
9916 + */
9917 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9918 +{
9919 asm volatile(LOCK_PREFIX "incq %0"
9920 : "=m" (v->counter)
9921 : "m" (v->counter));
9922 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
9923 */
9924 static inline void atomic64_dec(atomic64_t *v)
9925 {
9926 - asm volatile(LOCK_PREFIX "decq %0"
9927 + asm volatile(LOCK_PREFIX "decq %0\n"
9928 +
9929 +#ifdef CONFIG_PAX_REFCOUNT
9930 + "jno 0f\n"
9931 + LOCK_PREFIX "incq %0\n"
9932 + "int $4\n0:\n"
9933 + _ASM_EXTABLE(0b, 0b)
9934 +#endif
9935 +
9936 + : "=m" (v->counter)
9937 + : "m" (v->counter));
9938 +}
9939 +
9940 +/**
9941 + * atomic64_dec_unchecked - decrement atomic64 variable
9942 + * @v: pointer to type atomic64_t
9943 + *
9944 + * Atomically decrements @v by 1.
9945 + */
9946 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9947 +{
9948 + asm volatile(LOCK_PREFIX "decq %0\n"
9949 : "=m" (v->counter)
9950 : "m" (v->counter));
9951 }
9952 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9953 {
9954 unsigned char c;
9955
9956 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
9957 + asm volatile(LOCK_PREFIX "decq %0\n"
9958 +
9959 +#ifdef CONFIG_PAX_REFCOUNT
9960 + "jno 0f\n"
9961 + LOCK_PREFIX "incq %0\n"
9962 + "int $4\n0:\n"
9963 + _ASM_EXTABLE(0b, 0b)
9964 +#endif
9965 +
9966 + "sete %1\n"
9967 : "=m" (v->counter), "=qm" (c)
9968 : "m" (v->counter) : "memory");
9969 return c != 0;
9970 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9971 {
9972 unsigned char c;
9973
9974 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
9975 + asm volatile(LOCK_PREFIX "incq %0\n"
9976 +
9977 +#ifdef CONFIG_PAX_REFCOUNT
9978 + "jno 0f\n"
9979 + LOCK_PREFIX "decq %0\n"
9980 + "int $4\n0:\n"
9981 + _ASM_EXTABLE(0b, 0b)
9982 +#endif
9983 +
9984 + "sete %1\n"
9985 : "=m" (v->counter), "=qm" (c)
9986 : "m" (v->counter) : "memory");
9987 return c != 0;
9988 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9989 {
9990 unsigned char c;
9991
9992 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9993 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
9994 +
9995 +#ifdef CONFIG_PAX_REFCOUNT
9996 + "jno 0f\n"
9997 + LOCK_PREFIX "subq %2,%0\n"
9998 + "int $4\n0:\n"
9999 + _ASM_EXTABLE(0b, 0b)
10000 +#endif
10001 +
10002 + "sets %1\n"
10003 : "=m" (v->counter), "=qm" (c)
10004 : "er" (i), "m" (v->counter) : "memory");
10005 return c;
10006 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10007 */
10008 static inline long atomic64_add_return(long i, atomic64_t *v)
10009 {
10010 + return i + xadd_check_overflow(&v->counter, i);
10011 +}
10012 +
10013 +/**
10014 + * atomic64_add_return_unchecked - add and return
10015 + * @i: integer value to add
10016 + * @v: pointer to type atomic64_unchecked_t
10017 + *
10018 + * Atomically adds @i to @v and returns @i + @v
10019 + */
10020 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
10021 +{
10022 return i + xadd(&v->counter, i);
10023 }
10024
10025 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
10026 }
10027
10028 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10029 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
10030 +{
10031 + return atomic64_add_return_unchecked(1, v);
10032 +}
10033 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
10034
10035 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10036 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
10037 return cmpxchg(&v->counter, old, new);
10038 }
10039
10040 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
10041 +{
10042 + return cmpxchg(&v->counter, old, new);
10043 +}
10044 +
10045 static inline long atomic64_xchg(atomic64_t *v, long new)
10046 {
10047 return xchg(&v->counter, new);
10048 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
10049 */
10050 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
10051 {
10052 - long c, old;
10053 + long c, old, new;
10054 c = atomic64_read(v);
10055 for (;;) {
10056 - if (unlikely(c == (u)))
10057 + if (unlikely(c == u))
10058 break;
10059 - old = atomic64_cmpxchg((v), c, c + (a));
10060 +
10061 + asm volatile("add %2,%0\n"
10062 +
10063 +#ifdef CONFIG_PAX_REFCOUNT
10064 + "jno 0f\n"
10065 + "sub %2,%0\n"
10066 + "int $4\n0:\n"
10067 + _ASM_EXTABLE(0b, 0b)
10068 +#endif
10069 +
10070 + : "=r" (new)
10071 + : "0" (c), "ir" (a));
10072 +
10073 + old = atomic64_cmpxchg(v, c, new);
10074 if (likely(old == c))
10075 break;
10076 c = old;
10077 }
10078 - return c != (u);
10079 + return c != u;
10080 }
10081
10082 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10083 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
10084 index b97596e..9bd48b06 100644
10085 --- a/arch/x86/include/asm/bitops.h
10086 +++ b/arch/x86/include/asm/bitops.h
10087 @@ -38,7 +38,7 @@
10088 * a mask operation on a byte.
10089 */
10090 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10091 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10092 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10093 #define CONST_MASK(nr) (1 << ((nr) & 7))
10094
10095 /**
10096 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10097 index 5e1a2ee..c9f9533 100644
10098 --- a/arch/x86/include/asm/boot.h
10099 +++ b/arch/x86/include/asm/boot.h
10100 @@ -11,10 +11,15 @@
10101 #include <asm/pgtable_types.h>
10102
10103 /* Physical address where kernel should be loaded. */
10104 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10105 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10106 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10107 & ~(CONFIG_PHYSICAL_ALIGN - 1))
10108
10109 +#ifndef __ASSEMBLY__
10110 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
10111 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
10112 +#endif
10113 +
10114 /* Minimum kernel alignment, as a power of two */
10115 #ifdef CONFIG_X86_64
10116 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
10117 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10118 index 48f99f1..d78ebf9 100644
10119 --- a/arch/x86/include/asm/cache.h
10120 +++ b/arch/x86/include/asm/cache.h
10121 @@ -5,12 +5,13 @@
10122
10123 /* L1 cache line size */
10124 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10125 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10126 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10127
10128 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10129 +#define __read_only __attribute__((__section__(".data..read_only")))
10130
10131 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
10132 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
10133 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
10134
10135 #ifdef CONFIG_X86_VSMP
10136 #ifdef CONFIG_SMP
10137 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
10138 index 9863ee3..4a1f8e1 100644
10139 --- a/arch/x86/include/asm/cacheflush.h
10140 +++ b/arch/x86/include/asm/cacheflush.h
10141 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
10142 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10143
10144 if (pg_flags == _PGMT_DEFAULT)
10145 - return -1;
10146 + return ~0UL;
10147 else if (pg_flags == _PGMT_WC)
10148 return _PAGE_CACHE_WC;
10149 else if (pg_flags == _PGMT_UC_MINUS)
10150 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10151 index 46fc474..b02b0f9 100644
10152 --- a/arch/x86/include/asm/checksum_32.h
10153 +++ b/arch/x86/include/asm/checksum_32.h
10154 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
10155 int len, __wsum sum,
10156 int *src_err_ptr, int *dst_err_ptr);
10157
10158 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10159 + int len, __wsum sum,
10160 + int *src_err_ptr, int *dst_err_ptr);
10161 +
10162 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10163 + int len, __wsum sum,
10164 + int *src_err_ptr, int *dst_err_ptr);
10165 +
10166 /*
10167 * Note: when you get a NULL pointer exception here this means someone
10168 * passed in an incorrect kernel address to one of these functions.
10169 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
10170 int *err_ptr)
10171 {
10172 might_sleep();
10173 - return csum_partial_copy_generic((__force void *)src, dst,
10174 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
10175 len, sum, err_ptr, NULL);
10176 }
10177
10178 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
10179 {
10180 might_sleep();
10181 if (access_ok(VERIFY_WRITE, dst, len))
10182 - return csum_partial_copy_generic(src, (__force void *)dst,
10183 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10184 len, sum, NULL, err_ptr);
10185
10186 if (len)
10187 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
10188 index 99480e5..d81165b 100644
10189 --- a/arch/x86/include/asm/cmpxchg.h
10190 +++ b/arch/x86/include/asm/cmpxchg.h
10191 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
10192 __compiletime_error("Bad argument size for cmpxchg");
10193 extern void __xadd_wrong_size(void)
10194 __compiletime_error("Bad argument size for xadd");
10195 +extern void __xadd_check_overflow_wrong_size(void)
10196 + __compiletime_error("Bad argument size for xadd_check_overflow");
10197 extern void __add_wrong_size(void)
10198 __compiletime_error("Bad argument size for add");
10199 +extern void __add_check_overflow_wrong_size(void)
10200 + __compiletime_error("Bad argument size for add_check_overflow");
10201
10202 /*
10203 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
10204 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
10205 __ret; \
10206 })
10207
10208 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
10209 + ({ \
10210 + __typeof__ (*(ptr)) __ret = (arg); \
10211 + switch (sizeof(*(ptr))) { \
10212 + case __X86_CASE_L: \
10213 + asm volatile (lock #op "l %0, %1\n" \
10214 + "jno 0f\n" \
10215 + "mov %0,%1\n" \
10216 + "int $4\n0:\n" \
10217 + _ASM_EXTABLE(0b, 0b) \
10218 + : "+r" (__ret), "+m" (*(ptr)) \
10219 + : : "memory", "cc"); \
10220 + break; \
10221 + case __X86_CASE_Q: \
10222 + asm volatile (lock #op "q %q0, %1\n" \
10223 + "jno 0f\n" \
10224 + "mov %0,%1\n" \
10225 + "int $4\n0:\n" \
10226 + _ASM_EXTABLE(0b, 0b) \
10227 + : "+r" (__ret), "+m" (*(ptr)) \
10228 + : : "memory", "cc"); \
10229 + break; \
10230 + default: \
10231 + __ ## op ## _check_overflow_wrong_size(); \
10232 + } \
10233 + __ret; \
10234 + })
10235 +
10236 /*
10237 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10238 * Since this is generally used to protect other memory information, we
10239 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
10240 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10241 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10242
10243 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10244 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
10245 +
10246 #define __add(ptr, inc, lock) \
10247 ({ \
10248 __typeof__ (*(ptr)) __ret = (inc); \
10249 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
10250 index f91e80f..7f9bd27 100644
10251 --- a/arch/x86/include/asm/cpufeature.h
10252 +++ b/arch/x86/include/asm/cpufeature.h
10253 @@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
10254 ".section .discard,\"aw\",@progbits\n"
10255 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
10256 ".previous\n"
10257 - ".section .altinstr_replacement,\"ax\"\n"
10258 + ".section .altinstr_replacement,\"a\"\n"
10259 "3: movb $1,%0\n"
10260 "4:\n"
10261 ".previous\n"
10262 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
10263 index e95822d..a90010e 100644
10264 --- a/arch/x86/include/asm/desc.h
10265 +++ b/arch/x86/include/asm/desc.h
10266 @@ -4,6 +4,7 @@
10267 #include <asm/desc_defs.h>
10268 #include <asm/ldt.h>
10269 #include <asm/mmu.h>
10270 +#include <asm/pgtable.h>
10271
10272 #include <linux/smp.h>
10273
10274 @@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10275
10276 desc->type = (info->read_exec_only ^ 1) << 1;
10277 desc->type |= info->contents << 2;
10278 + desc->type |= info->seg_not_present ^ 1;
10279
10280 desc->s = 1;
10281 desc->dpl = 0x3;
10282 @@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
10283 }
10284
10285 extern struct desc_ptr idt_descr;
10286 -extern gate_desc idt_table[];
10287 extern struct desc_ptr nmi_idt_descr;
10288 -extern gate_desc nmi_idt_table[];
10289 -
10290 -struct gdt_page {
10291 - struct desc_struct gdt[GDT_ENTRIES];
10292 -} __attribute__((aligned(PAGE_SIZE)));
10293 -
10294 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10295 +extern gate_desc idt_table[256];
10296 +extern gate_desc nmi_idt_table[256];
10297
10298 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10299 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10300 {
10301 - return per_cpu(gdt_page, cpu).gdt;
10302 + return cpu_gdt_table[cpu];
10303 }
10304
10305 #ifdef CONFIG_X86_64
10306 @@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
10307 unsigned long base, unsigned dpl, unsigned flags,
10308 unsigned short seg)
10309 {
10310 - gate->a = (seg << 16) | (base & 0xffff);
10311 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10312 + gate->gate.offset_low = base;
10313 + gate->gate.seg = seg;
10314 + gate->gate.reserved = 0;
10315 + gate->gate.type = type;
10316 + gate->gate.s = 0;
10317 + gate->gate.dpl = dpl;
10318 + gate->gate.p = 1;
10319 + gate->gate.offset_high = base >> 16;
10320 }
10321
10322 #endif
10323 @@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
10324
10325 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
10326 {
10327 + pax_open_kernel();
10328 memcpy(&idt[entry], gate, sizeof(*gate));
10329 + pax_close_kernel();
10330 }
10331
10332 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
10333 {
10334 + pax_open_kernel();
10335 memcpy(&ldt[entry], desc, 8);
10336 + pax_close_kernel();
10337 }
10338
10339 static inline void
10340 @@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
10341 default: size = sizeof(*gdt); break;
10342 }
10343
10344 + pax_open_kernel();
10345 memcpy(&gdt[entry], desc, size);
10346 + pax_close_kernel();
10347 }
10348
10349 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
10350 @@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
10351
10352 static inline void native_load_tr_desc(void)
10353 {
10354 + pax_open_kernel();
10355 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
10356 + pax_close_kernel();
10357 }
10358
10359 static inline void native_load_gdt(const struct desc_ptr *dtr)
10360 @@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
10361 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
10362 unsigned int i;
10363
10364 + pax_open_kernel();
10365 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10366 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
10367 + pax_close_kernel();
10368 }
10369
10370 #define _LDT_empty(info) \
10371 @@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
10372 }
10373
10374 #ifdef CONFIG_X86_64
10375 -static inline void set_nmi_gate(int gate, void *addr)
10376 +static inline void set_nmi_gate(int gate, const void *addr)
10377 {
10378 gate_desc s;
10379
10380 @@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10381 }
10382 #endif
10383
10384 -static inline void _set_gate(int gate, unsigned type, void *addr,
10385 +static inline void _set_gate(int gate, unsigned type, const void *addr,
10386 unsigned dpl, unsigned ist, unsigned seg)
10387 {
10388 gate_desc s;
10389 @@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
10390 * Pentium F0 0F bugfix can have resulted in the mapped
10391 * IDT being write-protected.
10392 */
10393 -static inline void set_intr_gate(unsigned int n, void *addr)
10394 +static inline void set_intr_gate(unsigned int n, const void *addr)
10395 {
10396 BUG_ON((unsigned)n > 0xFF);
10397 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
10398 @@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
10399 /*
10400 * This routine sets up an interrupt gate at directory privilege level 3.
10401 */
10402 -static inline void set_system_intr_gate(unsigned int n, void *addr)
10403 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
10404 {
10405 BUG_ON((unsigned)n > 0xFF);
10406 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10407 }
10408
10409 -static inline void set_system_trap_gate(unsigned int n, void *addr)
10410 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
10411 {
10412 BUG_ON((unsigned)n > 0xFF);
10413 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10414 }
10415
10416 -static inline void set_trap_gate(unsigned int n, void *addr)
10417 +static inline void set_trap_gate(unsigned int n, const void *addr)
10418 {
10419 BUG_ON((unsigned)n > 0xFF);
10420 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
10421 @@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
10422 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10423 {
10424 BUG_ON((unsigned)n > 0xFF);
10425 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10426 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10427 }
10428
10429 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10430 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10431 {
10432 BUG_ON((unsigned)n > 0xFF);
10433 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10434 }
10435
10436 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10437 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10438 {
10439 BUG_ON((unsigned)n > 0xFF);
10440 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10441 }
10442
10443 +#ifdef CONFIG_X86_32
10444 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10445 +{
10446 + struct desc_struct d;
10447 +
10448 + if (likely(limit))
10449 + limit = (limit - 1UL) >> PAGE_SHIFT;
10450 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
10451 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10452 +}
10453 +#endif
10454 +
10455 #endif /* _ASM_X86_DESC_H */
10456 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10457 index 278441f..b95a174 100644
10458 --- a/arch/x86/include/asm/desc_defs.h
10459 +++ b/arch/x86/include/asm/desc_defs.h
10460 @@ -31,6 +31,12 @@ struct desc_struct {
10461 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10462 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10463 };
10464 + struct {
10465 + u16 offset_low;
10466 + u16 seg;
10467 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10468 + unsigned offset_high: 16;
10469 + } gate;
10470 };
10471 } __attribute__((packed));
10472
10473 diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
10474 index 3778256..c5d4fce 100644
10475 --- a/arch/x86/include/asm/e820.h
10476 +++ b/arch/x86/include/asm/e820.h
10477 @@ -69,7 +69,7 @@ struct e820map {
10478 #define ISA_START_ADDRESS 0xa0000
10479 #define ISA_END_ADDRESS 0x100000
10480
10481 -#define BIOS_BEGIN 0x000a0000
10482 +#define BIOS_BEGIN 0x000c0000
10483 #define BIOS_END 0x00100000
10484
10485 #define BIOS_ROM_BASE 0xffe00000
10486 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
10487 index 5939f44..f8845f6 100644
10488 --- a/arch/x86/include/asm/elf.h
10489 +++ b/arch/x86/include/asm/elf.h
10490 @@ -243,7 +243,25 @@ extern int force_personality32;
10491 the loader. We need to make sure that it is out of the way of the program
10492 that it will "exec", and that there is sufficient room for the brk. */
10493
10494 +#ifdef CONFIG_PAX_SEGMEXEC
10495 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10496 +#else
10497 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10498 +#endif
10499 +
10500 +#ifdef CONFIG_PAX_ASLR
10501 +#ifdef CONFIG_X86_32
10502 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10503 +
10504 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10505 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10506 +#else
10507 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
10508 +
10509 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10510 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10511 +#endif
10512 +#endif
10513
10514 /* This yields a mask that user programs can use to figure out what
10515 instruction set this CPU supports. This could be done in user space,
10516 @@ -296,16 +314,12 @@ do { \
10517
10518 #define ARCH_DLINFO \
10519 do { \
10520 - if (vdso_enabled) \
10521 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10522 - (unsigned long)current->mm->context.vdso); \
10523 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10524 } while (0)
10525
10526 #define ARCH_DLINFO_X32 \
10527 do { \
10528 - if (vdso_enabled) \
10529 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10530 - (unsigned long)current->mm->context.vdso); \
10531 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10532 } while (0)
10533
10534 #define AT_SYSINFO 32
10535 @@ -320,7 +334,7 @@ else \
10536
10537 #endif /* !CONFIG_X86_32 */
10538
10539 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10540 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10541
10542 #define VDSO_ENTRY \
10543 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10544 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
10545 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10546 #define compat_arch_setup_additional_pages syscall32_setup_pages
10547
10548 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10549 -#define arch_randomize_brk arch_randomize_brk
10550 -
10551 /*
10552 * True on X86_32 or when emulating IA32 on X86_64
10553 */
10554 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10555 index cc70c1c..d96d011 100644
10556 --- a/arch/x86/include/asm/emergency-restart.h
10557 +++ b/arch/x86/include/asm/emergency-restart.h
10558 @@ -15,6 +15,6 @@ enum reboot_type {
10559
10560 extern enum reboot_type reboot_type;
10561
10562 -extern void machine_emergency_restart(void);
10563 +extern void machine_emergency_restart(void) __noreturn;
10564
10565 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10566 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10567 index 4fa8815..71b121a 100644
10568 --- a/arch/x86/include/asm/fpu-internal.h
10569 +++ b/arch/x86/include/asm/fpu-internal.h
10570 @@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10571 {
10572 int err;
10573
10574 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10575 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10576 + fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10577 +#endif
10578 +
10579 /* See comment in fxsave() below. */
10580 #ifdef CONFIG_AS_FXSAVEQ
10581 asm volatile("1: fxrstorq %[fx]\n\t"
10582 @@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10583 {
10584 int err;
10585
10586 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10587 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10588 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10589 +#endif
10590 +
10591 /*
10592 * Clear the bytes not touched by the fxsave and reserved
10593 * for the SW usage.
10594 @@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10595 "emms\n\t" /* clear stack tags */
10596 "fildl %P[addr]", /* set F?P to defined value */
10597 X86_FEATURE_FXSAVE_LEAK,
10598 - [addr] "m" (tsk->thread.fpu.has_fpu));
10599 + [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10600
10601 return fpu_restore_checking(&tsk->thread.fpu);
10602 }
10603 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10604 index 71ecbcb..bac10b7 100644
10605 --- a/arch/x86/include/asm/futex.h
10606 +++ b/arch/x86/include/asm/futex.h
10607 @@ -11,16 +11,18 @@
10608 #include <asm/processor.h>
10609
10610 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10611 + typecheck(u32 __user *, uaddr); \
10612 asm volatile("1:\t" insn "\n" \
10613 "2:\t.section .fixup,\"ax\"\n" \
10614 "3:\tmov\t%3, %1\n" \
10615 "\tjmp\t2b\n" \
10616 "\t.previous\n" \
10617 _ASM_EXTABLE(1b, 3b) \
10618 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10619 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10620 : "i" (-EFAULT), "0" (oparg), "1" (0))
10621
10622 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10623 + typecheck(u32 __user *, uaddr); \
10624 asm volatile("1:\tmovl %2, %0\n" \
10625 "\tmovl\t%0, %3\n" \
10626 "\t" insn "\n" \
10627 @@ -33,7 +35,7 @@
10628 _ASM_EXTABLE(1b, 4b) \
10629 _ASM_EXTABLE(2b, 4b) \
10630 : "=&a" (oldval), "=&r" (ret), \
10631 - "+m" (*uaddr), "=&r" (tem) \
10632 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10633 : "r" (oparg), "i" (-EFAULT), "1" (0))
10634
10635 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10636 @@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10637
10638 switch (op) {
10639 case FUTEX_OP_SET:
10640 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10641 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10642 break;
10643 case FUTEX_OP_ADD:
10644 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10645 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10646 uaddr, oparg);
10647 break;
10648 case FUTEX_OP_OR:
10649 @@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
10650 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10651 return -EFAULT;
10652
10653 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10654 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
10655 "2:\t.section .fixup, \"ax\"\n"
10656 "3:\tmov %3, %0\n"
10657 "\tjmp 2b\n"
10658 "\t.previous\n"
10659 _ASM_EXTABLE(1b, 3b)
10660 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
10661 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
10662 : "i" (-EFAULT), "r" (newval), "1" (oldval)
10663 : "memory"
10664 );
10665 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10666 index eb92a6e..b98b2f4 100644
10667 --- a/arch/x86/include/asm/hw_irq.h
10668 +++ b/arch/x86/include/asm/hw_irq.h
10669 @@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
10670 extern void enable_IO_APIC(void);
10671
10672 /* Statistics */
10673 -extern atomic_t irq_err_count;
10674 -extern atomic_t irq_mis_count;
10675 +extern atomic_unchecked_t irq_err_count;
10676 +extern atomic_unchecked_t irq_mis_count;
10677
10678 /* EISA */
10679 extern void eisa_set_level_irq(unsigned int irq);
10680 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10681 index d8e8eef..99f81ae 100644
10682 --- a/arch/x86/include/asm/io.h
10683 +++ b/arch/x86/include/asm/io.h
10684 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
10685
10686 #include <linux/vmalloc.h>
10687
10688 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10689 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10690 +{
10691 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10692 +}
10693 +
10694 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10695 +{
10696 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10697 +}
10698 +
10699 /*
10700 * Convert a virtual cached pointer to an uncached pointer
10701 */
10702 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10703 index bba3cf8..06bc8da 100644
10704 --- a/arch/x86/include/asm/irqflags.h
10705 +++ b/arch/x86/include/asm/irqflags.h
10706 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
10707 sti; \
10708 sysexit
10709
10710 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
10711 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10712 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
10713 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10714 +
10715 #else
10716 #define INTERRUPT_RETURN iret
10717 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10718 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10719 index 5478825..839e88c 100644
10720 --- a/arch/x86/include/asm/kprobes.h
10721 +++ b/arch/x86/include/asm/kprobes.h
10722 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10723 #define RELATIVEJUMP_SIZE 5
10724 #define RELATIVECALL_OPCODE 0xe8
10725 #define RELATIVE_ADDR_SIZE 4
10726 -#define MAX_STACK_SIZE 64
10727 -#define MIN_STACK_SIZE(ADDR) \
10728 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10729 - THREAD_SIZE - (unsigned long)(ADDR))) \
10730 - ? (MAX_STACK_SIZE) \
10731 - : (((unsigned long)current_thread_info()) + \
10732 - THREAD_SIZE - (unsigned long)(ADDR)))
10733 +#define MAX_STACK_SIZE 64UL
10734 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10735
10736 #define flush_insn_slot(p) do { } while (0)
10737
10738 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10739 index e216ba0..453f6ec 100644
10740 --- a/arch/x86/include/asm/kvm_host.h
10741 +++ b/arch/x86/include/asm/kvm_host.h
10742 @@ -679,7 +679,7 @@ struct kvm_x86_ops {
10743 int (*check_intercept)(struct kvm_vcpu *vcpu,
10744 struct x86_instruction_info *info,
10745 enum x86_intercept_stage stage);
10746 -};
10747 +} __do_const;
10748
10749 struct kvm_arch_async_pf {
10750 u32 token;
10751 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10752 index c8bed0d..e5721fa 100644
10753 --- a/arch/x86/include/asm/local.h
10754 +++ b/arch/x86/include/asm/local.h
10755 @@ -17,26 +17,58 @@ typedef struct {
10756
10757 static inline void local_inc(local_t *l)
10758 {
10759 - asm volatile(_ASM_INC "%0"
10760 + asm volatile(_ASM_INC "%0\n"
10761 +
10762 +#ifdef CONFIG_PAX_REFCOUNT
10763 + "jno 0f\n"
10764 + _ASM_DEC "%0\n"
10765 + "int $4\n0:\n"
10766 + _ASM_EXTABLE(0b, 0b)
10767 +#endif
10768 +
10769 : "+m" (l->a.counter));
10770 }
10771
10772 static inline void local_dec(local_t *l)
10773 {
10774 - asm volatile(_ASM_DEC "%0"
10775 + asm volatile(_ASM_DEC "%0\n"
10776 +
10777 +#ifdef CONFIG_PAX_REFCOUNT
10778 + "jno 0f\n"
10779 + _ASM_INC "%0\n"
10780 + "int $4\n0:\n"
10781 + _ASM_EXTABLE(0b, 0b)
10782 +#endif
10783 +
10784 : "+m" (l->a.counter));
10785 }
10786
10787 static inline void local_add(long i, local_t *l)
10788 {
10789 - asm volatile(_ASM_ADD "%1,%0"
10790 + asm volatile(_ASM_ADD "%1,%0\n"
10791 +
10792 +#ifdef CONFIG_PAX_REFCOUNT
10793 + "jno 0f\n"
10794 + _ASM_SUB "%1,%0\n"
10795 + "int $4\n0:\n"
10796 + _ASM_EXTABLE(0b, 0b)
10797 +#endif
10798 +
10799 : "+m" (l->a.counter)
10800 : "ir" (i));
10801 }
10802
10803 static inline void local_sub(long i, local_t *l)
10804 {
10805 - asm volatile(_ASM_SUB "%1,%0"
10806 + asm volatile(_ASM_SUB "%1,%0\n"
10807 +
10808 +#ifdef CONFIG_PAX_REFCOUNT
10809 + "jno 0f\n"
10810 + _ASM_ADD "%1,%0\n"
10811 + "int $4\n0:\n"
10812 + _ASM_EXTABLE(0b, 0b)
10813 +#endif
10814 +
10815 : "+m" (l->a.counter)
10816 : "ir" (i));
10817 }
10818 @@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10819 {
10820 unsigned char c;
10821
10822 - asm volatile(_ASM_SUB "%2,%0; sete %1"
10823 + asm volatile(_ASM_SUB "%2,%0\n"
10824 +
10825 +#ifdef CONFIG_PAX_REFCOUNT
10826 + "jno 0f\n"
10827 + _ASM_ADD "%2,%0\n"
10828 + "int $4\n0:\n"
10829 + _ASM_EXTABLE(0b, 0b)
10830 +#endif
10831 +
10832 + "sete %1\n"
10833 : "+m" (l->a.counter), "=qm" (c)
10834 : "ir" (i) : "memory");
10835 return c;
10836 @@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
10837 {
10838 unsigned char c;
10839
10840 - asm volatile(_ASM_DEC "%0; sete %1"
10841 + asm volatile(_ASM_DEC "%0\n"
10842 +
10843 +#ifdef CONFIG_PAX_REFCOUNT
10844 + "jno 0f\n"
10845 + _ASM_INC "%0\n"
10846 + "int $4\n0:\n"
10847 + _ASM_EXTABLE(0b, 0b)
10848 +#endif
10849 +
10850 + "sete %1\n"
10851 : "+m" (l->a.counter), "=qm" (c)
10852 : : "memory");
10853 return c != 0;
10854 @@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
10855 {
10856 unsigned char c;
10857
10858 - asm volatile(_ASM_INC "%0; sete %1"
10859 + asm volatile(_ASM_INC "%0\n"
10860 +
10861 +#ifdef CONFIG_PAX_REFCOUNT
10862 + "jno 0f\n"
10863 + _ASM_DEC "%0\n"
10864 + "int $4\n0:\n"
10865 + _ASM_EXTABLE(0b, 0b)
10866 +#endif
10867 +
10868 + "sete %1\n"
10869 : "+m" (l->a.counter), "=qm" (c)
10870 : : "memory");
10871 return c != 0;
10872 @@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
10873 {
10874 unsigned char c;
10875
10876 - asm volatile(_ASM_ADD "%2,%0; sets %1"
10877 + asm volatile(_ASM_ADD "%2,%0\n"
10878 +
10879 +#ifdef CONFIG_PAX_REFCOUNT
10880 + "jno 0f\n"
10881 + _ASM_SUB "%2,%0\n"
10882 + "int $4\n0:\n"
10883 + _ASM_EXTABLE(0b, 0b)
10884 +#endif
10885 +
10886 + "sets %1\n"
10887 : "+m" (l->a.counter), "=qm" (c)
10888 : "ir" (i) : "memory");
10889 return c;
10890 @@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
10891 #endif
10892 /* Modern 486+ processor */
10893 __i = i;
10894 - asm volatile(_ASM_XADD "%0, %1;"
10895 + asm volatile(_ASM_XADD "%0, %1\n"
10896 +
10897 +#ifdef CONFIG_PAX_REFCOUNT
10898 + "jno 0f\n"
10899 + _ASM_MOV "%0,%1\n"
10900 + "int $4\n0:\n"
10901 + _ASM_EXTABLE(0b, 0b)
10902 +#endif
10903 +
10904 : "+r" (i), "+m" (l->a.counter)
10905 : : "memory");
10906 return i + __i;
10907 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10908 index 593e51d..fa69c9a 100644
10909 --- a/arch/x86/include/asm/mman.h
10910 +++ b/arch/x86/include/asm/mman.h
10911 @@ -5,4 +5,14 @@
10912
10913 #include <asm-generic/mman.h>
10914
10915 +#ifdef __KERNEL__
10916 +#ifndef __ASSEMBLY__
10917 +#ifdef CONFIG_X86_32
10918 +#define arch_mmap_check i386_mmap_check
10919 +int i386_mmap_check(unsigned long addr, unsigned long len,
10920 + unsigned long flags);
10921 +#endif
10922 +#endif
10923 +#endif
10924 +
10925 #endif /* _ASM_X86_MMAN_H */
10926 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10927 index 5f55e69..e20bfb1 100644
10928 --- a/arch/x86/include/asm/mmu.h
10929 +++ b/arch/x86/include/asm/mmu.h
10930 @@ -9,7 +9,7 @@
10931 * we put the segment information here.
10932 */
10933 typedef struct {
10934 - void *ldt;
10935 + struct desc_struct *ldt;
10936 int size;
10937
10938 #ifdef CONFIG_X86_64
10939 @@ -18,7 +18,19 @@ typedef struct {
10940 #endif
10941
10942 struct mutex lock;
10943 - void *vdso;
10944 + unsigned long vdso;
10945 +
10946 +#ifdef CONFIG_X86_32
10947 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10948 + unsigned long user_cs_base;
10949 + unsigned long user_cs_limit;
10950 +
10951 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10952 + cpumask_t cpu_user_cs_mask;
10953 +#endif
10954 +
10955 +#endif
10956 +#endif
10957 } mm_context_t;
10958
10959 #ifdef CONFIG_SMP
10960 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10961 index 6902152..da4283a 100644
10962 --- a/arch/x86/include/asm/mmu_context.h
10963 +++ b/arch/x86/include/asm/mmu_context.h
10964 @@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10965
10966 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10967 {
10968 +
10969 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10970 + unsigned int i;
10971 + pgd_t *pgd;
10972 +
10973 + pax_open_kernel();
10974 + pgd = get_cpu_pgd(smp_processor_id());
10975 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10976 + set_pgd_batched(pgd+i, native_make_pgd(0));
10977 + pax_close_kernel();
10978 +#endif
10979 +
10980 #ifdef CONFIG_SMP
10981 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10982 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10983 @@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10984 struct task_struct *tsk)
10985 {
10986 unsigned cpu = smp_processor_id();
10987 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10988 + int tlbstate = TLBSTATE_OK;
10989 +#endif
10990
10991 if (likely(prev != next)) {
10992 #ifdef CONFIG_SMP
10993 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10994 + tlbstate = percpu_read(cpu_tlbstate.state);
10995 +#endif
10996 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10997 percpu_write(cpu_tlbstate.active_mm, next);
10998 #endif
10999 cpumask_set_cpu(cpu, mm_cpumask(next));
11000
11001 /* Re-load page tables */
11002 +#ifdef CONFIG_PAX_PER_CPU_PGD
11003 + pax_open_kernel();
11004 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11005 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11006 + pax_close_kernel();
11007 + load_cr3(get_cpu_pgd(cpu));
11008 +#else
11009 load_cr3(next->pgd);
11010 +#endif
11011
11012 /* stop flush ipis for the previous mm */
11013 cpumask_clear_cpu(cpu, mm_cpumask(prev));
11014 @@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11015 */
11016 if (unlikely(prev->context.ldt != next->context.ldt))
11017 load_LDT_nolock(&next->context);
11018 - }
11019 +
11020 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
11021 + if (!(__supported_pte_mask & _PAGE_NX)) {
11022 + smp_mb__before_clear_bit();
11023 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11024 + smp_mb__after_clear_bit();
11025 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11026 + }
11027 +#endif
11028 +
11029 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11030 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
11031 + prev->context.user_cs_limit != next->context.user_cs_limit))
11032 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11033 #ifdef CONFIG_SMP
11034 + else if (unlikely(tlbstate != TLBSTATE_OK))
11035 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11036 +#endif
11037 +#endif
11038 +
11039 + }
11040 else {
11041 +
11042 +#ifdef CONFIG_PAX_PER_CPU_PGD
11043 + pax_open_kernel();
11044 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11045 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
11046 + pax_close_kernel();
11047 + load_cr3(get_cpu_pgd(cpu));
11048 +#endif
11049 +
11050 +#ifdef CONFIG_SMP
11051 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11052 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11053
11054 @@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
11055 * tlb flush IPI delivery. We must reload CR3
11056 * to make sure to use no freed page tables.
11057 */
11058 +
11059 +#ifndef CONFIG_PAX_PER_CPU_PGD
11060 load_cr3(next->pgd);
11061 +#endif
11062 +
11063 load_LDT_nolock(&next->context);
11064 +
11065 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
11066 + if (!(__supported_pte_mask & _PAGE_NX))
11067 + cpu_set(cpu, next->context.cpu_user_cs_mask);
11068 +#endif
11069 +
11070 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11071 +#ifdef CONFIG_PAX_PAGEEXEC
11072 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
11073 +#endif
11074 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11075 +#endif
11076 +
11077 }
11078 +#endif
11079 }
11080 -#endif
11081 }
11082
11083 #define activate_mm(prev, next) \
11084 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11085 index 9eae775..c914fea 100644
11086 --- a/arch/x86/include/asm/module.h
11087 +++ b/arch/x86/include/asm/module.h
11088 @@ -5,6 +5,7 @@
11089
11090 #ifdef CONFIG_X86_64
11091 /* X86_64 does not define MODULE_PROC_FAMILY */
11092 +#define MODULE_PROC_FAMILY ""
11093 #elif defined CONFIG_M386
11094 #define MODULE_PROC_FAMILY "386 "
11095 #elif defined CONFIG_M486
11096 @@ -59,8 +60,20 @@
11097 #error unknown processor family
11098 #endif
11099
11100 -#ifdef CONFIG_X86_32
11101 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
11102 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11103 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11104 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11105 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
11106 +#else
11107 +#define MODULE_PAX_KERNEXEC ""
11108 #endif
11109
11110 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11111 +#define MODULE_PAX_UDEREF "UDEREF "
11112 +#else
11113 +#define MODULE_PAX_UDEREF ""
11114 +#endif
11115 +
11116 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
11117 +
11118 #endif /* _ASM_X86_MODULE_H */
11119 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11120 index 7639dbf..e08a58c 100644
11121 --- a/arch/x86/include/asm/page_64_types.h
11122 +++ b/arch/x86/include/asm/page_64_types.h
11123 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11124
11125 /* duplicated to the one in bootmem.h */
11126 extern unsigned long max_pfn;
11127 -extern unsigned long phys_base;
11128 +extern const unsigned long phys_base;
11129
11130 extern unsigned long __phys_addr(unsigned long);
11131 #define __phys_reloc_hide(x) (x)
11132 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
11133 index aa0f913..0c5bc6a 100644
11134 --- a/arch/x86/include/asm/paravirt.h
11135 +++ b/arch/x86/include/asm/paravirt.h
11136 @@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
11137 val);
11138 }
11139
11140 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11141 +{
11142 + pgdval_t val = native_pgd_val(pgd);
11143 +
11144 + if (sizeof(pgdval_t) > sizeof(long))
11145 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11146 + val, (u64)val >> 32);
11147 + else
11148 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11149 + val);
11150 +}
11151 +
11152 static inline void pgd_clear(pgd_t *pgdp)
11153 {
11154 set_pgd(pgdp, __pgd(0));
11155 @@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
11156 pv_mmu_ops.set_fixmap(idx, phys, flags);
11157 }
11158
11159 +#ifdef CONFIG_PAX_KERNEXEC
11160 +static inline unsigned long pax_open_kernel(void)
11161 +{
11162 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
11163 +}
11164 +
11165 +static inline unsigned long pax_close_kernel(void)
11166 +{
11167 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
11168 +}
11169 +#else
11170 +static inline unsigned long pax_open_kernel(void) { return 0; }
11171 +static inline unsigned long pax_close_kernel(void) { return 0; }
11172 +#endif
11173 +
11174 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11175
11176 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
11177 @@ -965,7 +992,7 @@ extern void default_banner(void);
11178
11179 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11180 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11181 -#define PARA_INDIRECT(addr) *%cs:addr
11182 +#define PARA_INDIRECT(addr) *%ss:addr
11183 #endif
11184
11185 #define INTERRUPT_RETURN \
11186 @@ -1042,6 +1069,21 @@ extern void default_banner(void);
11187 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
11188 CLBR_NONE, \
11189 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
11190 +
11191 +#define GET_CR0_INTO_RDI \
11192 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
11193 + mov %rax,%rdi
11194 +
11195 +#define SET_RDI_INTO_CR0 \
11196 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11197 +
11198 +#define GET_CR3_INTO_RDI \
11199 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11200 + mov %rax,%rdi
11201 +
11202 +#define SET_RDI_INTO_CR3 \
11203 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
11204 +
11205 #endif /* CONFIG_X86_32 */
11206
11207 #endif /* __ASSEMBLY__ */
11208 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11209 index 8e8b9a4..f07d725 100644
11210 --- a/arch/x86/include/asm/paravirt_types.h
11211 +++ b/arch/x86/include/asm/paravirt_types.h
11212 @@ -84,20 +84,20 @@ struct pv_init_ops {
11213 */
11214 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11215 unsigned long addr, unsigned len);
11216 -};
11217 +} __no_const;
11218
11219
11220 struct pv_lazy_ops {
11221 /* Set deferred update mode, used for batching operations. */
11222 void (*enter)(void);
11223 void (*leave)(void);
11224 -};
11225 +} __no_const;
11226
11227 struct pv_time_ops {
11228 unsigned long long (*sched_clock)(void);
11229 unsigned long long (*steal_clock)(int cpu);
11230 unsigned long (*get_tsc_khz)(void);
11231 -};
11232 +} __no_const;
11233
11234 struct pv_cpu_ops {
11235 /* hooks for various privileged instructions */
11236 @@ -193,7 +193,7 @@ struct pv_cpu_ops {
11237
11238 void (*start_context_switch)(struct task_struct *prev);
11239 void (*end_context_switch)(struct task_struct *next);
11240 -};
11241 +} __no_const;
11242
11243 struct pv_irq_ops {
11244 /*
11245 @@ -224,7 +224,7 @@ struct pv_apic_ops {
11246 unsigned long start_eip,
11247 unsigned long start_esp);
11248 #endif
11249 -};
11250 +} __no_const;
11251
11252 struct pv_mmu_ops {
11253 unsigned long (*read_cr2)(void);
11254 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
11255 struct paravirt_callee_save make_pud;
11256
11257 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11258 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11259 #endif /* PAGETABLE_LEVELS == 4 */
11260 #endif /* PAGETABLE_LEVELS >= 3 */
11261
11262 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
11263 an mfn. We can tell which is which from the index. */
11264 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11265 phys_addr_t phys, pgprot_t flags);
11266 +
11267 +#ifdef CONFIG_PAX_KERNEXEC
11268 + unsigned long (*pax_open_kernel)(void);
11269 + unsigned long (*pax_close_kernel)(void);
11270 +#endif
11271 +
11272 };
11273
11274 struct arch_spinlock;
11275 @@ -334,7 +341,7 @@ struct pv_lock_ops {
11276 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11277 int (*spin_trylock)(struct arch_spinlock *lock);
11278 void (*spin_unlock)(struct arch_spinlock *lock);
11279 -};
11280 +} __no_const;
11281
11282 /* This contains all the paravirt structures: we get a convenient
11283 * number for each function using the offset which we use to indicate
11284 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
11285 index b4389a4..7024269 100644
11286 --- a/arch/x86/include/asm/pgalloc.h
11287 +++ b/arch/x86/include/asm/pgalloc.h
11288 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
11289 pmd_t *pmd, pte_t *pte)
11290 {
11291 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11292 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11293 +}
11294 +
11295 +static inline void pmd_populate_user(struct mm_struct *mm,
11296 + pmd_t *pmd, pte_t *pte)
11297 +{
11298 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11299 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11300 }
11301
11302 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11303
11304 #ifdef CONFIG_X86_PAE
11305 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11306 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11307 +{
11308 + pud_populate(mm, pudp, pmd);
11309 +}
11310 #else /* !CONFIG_X86_PAE */
11311 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11312 {
11313 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11314 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11315 }
11316 +
11317 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11318 +{
11319 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11320 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11321 +}
11322 #endif /* CONFIG_X86_PAE */
11323
11324 #if PAGETABLE_LEVELS > 3
11325 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11326 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11327 }
11328
11329 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11330 +{
11331 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11332 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11333 +}
11334 +
11335 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11336 {
11337 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
11338 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11339 index 98391db..8f6984e 100644
11340 --- a/arch/x86/include/asm/pgtable-2level.h
11341 +++ b/arch/x86/include/asm/pgtable-2level.h
11342 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
11343
11344 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11345 {
11346 + pax_open_kernel();
11347 *pmdp = pmd;
11348 + pax_close_kernel();
11349 }
11350
11351 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11352 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
11353 index cb00ccc..17e9054 100644
11354 --- a/arch/x86/include/asm/pgtable-3level.h
11355 +++ b/arch/x86/include/asm/pgtable-3level.h
11356 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11357
11358 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11359 {
11360 + pax_open_kernel();
11361 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
11362 + pax_close_kernel();
11363 }
11364
11365 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11366 {
11367 + pax_open_kernel();
11368 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
11369 + pax_close_kernel();
11370 }
11371
11372 /*
11373 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
11374 index 49afb3f..91a8c63 100644
11375 --- a/arch/x86/include/asm/pgtable.h
11376 +++ b/arch/x86/include/asm/pgtable.h
11377 @@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11378
11379 #ifndef __PAGETABLE_PUD_FOLDED
11380 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11381 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11382 #define pgd_clear(pgd) native_pgd_clear(pgd)
11383 #endif
11384
11385 @@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
11386
11387 #define arch_end_context_switch(prev) do {} while(0)
11388
11389 +#define pax_open_kernel() native_pax_open_kernel()
11390 +#define pax_close_kernel() native_pax_close_kernel()
11391 #endif /* CONFIG_PARAVIRT */
11392
11393 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
11394 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
11395 +
11396 +#ifdef CONFIG_PAX_KERNEXEC
11397 +static inline unsigned long native_pax_open_kernel(void)
11398 +{
11399 + unsigned long cr0;
11400 +
11401 + preempt_disable();
11402 + barrier();
11403 + cr0 = read_cr0() ^ X86_CR0_WP;
11404 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
11405 + write_cr0(cr0);
11406 + return cr0 ^ X86_CR0_WP;
11407 +}
11408 +
11409 +static inline unsigned long native_pax_close_kernel(void)
11410 +{
11411 + unsigned long cr0;
11412 +
11413 + cr0 = read_cr0() ^ X86_CR0_WP;
11414 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11415 + write_cr0(cr0);
11416 + barrier();
11417 + preempt_enable_no_resched();
11418 + return cr0 ^ X86_CR0_WP;
11419 +}
11420 +#else
11421 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
11422 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
11423 +#endif
11424 +
11425 /*
11426 * The following only work if pte_present() is true.
11427 * Undefined behaviour if not..
11428 */
11429 +static inline int pte_user(pte_t pte)
11430 +{
11431 + return pte_val(pte) & _PAGE_USER;
11432 +}
11433 +
11434 static inline int pte_dirty(pte_t pte)
11435 {
11436 return pte_flags(pte) & _PAGE_DIRTY;
11437 @@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11438 return pte_clear_flags(pte, _PAGE_RW);
11439 }
11440
11441 +static inline pte_t pte_mkread(pte_t pte)
11442 +{
11443 + return __pte(pte_val(pte) | _PAGE_USER);
11444 +}
11445 +
11446 static inline pte_t pte_mkexec(pte_t pte)
11447 {
11448 - return pte_clear_flags(pte, _PAGE_NX);
11449 +#ifdef CONFIG_X86_PAE
11450 + if (__supported_pte_mask & _PAGE_NX)
11451 + return pte_clear_flags(pte, _PAGE_NX);
11452 + else
11453 +#endif
11454 + return pte_set_flags(pte, _PAGE_USER);
11455 +}
11456 +
11457 +static inline pte_t pte_exprotect(pte_t pte)
11458 +{
11459 +#ifdef CONFIG_X86_PAE
11460 + if (__supported_pte_mask & _PAGE_NX)
11461 + return pte_set_flags(pte, _PAGE_NX);
11462 + else
11463 +#endif
11464 + return pte_clear_flags(pte, _PAGE_USER);
11465 }
11466
11467 static inline pte_t pte_mkdirty(pte_t pte)
11468 @@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11469 #endif
11470
11471 #ifndef __ASSEMBLY__
11472 +
11473 +#ifdef CONFIG_PAX_PER_CPU_PGD
11474 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11475 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11476 +{
11477 + return cpu_pgd[cpu];
11478 +}
11479 +#endif
11480 +
11481 #include <linux/mm_types.h>
11482
11483 static inline int pte_none(pte_t pte)
11484 @@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11485
11486 static inline int pgd_bad(pgd_t pgd)
11487 {
11488 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11489 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11490 }
11491
11492 static inline int pgd_none(pgd_t pgd)
11493 @@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
11494 * pgd_offset() returns a (pgd_t *)
11495 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11496 */
11497 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11498 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11499 +
11500 +#ifdef CONFIG_PAX_PER_CPU_PGD
11501 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11502 +#endif
11503 +
11504 /*
11505 * a shortcut which implies the use of the kernel's pgd, instead
11506 * of a process's
11507 @@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
11508 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11509 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11510
11511 +#ifdef CONFIG_X86_32
11512 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11513 +#else
11514 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11515 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11516 +
11517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11518 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11519 +#else
11520 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11521 +#endif
11522 +
11523 +#endif
11524 +
11525 #ifndef __ASSEMBLY__
11526
11527 extern int direct_gbpages;
11528 @@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
11529 * dst and src can be on the same page, but the range must not overlap,
11530 * and must not cross a page boundary.
11531 */
11532 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11533 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11534 {
11535 - memcpy(dst, src, count * sizeof(pgd_t));
11536 + pax_open_kernel();
11537 + while (count--)
11538 + *dst++ = *src++;
11539 + pax_close_kernel();
11540 }
11541
11542 +#ifdef CONFIG_PAX_PER_CPU_PGD
11543 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
11544 +#endif
11545 +
11546 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11547 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
11548 +#else
11549 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
11550 +#endif
11551
11552 #include <asm-generic/pgtable.h>
11553 #endif /* __ASSEMBLY__ */
11554 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11555 index 0c92113..34a77c6 100644
11556 --- a/arch/x86/include/asm/pgtable_32.h
11557 +++ b/arch/x86/include/asm/pgtable_32.h
11558 @@ -25,9 +25,6 @@
11559 struct mm_struct;
11560 struct vm_area_struct;
11561
11562 -extern pgd_t swapper_pg_dir[1024];
11563 -extern pgd_t initial_page_table[1024];
11564 -
11565 static inline void pgtable_cache_init(void) { }
11566 static inline void check_pgt_cache(void) { }
11567 void paging_init(void);
11568 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11569 # include <asm/pgtable-2level.h>
11570 #endif
11571
11572 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11573 +extern pgd_t initial_page_table[PTRS_PER_PGD];
11574 +#ifdef CONFIG_X86_PAE
11575 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11576 +#endif
11577 +
11578 #if defined(CONFIG_HIGHPTE)
11579 #define pte_offset_map(dir, address) \
11580 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11581 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11582 /* Clear a kernel PTE and flush it from the TLB */
11583 #define kpte_clear_flush(ptep, vaddr) \
11584 do { \
11585 + pax_open_kernel(); \
11586 pte_clear(&init_mm, (vaddr), (ptep)); \
11587 + pax_close_kernel(); \
11588 __flush_tlb_one((vaddr)); \
11589 } while (0)
11590
11591 @@ -74,6 +79,9 @@ do { \
11592
11593 #endif /* !__ASSEMBLY__ */
11594
11595 +#define HAVE_ARCH_UNMAPPED_AREA
11596 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11597 +
11598 /*
11599 * kern_addr_valid() is (1) for FLATMEM and (0) for
11600 * SPARSEMEM and DISCONTIGMEM
11601 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11602 index ed5903b..c7fe163 100644
11603 --- a/arch/x86/include/asm/pgtable_32_types.h
11604 +++ b/arch/x86/include/asm/pgtable_32_types.h
11605 @@ -8,7 +8,7 @@
11606 */
11607 #ifdef CONFIG_X86_PAE
11608 # include <asm/pgtable-3level_types.h>
11609 -# define PMD_SIZE (1UL << PMD_SHIFT)
11610 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11611 # define PMD_MASK (~(PMD_SIZE - 1))
11612 #else
11613 # include <asm/pgtable-2level_types.h>
11614 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11615 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11616 #endif
11617
11618 +#ifdef CONFIG_PAX_KERNEXEC
11619 +#ifndef __ASSEMBLY__
11620 +extern unsigned char MODULES_EXEC_VADDR[];
11621 +extern unsigned char MODULES_EXEC_END[];
11622 +#endif
11623 +#include <asm/boot.h>
11624 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11625 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11626 +#else
11627 +#define ktla_ktva(addr) (addr)
11628 +#define ktva_ktla(addr) (addr)
11629 +#endif
11630 +
11631 #define MODULES_VADDR VMALLOC_START
11632 #define MODULES_END VMALLOC_END
11633 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11634 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11635 index 975f709..9f779c9 100644
11636 --- a/arch/x86/include/asm/pgtable_64.h
11637 +++ b/arch/x86/include/asm/pgtable_64.h
11638 @@ -16,10 +16,14 @@
11639
11640 extern pud_t level3_kernel_pgt[512];
11641 extern pud_t level3_ident_pgt[512];
11642 +extern pud_t level3_vmalloc_start_pgt[512];
11643 +extern pud_t level3_vmalloc_end_pgt[512];
11644 +extern pud_t level3_vmemmap_pgt[512];
11645 +extern pud_t level2_vmemmap_pgt[512];
11646 extern pmd_t level2_kernel_pgt[512];
11647 extern pmd_t level2_fixmap_pgt[512];
11648 -extern pmd_t level2_ident_pgt[512];
11649 -extern pgd_t init_level4_pgt[];
11650 +extern pmd_t level2_ident_pgt[512*2];
11651 +extern pgd_t init_level4_pgt[512];
11652
11653 #define swapper_pg_dir init_level4_pgt
11654
11655 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11656
11657 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11658 {
11659 + pax_open_kernel();
11660 *pmdp = pmd;
11661 + pax_close_kernel();
11662 }
11663
11664 static inline void native_pmd_clear(pmd_t *pmd)
11665 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11666
11667 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11668 {
11669 + pax_open_kernel();
11670 *pudp = pud;
11671 + pax_close_kernel();
11672 }
11673
11674 static inline void native_pud_clear(pud_t *pud)
11675 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
11676
11677 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11678 {
11679 + pax_open_kernel();
11680 + *pgdp = pgd;
11681 + pax_close_kernel();
11682 +}
11683 +
11684 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11685 +{
11686 *pgdp = pgd;
11687 }
11688
11689 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11690 index 766ea16..5b96cb3 100644
11691 --- a/arch/x86/include/asm/pgtable_64_types.h
11692 +++ b/arch/x86/include/asm/pgtable_64_types.h
11693 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11694 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11695 #define MODULES_END _AC(0xffffffffff000000, UL)
11696 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11697 +#define MODULES_EXEC_VADDR MODULES_VADDR
11698 +#define MODULES_EXEC_END MODULES_END
11699 +
11700 +#define ktla_ktva(addr) (addr)
11701 +#define ktva_ktla(addr) (addr)
11702
11703 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11704 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11705 index 013286a..8b42f4f 100644
11706 --- a/arch/x86/include/asm/pgtable_types.h
11707 +++ b/arch/x86/include/asm/pgtable_types.h
11708 @@ -16,13 +16,12 @@
11709 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11710 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11711 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11712 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11713 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11714 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11715 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11716 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11717 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11718 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11719 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
11720 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11721 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
11722 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11723
11724 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11725 @@ -40,7 +39,6 @@
11726 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11727 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11728 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11729 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11730 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11731 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11732 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11733 @@ -57,8 +55,10 @@
11734
11735 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11736 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11737 -#else
11738 +#elif defined(CONFIG_KMEMCHECK)
11739 #define _PAGE_NX (_AT(pteval_t, 0))
11740 +#else
11741 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11742 #endif
11743
11744 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11745 @@ -96,6 +96,9 @@
11746 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11747 _PAGE_ACCESSED)
11748
11749 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
11750 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
11751 +
11752 #define __PAGE_KERNEL_EXEC \
11753 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11754 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11755 @@ -106,7 +109,7 @@
11756 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11757 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11758 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11759 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11760 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11761 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11762 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
11763 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11764 @@ -168,8 +171,8 @@
11765 * bits are combined, this will alow user to access the high address mapped
11766 * VDSO in the presence of CONFIG_COMPAT_VDSO
11767 */
11768 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11769 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11770 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11771 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11772 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11773 #endif
11774
11775 @@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11776 {
11777 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11778 }
11779 +#endif
11780
11781 +#if PAGETABLE_LEVELS == 3
11782 +#include <asm-generic/pgtable-nopud.h>
11783 +#endif
11784 +
11785 +#if PAGETABLE_LEVELS == 2
11786 +#include <asm-generic/pgtable-nopmd.h>
11787 +#endif
11788 +
11789 +#ifndef __ASSEMBLY__
11790 #if PAGETABLE_LEVELS > 3
11791 typedef struct { pudval_t pud; } pud_t;
11792
11793 @@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11794 return pud.pud;
11795 }
11796 #else
11797 -#include <asm-generic/pgtable-nopud.h>
11798 -
11799 static inline pudval_t native_pud_val(pud_t pud)
11800 {
11801 return native_pgd_val(pud.pgd);
11802 @@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11803 return pmd.pmd;
11804 }
11805 #else
11806 -#include <asm-generic/pgtable-nopmd.h>
11807 -
11808 static inline pmdval_t native_pmd_val(pmd_t pmd)
11809 {
11810 return native_pgd_val(pmd.pud.pgd);
11811 @@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
11812
11813 extern pteval_t __supported_pte_mask;
11814 extern void set_nx(void);
11815 -extern int nx_enabled;
11816
11817 #define pgprot_writecombine pgprot_writecombine
11818 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11819 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11820 index 4fa7dcc..764e33a 100644
11821 --- a/arch/x86/include/asm/processor.h
11822 +++ b/arch/x86/include/asm/processor.h
11823 @@ -276,7 +276,7 @@ struct tss_struct {
11824
11825 } ____cacheline_aligned;
11826
11827 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11828 +extern struct tss_struct init_tss[NR_CPUS];
11829
11830 /*
11831 * Save the original ist values for checking stack pointers during debugging
11832 @@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
11833 */
11834 #define TASK_SIZE PAGE_OFFSET
11835 #define TASK_SIZE_MAX TASK_SIZE
11836 +
11837 +#ifdef CONFIG_PAX_SEGMEXEC
11838 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11839 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11840 +#else
11841 #define STACK_TOP TASK_SIZE
11842 -#define STACK_TOP_MAX STACK_TOP
11843 +#endif
11844 +
11845 +#define STACK_TOP_MAX TASK_SIZE
11846
11847 #define INIT_THREAD { \
11848 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11849 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11850 .vm86_info = NULL, \
11851 .sysenter_cs = __KERNEL_CS, \
11852 .io_bitmap_ptr = NULL, \
11853 @@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
11854 */
11855 #define INIT_TSS { \
11856 .x86_tss = { \
11857 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
11858 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11859 .ss0 = __KERNEL_DS, \
11860 .ss1 = __KERNEL_CS, \
11861 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11862 @@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
11863 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11864
11865 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11866 -#define KSTK_TOP(info) \
11867 -({ \
11868 - unsigned long *__ptr = (unsigned long *)(info); \
11869 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11870 -})
11871 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11872
11873 /*
11874 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11875 @@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11876 #define task_pt_regs(task) \
11877 ({ \
11878 struct pt_regs *__regs__; \
11879 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11880 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11881 __regs__ - 1; \
11882 })
11883
11884 @@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11885 /*
11886 * User space process size. 47bits minus one guard page.
11887 */
11888 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11889 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11890
11891 /* This decides where the kernel will search for a free chunk of vm
11892 * space during mmap's.
11893 */
11894 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11895 - 0xc0000000 : 0xFFFFe000)
11896 + 0xc0000000 : 0xFFFFf000)
11897
11898 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
11899 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11900 @@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11901 #define STACK_TOP_MAX TASK_SIZE_MAX
11902
11903 #define INIT_THREAD { \
11904 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11905 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11906 }
11907
11908 #define INIT_TSS { \
11909 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11910 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11911 }
11912
11913 /*
11914 @@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11915 */
11916 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11917
11918 +#ifdef CONFIG_PAX_SEGMEXEC
11919 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11920 +#endif
11921 +
11922 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11923
11924 /* Get/set a process' ability to use the timestamp counter instruction */
11925 @@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11926
11927 void cpu_idle_wait(void);
11928
11929 -extern unsigned long arch_align_stack(unsigned long sp);
11930 +#define arch_align_stack(x) ((x) & ~0xfUL)
11931 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11932
11933 void default_idle(void);
11934 bool set_pm_idle_to_default(void);
11935
11936 -void stop_this_cpu(void *dummy);
11937 +void stop_this_cpu(void *dummy) __noreturn;
11938
11939 #endif /* _ASM_X86_PROCESSOR_H */
11940 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11941 index dcfde52..dbfea06 100644
11942 --- a/arch/x86/include/asm/ptrace.h
11943 +++ b/arch/x86/include/asm/ptrace.h
11944 @@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11945 }
11946
11947 /*
11948 - * user_mode_vm(regs) determines whether a register set came from user mode.
11949 + * user_mode(regs) determines whether a register set came from user mode.
11950 * This is true if V8086 mode was enabled OR if the register set was from
11951 * protected mode with RPL-3 CS value. This tricky test checks that with
11952 * one comparison. Many places in the kernel can bypass this full check
11953 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11954 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11955 + * be used.
11956 */
11957 -static inline int user_mode(struct pt_regs *regs)
11958 +static inline int user_mode_novm(struct pt_regs *regs)
11959 {
11960 #ifdef CONFIG_X86_32
11961 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11962 #else
11963 - return !!(regs->cs & 3);
11964 + return !!(regs->cs & SEGMENT_RPL_MASK);
11965 #endif
11966 }
11967
11968 -static inline int user_mode_vm(struct pt_regs *regs)
11969 +static inline int user_mode(struct pt_regs *regs)
11970 {
11971 #ifdef CONFIG_X86_32
11972 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11973 USER_RPL;
11974 #else
11975 - return user_mode(regs);
11976 + return user_mode_novm(regs);
11977 #endif
11978 }
11979
11980 @@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
11981 #ifdef CONFIG_X86_64
11982 static inline bool user_64bit_mode(struct pt_regs *regs)
11983 {
11984 + unsigned long cs = regs->cs & 0xffff;
11985 #ifndef CONFIG_PARAVIRT
11986 /*
11987 * On non-paravirt systems, this is the only long mode CPL 3
11988 * selector. We do not allow long mode selectors in the LDT.
11989 */
11990 - return regs->cs == __USER_CS;
11991 + return cs == __USER_CS;
11992 #else
11993 /* Headers are too twisted for this to go in paravirt.h. */
11994 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
11995 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
11996 #endif
11997 }
11998 #endif
11999 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
12000 index 92f29706..a79cbbb 100644
12001 --- a/arch/x86/include/asm/reboot.h
12002 +++ b/arch/x86/include/asm/reboot.h
12003 @@ -6,19 +6,19 @@
12004 struct pt_regs;
12005
12006 struct machine_ops {
12007 - void (*restart)(char *cmd);
12008 - void (*halt)(void);
12009 - void (*power_off)(void);
12010 + void (* __noreturn restart)(char *cmd);
12011 + void (* __noreturn halt)(void);
12012 + void (* __noreturn power_off)(void);
12013 void (*shutdown)(void);
12014 void (*crash_shutdown)(struct pt_regs *);
12015 - void (*emergency_restart)(void);
12016 -};
12017 + void (* __noreturn emergency_restart)(void);
12018 +} __no_const;
12019
12020 extern struct machine_ops machine_ops;
12021
12022 void native_machine_crash_shutdown(struct pt_regs *regs);
12023 void native_machine_shutdown(void);
12024 -void machine_real_restart(unsigned int type);
12025 +void machine_real_restart(unsigned int type) __noreturn;
12026 /* These must match dispatch_table in reboot_32.S */
12027 #define MRR_BIOS 0
12028 #define MRR_APM 1
12029 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
12030 index 2dbe4a7..ce1db00 100644
12031 --- a/arch/x86/include/asm/rwsem.h
12032 +++ b/arch/x86/include/asm/rwsem.h
12033 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
12034 {
12035 asm volatile("# beginning down_read\n\t"
12036 LOCK_PREFIX _ASM_INC "(%1)\n\t"
12037 +
12038 +#ifdef CONFIG_PAX_REFCOUNT
12039 + "jno 0f\n"
12040 + LOCK_PREFIX _ASM_DEC "(%1)\n"
12041 + "int $4\n0:\n"
12042 + _ASM_EXTABLE(0b, 0b)
12043 +#endif
12044 +
12045 /* adds 0x00000001 */
12046 " jns 1f\n"
12047 " call call_rwsem_down_read_failed\n"
12048 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
12049 "1:\n\t"
12050 " mov %1,%2\n\t"
12051 " add %3,%2\n\t"
12052 +
12053 +#ifdef CONFIG_PAX_REFCOUNT
12054 + "jno 0f\n"
12055 + "sub %3,%2\n"
12056 + "int $4\n0:\n"
12057 + _ASM_EXTABLE(0b, 0b)
12058 +#endif
12059 +
12060 " jle 2f\n\t"
12061 LOCK_PREFIX " cmpxchg %2,%0\n\t"
12062 " jnz 1b\n\t"
12063 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
12064 long tmp;
12065 asm volatile("# beginning down_write\n\t"
12066 LOCK_PREFIX " xadd %1,(%2)\n\t"
12067 +
12068 +#ifdef CONFIG_PAX_REFCOUNT
12069 + "jno 0f\n"
12070 + "mov %1,(%2)\n"
12071 + "int $4\n0:\n"
12072 + _ASM_EXTABLE(0b, 0b)
12073 +#endif
12074 +
12075 /* adds 0xffff0001, returns the old value */
12076 " test %1,%1\n\t"
12077 /* was the count 0 before? */
12078 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
12079 long tmp;
12080 asm volatile("# beginning __up_read\n\t"
12081 LOCK_PREFIX " xadd %1,(%2)\n\t"
12082 +
12083 +#ifdef CONFIG_PAX_REFCOUNT
12084 + "jno 0f\n"
12085 + "mov %1,(%2)\n"
12086 + "int $4\n0:\n"
12087 + _ASM_EXTABLE(0b, 0b)
12088 +#endif
12089 +
12090 /* subtracts 1, returns the old value */
12091 " jns 1f\n\t"
12092 " call call_rwsem_wake\n" /* expects old value in %edx */
12093 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
12094 long tmp;
12095 asm volatile("# beginning __up_write\n\t"
12096 LOCK_PREFIX " xadd %1,(%2)\n\t"
12097 +
12098 +#ifdef CONFIG_PAX_REFCOUNT
12099 + "jno 0f\n"
12100 + "mov %1,(%2)\n"
12101 + "int $4\n0:\n"
12102 + _ASM_EXTABLE(0b, 0b)
12103 +#endif
12104 +
12105 /* subtracts 0xffff0001, returns the old value */
12106 " jns 1f\n\t"
12107 " call call_rwsem_wake\n" /* expects old value in %edx */
12108 @@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12109 {
12110 asm volatile("# beginning __downgrade_write\n\t"
12111 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
12112 +
12113 +#ifdef CONFIG_PAX_REFCOUNT
12114 + "jno 0f\n"
12115 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
12116 + "int $4\n0:\n"
12117 + _ASM_EXTABLE(0b, 0b)
12118 +#endif
12119 +
12120 /*
12121 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12122 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
12123 @@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
12124 */
12125 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12126 {
12127 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12128 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
12129 +
12130 +#ifdef CONFIG_PAX_REFCOUNT
12131 + "jno 0f\n"
12132 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
12133 + "int $4\n0:\n"
12134 + _ASM_EXTABLE(0b, 0b)
12135 +#endif
12136 +
12137 : "+m" (sem->count)
12138 : "er" (delta));
12139 }
12140 @@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12141 */
12142 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
12143 {
12144 - return delta + xadd(&sem->count, delta);
12145 + return delta + xadd_check_overflow(&sem->count, delta);
12146 }
12147
12148 #endif /* __KERNEL__ */
12149 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
12150 index 1654662..5af4157 100644
12151 --- a/arch/x86/include/asm/segment.h
12152 +++ b/arch/x86/include/asm/segment.h
12153 @@ -64,10 +64,15 @@
12154 * 26 - ESPFIX small SS
12155 * 27 - per-cpu [ offset to per-cpu data area ]
12156 * 28 - stack_canary-20 [ for stack protector ]
12157 - * 29 - unused
12158 - * 30 - unused
12159 + * 29 - PCI BIOS CS
12160 + * 30 - PCI BIOS DS
12161 * 31 - TSS for double fault handler
12162 */
12163 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12164 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12165 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12166 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12167 +
12168 #define GDT_ENTRY_TLS_MIN 6
12169 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12170
12171 @@ -79,6 +84,8 @@
12172
12173 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
12174
12175 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12176 +
12177 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
12178
12179 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
12180 @@ -104,6 +111,12 @@
12181 #define __KERNEL_STACK_CANARY 0
12182 #endif
12183
12184 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
12185 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12186 +
12187 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
12188 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12189 +
12190 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12191
12192 /*
12193 @@ -141,7 +154,7 @@
12194 */
12195
12196 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12197 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12198 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12199
12200
12201 #else
12202 @@ -165,6 +178,8 @@
12203 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
12204 #define __USER32_DS __USER_DS
12205
12206 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12207 +
12208 #define GDT_ENTRY_TSS 8 /* needs two entries */
12209 #define GDT_ENTRY_LDT 10 /* needs two entries */
12210 #define GDT_ENTRY_TLS_MIN 12
12211 @@ -185,6 +200,7 @@
12212 #endif
12213
12214 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12215 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12216 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12217 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12218 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
12219 @@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12220 {
12221 unsigned long __limit;
12222 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12223 - return __limit + 1;
12224 + return __limit;
12225 }
12226
12227 #endif /* !__ASSEMBLY__ */
12228 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
12229 index 0434c40..1714bf0 100644
12230 --- a/arch/x86/include/asm/smp.h
12231 +++ b/arch/x86/include/asm/smp.h
12232 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
12233 /* cpus sharing the last level cache: */
12234 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
12235 DECLARE_PER_CPU(u16, cpu_llc_id);
12236 -DECLARE_PER_CPU(int, cpu_number);
12237 +DECLARE_PER_CPU(unsigned int, cpu_number);
12238
12239 static inline struct cpumask *cpu_sibling_mask(int cpu)
12240 {
12241 @@ -77,7 +77,7 @@ struct smp_ops {
12242
12243 void (*send_call_func_ipi)(const struct cpumask *mask);
12244 void (*send_call_func_single_ipi)(int cpu);
12245 -};
12246 +} __no_const;
12247
12248 /* Globals due to paravirt */
12249 extern void set_cpu_sibling_map(int cpu);
12250 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
12251 extern int safe_smp_processor_id(void);
12252
12253 #elif defined(CONFIG_X86_64_SMP)
12254 -#define raw_smp_processor_id() (percpu_read(cpu_number))
12255 -
12256 -#define stack_smp_processor_id() \
12257 -({ \
12258 - struct thread_info *ti; \
12259 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12260 - ti->cpu; \
12261 -})
12262 +#define raw_smp_processor_id() (percpu_read(cpu_number))
12263 +#define stack_smp_processor_id() raw_smp_processor_id()
12264 #define safe_smp_processor_id() smp_processor_id()
12265
12266 #endif
12267 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
12268 index 76bfa2c..12d3fe7 100644
12269 --- a/arch/x86/include/asm/spinlock.h
12270 +++ b/arch/x86/include/asm/spinlock.h
12271 @@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
12272 static inline void arch_read_lock(arch_rwlock_t *rw)
12273 {
12274 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
12275 +
12276 +#ifdef CONFIG_PAX_REFCOUNT
12277 + "jno 0f\n"
12278 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
12279 + "int $4\n0:\n"
12280 + _ASM_EXTABLE(0b, 0b)
12281 +#endif
12282 +
12283 "jns 1f\n"
12284 "call __read_lock_failed\n\t"
12285 "1:\n"
12286 @@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
12287 static inline void arch_write_lock(arch_rwlock_t *rw)
12288 {
12289 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
12290 +
12291 +#ifdef CONFIG_PAX_REFCOUNT
12292 + "jno 0f\n"
12293 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
12294 + "int $4\n0:\n"
12295 + _ASM_EXTABLE(0b, 0b)
12296 +#endif
12297 +
12298 "jz 1f\n"
12299 "call __write_lock_failed\n\t"
12300 "1:\n"
12301 @@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
12302
12303 static inline void arch_read_unlock(arch_rwlock_t *rw)
12304 {
12305 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12306 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
12307 +
12308 +#ifdef CONFIG_PAX_REFCOUNT
12309 + "jno 0f\n"
12310 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
12311 + "int $4\n0:\n"
12312 + _ASM_EXTABLE(0b, 0b)
12313 +#endif
12314 +
12315 :"+m" (rw->lock) : : "memory");
12316 }
12317
12318 static inline void arch_write_unlock(arch_rwlock_t *rw)
12319 {
12320 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12321 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
12322 +
12323 +#ifdef CONFIG_PAX_REFCOUNT
12324 + "jno 0f\n"
12325 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
12326 + "int $4\n0:\n"
12327 + _ASM_EXTABLE(0b, 0b)
12328 +#endif
12329 +
12330 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
12331 }
12332
12333 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
12334 index b5d9533..41655fa 100644
12335 --- a/arch/x86/include/asm/stackprotector.h
12336 +++ b/arch/x86/include/asm/stackprotector.h
12337 @@ -47,7 +47,7 @@
12338 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12339 */
12340 #define GDT_STACK_CANARY_INIT \
12341 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12342 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12343
12344 /*
12345 * Initialize the stackprotector canary value.
12346 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
12347
12348 static inline void load_stack_canary_segment(void)
12349 {
12350 -#ifdef CONFIG_X86_32
12351 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12352 asm volatile ("mov %0, %%gs" : : "r" (0));
12353 #endif
12354 }
12355 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12356 index 70bbe39..4ae2bd4 100644
12357 --- a/arch/x86/include/asm/stacktrace.h
12358 +++ b/arch/x86/include/asm/stacktrace.h
12359 @@ -11,28 +11,20 @@
12360
12361 extern int kstack_depth_to_print;
12362
12363 -struct thread_info;
12364 +struct task_struct;
12365 struct stacktrace_ops;
12366
12367 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12368 - unsigned long *stack,
12369 - unsigned long bp,
12370 - const struct stacktrace_ops *ops,
12371 - void *data,
12372 - unsigned long *end,
12373 - int *graph);
12374 +typedef unsigned long walk_stack_t(struct task_struct *task,
12375 + void *stack_start,
12376 + unsigned long *stack,
12377 + unsigned long bp,
12378 + const struct stacktrace_ops *ops,
12379 + void *data,
12380 + unsigned long *end,
12381 + int *graph);
12382
12383 -extern unsigned long
12384 -print_context_stack(struct thread_info *tinfo,
12385 - unsigned long *stack, unsigned long bp,
12386 - const struct stacktrace_ops *ops, void *data,
12387 - unsigned long *end, int *graph);
12388 -
12389 -extern unsigned long
12390 -print_context_stack_bp(struct thread_info *tinfo,
12391 - unsigned long *stack, unsigned long bp,
12392 - const struct stacktrace_ops *ops, void *data,
12393 - unsigned long *end, int *graph);
12394 +extern walk_stack_t print_context_stack;
12395 +extern walk_stack_t print_context_stack_bp;
12396
12397 /* Generic stack tracer with callbacks */
12398
12399 @@ -40,7 +32,7 @@ struct stacktrace_ops {
12400 void (*address)(void *data, unsigned long address, int reliable);
12401 /* On negative return stop dumping */
12402 int (*stack)(void *data, char *name);
12403 - walk_stack_t walk_stack;
12404 + walk_stack_t *walk_stack;
12405 };
12406
12407 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
12408 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12409 index 4ec45b3..a4f0a8a 100644
12410 --- a/arch/x86/include/asm/switch_to.h
12411 +++ b/arch/x86/include/asm/switch_to.h
12412 @@ -108,7 +108,7 @@ do { \
12413 "call __switch_to\n\t" \
12414 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12415 __switch_canary \
12416 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
12417 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12418 "movq %%rax,%%rdi\n\t" \
12419 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12420 "jnz ret_from_fork\n\t" \
12421 @@ -119,7 +119,7 @@ do { \
12422 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12423 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12424 [_tif_fork] "i" (_TIF_FORK), \
12425 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
12426 + [thread_info] "m" (current_tinfo), \
12427 [current_task] "m" (current_task) \
12428 __switch_canary_iparam \
12429 : "memory", "cc" __EXTRA_CLOBBER)
12430 diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12431 index 3fda9db4..4ca1c61 100644
12432 --- a/arch/x86/include/asm/sys_ia32.h
12433 +++ b/arch/x86/include/asm/sys_ia32.h
12434 @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12435 struct old_sigaction32 __user *);
12436 asmlinkage long sys32_alarm(unsigned int);
12437
12438 -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12439 +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12440 asmlinkage long sys32_sysfs(int, u32, u32);
12441
12442 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
12443 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
12444 index ad6df8c..5e0cf6e 100644
12445 --- a/arch/x86/include/asm/thread_info.h
12446 +++ b/arch/x86/include/asm/thread_info.h
12447 @@ -10,6 +10,7 @@
12448 #include <linux/compiler.h>
12449 #include <asm/page.h>
12450 #include <asm/types.h>
12451 +#include <asm/percpu.h>
12452
12453 /*
12454 * low level task data that entry.S needs immediate access to
12455 @@ -24,7 +25,6 @@ struct exec_domain;
12456 #include <linux/atomic.h>
12457
12458 struct thread_info {
12459 - struct task_struct *task; /* main task structure */
12460 struct exec_domain *exec_domain; /* execution domain */
12461 __u32 flags; /* low level flags */
12462 __u32 status; /* thread synchronous flags */
12463 @@ -34,19 +34,13 @@ struct thread_info {
12464 mm_segment_t addr_limit;
12465 struct restart_block restart_block;
12466 void __user *sysenter_return;
12467 -#ifdef CONFIG_X86_32
12468 - unsigned long previous_esp; /* ESP of the previous stack in
12469 - case of nested (IRQ) stacks
12470 - */
12471 - __u8 supervisor_stack[0];
12472 -#endif
12473 + unsigned long lowest_stack;
12474 unsigned int sig_on_uaccess_error:1;
12475 unsigned int uaccess_err:1; /* uaccess failed */
12476 };
12477
12478 -#define INIT_THREAD_INFO(tsk) \
12479 +#define INIT_THREAD_INFO \
12480 { \
12481 - .task = &tsk, \
12482 .exec_domain = &default_exec_domain, \
12483 .flags = 0, \
12484 .cpu = 0, \
12485 @@ -57,7 +51,7 @@ struct thread_info {
12486 }, \
12487 }
12488
12489 -#define init_thread_info (init_thread_union.thread_info)
12490 +#define init_thread_info (init_thread_union.stack)
12491 #define init_stack (init_thread_union.stack)
12492
12493 #else /* !__ASSEMBLY__ */
12494 @@ -97,6 +91,7 @@ struct thread_info {
12495 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
12496 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12497 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12498 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
12499
12500 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12501 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
12502 @@ -120,16 +115,18 @@ struct thread_info {
12503 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
12504 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12505 #define _TIF_X32 (1 << TIF_X32)
12506 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12507
12508 /* work to do in syscall_trace_enter() */
12509 #define _TIF_WORK_SYSCALL_ENTRY \
12510 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12511 - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
12512 + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12513 + _TIF_GRSEC_SETXID)
12514
12515 /* work to do in syscall_trace_leave() */
12516 #define _TIF_WORK_SYSCALL_EXIT \
12517 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12518 - _TIF_SYSCALL_TRACEPOINT)
12519 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12520
12521 /* work to do on interrupt/exception return */
12522 #define _TIF_WORK_MASK \
12523 @@ -139,7 +136,8 @@ struct thread_info {
12524
12525 /* work to do on any return to user space */
12526 #define _TIF_ALLWORK_MASK \
12527 - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12528 + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12529 + _TIF_GRSEC_SETXID)
12530
12531 /* Only used for 64 bit */
12532 #define _TIF_DO_NOTIFY_MASK \
12533 @@ -173,45 +171,40 @@ struct thread_info {
12534 ret; \
12535 })
12536
12537 -#ifdef CONFIG_X86_32
12538 -
12539 -#define STACK_WARN (THREAD_SIZE/8)
12540 -/*
12541 - * macros/functions for gaining access to the thread information structure
12542 - *
12543 - * preempt_count needs to be 1 initially, until the scheduler is functional.
12544 - */
12545 -#ifndef __ASSEMBLY__
12546 -
12547 -
12548 -/* how to get the current stack pointer from C */
12549 -register unsigned long current_stack_pointer asm("esp") __used;
12550 -
12551 -/* how to get the thread information struct from C */
12552 -static inline struct thread_info *current_thread_info(void)
12553 -{
12554 - return (struct thread_info *)
12555 - (current_stack_pointer & ~(THREAD_SIZE - 1));
12556 -}
12557 -
12558 -#else /* !__ASSEMBLY__ */
12559 -
12560 +#ifdef __ASSEMBLY__
12561 /* how to get the thread information struct from ASM */
12562 #define GET_THREAD_INFO(reg) \
12563 - movl $-THREAD_SIZE, reg; \
12564 - andl %esp, reg
12565 + mov PER_CPU_VAR(current_tinfo), reg
12566
12567 /* use this one if reg already contains %esp */
12568 -#define GET_THREAD_INFO_WITH_ESP(reg) \
12569 - andl $-THREAD_SIZE, reg
12570 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12571 +#else
12572 +/* how to get the thread information struct from C */
12573 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12574 +
12575 +static __always_inline struct thread_info *current_thread_info(void)
12576 +{
12577 + return percpu_read_stable(current_tinfo);
12578 +}
12579 +#endif
12580 +
12581 +#ifdef CONFIG_X86_32
12582 +
12583 +#define STACK_WARN (THREAD_SIZE/8)
12584 +/*
12585 + * macros/functions for gaining access to the thread information structure
12586 + *
12587 + * preempt_count needs to be 1 initially, until the scheduler is functional.
12588 + */
12589 +#ifndef __ASSEMBLY__
12590 +
12591 +/* how to get the current stack pointer from C */
12592 +register unsigned long current_stack_pointer asm("esp") __used;
12593
12594 #endif
12595
12596 #else /* X86_32 */
12597
12598 -#include <asm/percpu.h>
12599 -#define KERNEL_STACK_OFFSET (5*8)
12600 -
12601 /*
12602 * macros/functions for gaining access to the thread information structure
12603 * preempt_count needs to be 1 initially, until the scheduler is functional.
12604 @@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
12605 #ifndef __ASSEMBLY__
12606 DECLARE_PER_CPU(unsigned long, kernel_stack);
12607
12608 -static inline struct thread_info *current_thread_info(void)
12609 -{
12610 - struct thread_info *ti;
12611 - ti = (void *)(percpu_read_stable(kernel_stack) +
12612 - KERNEL_STACK_OFFSET - THREAD_SIZE);
12613 - return ti;
12614 -}
12615 -
12616 -#else /* !__ASSEMBLY__ */
12617 -
12618 -/* how to get the thread information struct from ASM */
12619 -#define GET_THREAD_INFO(reg) \
12620 - movq PER_CPU_VAR(kernel_stack),reg ; \
12621 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12622 -
12623 -/*
12624 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12625 - * a certain register (to be used in assembler memory operands).
12626 - */
12627 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12628 -
12629 +/* how to get the current stack pointer from C */
12630 +register unsigned long current_stack_pointer asm("rsp") __used;
12631 #endif
12632
12633 #endif /* !X86_32 */
12634 @@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
12635 extern void free_thread_info(struct thread_info *ti);
12636 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12637 #define arch_task_cache_init arch_task_cache_init
12638 +
12639 +#define __HAVE_THREAD_FUNCTIONS
12640 +#define task_thread_info(task) (&(task)->tinfo)
12641 +#define task_stack_page(task) ((task)->stack)
12642 +#define setup_thread_stack(p, org) do {} while (0)
12643 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12644 +
12645 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12646 +extern struct task_struct *alloc_task_struct_node(int node);
12647 +extern void free_task_struct(struct task_struct *);
12648 +
12649 #endif
12650 #endif /* _ASM_X86_THREAD_INFO_H */
12651 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12652 index e054459..14bc8a7 100644
12653 --- a/arch/x86/include/asm/uaccess.h
12654 +++ b/arch/x86/include/asm/uaccess.h
12655 @@ -7,12 +7,15 @@
12656 #include <linux/compiler.h>
12657 #include <linux/thread_info.h>
12658 #include <linux/string.h>
12659 +#include <linux/sched.h>
12660 #include <asm/asm.h>
12661 #include <asm/page.h>
12662
12663 #define VERIFY_READ 0
12664 #define VERIFY_WRITE 1
12665
12666 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
12667 +
12668 /*
12669 * The fs value determines whether argument validity checking should be
12670 * performed or not. If get_fs() == USER_DS, checking is performed, with
12671 @@ -28,7 +31,12 @@
12672
12673 #define get_ds() (KERNEL_DS)
12674 #define get_fs() (current_thread_info()->addr_limit)
12675 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12676 +void __set_fs(mm_segment_t x);
12677 +void set_fs(mm_segment_t x);
12678 +#else
12679 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12680 +#endif
12681
12682 #define segment_eq(a, b) ((a).seg == (b).seg)
12683
12684 @@ -76,7 +84,33 @@
12685 * checks that the pointer is in the user space range - after calling
12686 * this function, memory access functions may still return -EFAULT.
12687 */
12688 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12689 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12690 +#define access_ok(type, addr, size) \
12691 +({ \
12692 + long __size = size; \
12693 + unsigned long __addr = (unsigned long)addr; \
12694 + unsigned long __addr_ao = __addr & PAGE_MASK; \
12695 + unsigned long __end_ao = __addr + __size - 1; \
12696 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12697 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12698 + while(__addr_ao <= __end_ao) { \
12699 + char __c_ao; \
12700 + __addr_ao += PAGE_SIZE; \
12701 + if (__size > PAGE_SIZE) \
12702 + cond_resched(); \
12703 + if (__get_user(__c_ao, (char __user *)__addr)) \
12704 + break; \
12705 + if (type != VERIFY_WRITE) { \
12706 + __addr = __addr_ao; \
12707 + continue; \
12708 + } \
12709 + if (__put_user(__c_ao, (char __user *)__addr)) \
12710 + break; \
12711 + __addr = __addr_ao; \
12712 + } \
12713 + } \
12714 + __ret_ao; \
12715 +})
12716
12717 /*
12718 * The exception table consists of pairs of addresses: the first is the
12719 @@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12720 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12721 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12722
12723 -
12724 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12725 +#define __copyuser_seg "gs;"
12726 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12727 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12728 +#else
12729 +#define __copyuser_seg
12730 +#define __COPYUSER_SET_ES
12731 +#define __COPYUSER_RESTORE_ES
12732 +#endif
12733
12734 #ifdef CONFIG_X86_32
12735 #define __put_user_asm_u64(x, addr, err, errret) \
12736 - asm volatile("1: movl %%eax,0(%2)\n" \
12737 - "2: movl %%edx,4(%2)\n" \
12738 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12739 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12740 "3:\n" \
12741 ".section .fixup,\"ax\"\n" \
12742 "4: movl %3,%0\n" \
12743 @@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12744 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12745
12746 #define __put_user_asm_ex_u64(x, addr) \
12747 - asm volatile("1: movl %%eax,0(%1)\n" \
12748 - "2: movl %%edx,4(%1)\n" \
12749 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12750 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12751 "3:\n" \
12752 _ASM_EXTABLE(1b, 2b - 1b) \
12753 _ASM_EXTABLE(2b, 3b - 2b) \
12754 @@ -252,7 +294,7 @@ extern void __put_user_8(void);
12755 __typeof__(*(ptr)) __pu_val; \
12756 __chk_user_ptr(ptr); \
12757 might_fault(); \
12758 - __pu_val = x; \
12759 + __pu_val = (x); \
12760 switch (sizeof(*(ptr))) { \
12761 case 1: \
12762 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12763 @@ -373,7 +415,7 @@ do { \
12764 } while (0)
12765
12766 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12767 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12768 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12769 "2:\n" \
12770 ".section .fixup,\"ax\"\n" \
12771 "3: mov %3,%0\n" \
12772 @@ -381,7 +423,7 @@ do { \
12773 " jmp 2b\n" \
12774 ".previous\n" \
12775 _ASM_EXTABLE(1b, 3b) \
12776 - : "=r" (err), ltype(x) \
12777 + : "=r" (err), ltype (x) \
12778 : "m" (__m(addr)), "i" (errret), "0" (err))
12779
12780 #define __get_user_size_ex(x, ptr, size) \
12781 @@ -406,7 +448,7 @@ do { \
12782 } while (0)
12783
12784 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12785 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12786 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12787 "2:\n" \
12788 _ASM_EXTABLE(1b, 2b - 1b) \
12789 : ltype(x) : "m" (__m(addr)))
12790 @@ -423,13 +465,24 @@ do { \
12791 int __gu_err; \
12792 unsigned long __gu_val; \
12793 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12794 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
12795 + (x) = (__typeof__(*(ptr)))__gu_val; \
12796 __gu_err; \
12797 })
12798
12799 /* FIXME: this hack is definitely wrong -AK */
12800 struct __large_struct { unsigned long buf[100]; };
12801 -#define __m(x) (*(struct __large_struct __user *)(x))
12802 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12803 +#define ____m(x) \
12804 +({ \
12805 + unsigned long ____x = (unsigned long)(x); \
12806 + if (____x < PAX_USER_SHADOW_BASE) \
12807 + ____x += PAX_USER_SHADOW_BASE; \
12808 + (void __user *)____x; \
12809 +})
12810 +#else
12811 +#define ____m(x) (x)
12812 +#endif
12813 +#define __m(x) (*(struct __large_struct __user *)____m(x))
12814
12815 /*
12816 * Tell gcc we read from memory instead of writing: this is because
12817 @@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12818 * aliasing issues.
12819 */
12820 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12821 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12822 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12823 "2:\n" \
12824 ".section .fixup,\"ax\"\n" \
12825 "3: mov %3,%0\n" \
12826 @@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12827 ".previous\n" \
12828 _ASM_EXTABLE(1b, 3b) \
12829 : "=r"(err) \
12830 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12831 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12832
12833 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12834 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12835 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12836 "2:\n" \
12837 _ASM_EXTABLE(1b, 2b - 1b) \
12838 : : ltype(x), "m" (__m(addr)))
12839 @@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12840 * On error, the variable @x is set to zero.
12841 */
12842
12843 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12844 +#define __get_user(x, ptr) get_user((x), (ptr))
12845 +#else
12846 #define __get_user(x, ptr) \
12847 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12848 +#endif
12849
12850 /**
12851 * __put_user: - Write a simple value into user space, with less checking.
12852 @@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12853 * Returns zero on success, or -EFAULT on error.
12854 */
12855
12856 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12857 +#define __put_user(x, ptr) put_user((x), (ptr))
12858 +#else
12859 #define __put_user(x, ptr) \
12860 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12861 +#endif
12862
12863 #define __get_user_unaligned __get_user
12864 #define __put_user_unaligned __put_user
12865 @@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12866 #define get_user_ex(x, ptr) do { \
12867 unsigned long __gue_val; \
12868 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12869 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
12870 + (x) = (__typeof__(*(ptr)))__gue_val; \
12871 } while (0)
12872
12873 #ifdef CONFIG_X86_WP_WORKS_OK
12874 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12875 index 8084bc7..3d6ec37 100644
12876 --- a/arch/x86/include/asm/uaccess_32.h
12877 +++ b/arch/x86/include/asm/uaccess_32.h
12878 @@ -11,15 +11,15 @@
12879 #include <asm/page.h>
12880
12881 unsigned long __must_check __copy_to_user_ll
12882 - (void __user *to, const void *from, unsigned long n);
12883 + (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12884 unsigned long __must_check __copy_from_user_ll
12885 - (void *to, const void __user *from, unsigned long n);
12886 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12887 unsigned long __must_check __copy_from_user_ll_nozero
12888 - (void *to, const void __user *from, unsigned long n);
12889 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12890 unsigned long __must_check __copy_from_user_ll_nocache
12891 - (void *to, const void __user *from, unsigned long n);
12892 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12893 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12894 - (void *to, const void __user *from, unsigned long n);
12895 + (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12896
12897 /**
12898 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
12899 @@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12900 static __always_inline unsigned long __must_check
12901 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12902 {
12903 + if ((long)n < 0)
12904 + return n;
12905 +
12906 if (__builtin_constant_p(n)) {
12907 unsigned long ret;
12908
12909 @@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12910 return ret;
12911 }
12912 }
12913 + if (!__builtin_constant_p(n))
12914 + check_object_size(from, n, true);
12915 return __copy_to_user_ll(to, from, n);
12916 }
12917
12918 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
12919 __copy_to_user(void __user *to, const void *from, unsigned long n)
12920 {
12921 might_fault();
12922 +
12923 return __copy_to_user_inatomic(to, from, n);
12924 }
12925
12926 static __always_inline unsigned long
12927 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12928 {
12929 + if ((long)n < 0)
12930 + return n;
12931 +
12932 /* Avoid zeroing the tail if the copy fails..
12933 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12934 * but as the zeroing behaviour is only significant when n is not
12935 @@ -137,6 +146,10 @@ static __always_inline unsigned long
12936 __copy_from_user(void *to, const void __user *from, unsigned long n)
12937 {
12938 might_fault();
12939 +
12940 + if ((long)n < 0)
12941 + return n;
12942 +
12943 if (__builtin_constant_p(n)) {
12944 unsigned long ret;
12945
12946 @@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12947 return ret;
12948 }
12949 }
12950 + if (!__builtin_constant_p(n))
12951 + check_object_size(to, n, false);
12952 return __copy_from_user_ll(to, from, n);
12953 }
12954
12955 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12956 const void __user *from, unsigned long n)
12957 {
12958 might_fault();
12959 +
12960 + if ((long)n < 0)
12961 + return n;
12962 +
12963 if (__builtin_constant_p(n)) {
12964 unsigned long ret;
12965
12966 @@ -181,15 +200,19 @@ static __always_inline unsigned long
12967 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12968 unsigned long n)
12969 {
12970 - return __copy_from_user_ll_nocache_nozero(to, from, n);
12971 + if ((long)n < 0)
12972 + return n;
12973 +
12974 + return __copy_from_user_ll_nocache_nozero(to, from, n);
12975 }
12976
12977 -unsigned long __must_check copy_to_user(void __user *to,
12978 - const void *from, unsigned long n);
12979 -unsigned long __must_check _copy_from_user(void *to,
12980 - const void __user *from,
12981 - unsigned long n);
12982 -
12983 +extern void copy_to_user_overflow(void)
12984 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12985 + __compiletime_error("copy_to_user() buffer size is not provably correct")
12986 +#else
12987 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
12988 +#endif
12989 +;
12990
12991 extern void copy_from_user_overflow(void)
12992 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
12993 @@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
12994 #endif
12995 ;
12996
12997 -static inline unsigned long __must_check copy_from_user(void *to,
12998 - const void __user *from,
12999 - unsigned long n)
13000 +/**
13001 + * copy_to_user: - Copy a block of data into user space.
13002 + * @to: Destination address, in user space.
13003 + * @from: Source address, in kernel space.
13004 + * @n: Number of bytes to copy.
13005 + *
13006 + * Context: User context only. This function may sleep.
13007 + *
13008 + * Copy data from kernel space to user space.
13009 + *
13010 + * Returns number of bytes that could not be copied.
13011 + * On success, this will be zero.
13012 + */
13013 +static inline unsigned long __must_check
13014 +copy_to_user(void __user *to, const void *from, unsigned long n)
13015 {
13016 - int sz = __compiletime_object_size(to);
13017 + size_t sz = __compiletime_object_size(from);
13018
13019 - if (likely(sz == -1 || sz >= n))
13020 - n = _copy_from_user(to, from, n);
13021 - else
13022 + if (unlikely(sz != (size_t)-1 && sz < n))
13023 + copy_to_user_overflow();
13024 + else if (access_ok(VERIFY_WRITE, to, n))
13025 + n = __copy_to_user(to, from, n);
13026 + return n;
13027 +}
13028 +
13029 +/**
13030 + * copy_from_user: - Copy a block of data from user space.
13031 + * @to: Destination address, in kernel space.
13032 + * @from: Source address, in user space.
13033 + * @n: Number of bytes to copy.
13034 + *
13035 + * Context: User context only. This function may sleep.
13036 + *
13037 + * Copy data from user space to kernel space.
13038 + *
13039 + * Returns number of bytes that could not be copied.
13040 + * On success, this will be zero.
13041 + *
13042 + * If some data could not be copied, this function will pad the copied
13043 + * data to the requested size using zero bytes.
13044 + */
13045 +static inline unsigned long __must_check
13046 +copy_from_user(void *to, const void __user *from, unsigned long n)
13047 +{
13048 + size_t sz = __compiletime_object_size(to);
13049 +
13050 + if (unlikely(sz != (size_t)-1 && sz < n))
13051 copy_from_user_overflow();
13052 -
13053 + else if (access_ok(VERIFY_READ, from, n))
13054 + n = __copy_from_user(to, from, n);
13055 + else if ((long)n > 0) {
13056 + if (!__builtin_constant_p(n))
13057 + check_object_size(to, n, false);
13058 + memset(to, 0, n);
13059 + }
13060 return n;
13061 }
13062
13063 @@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
13064 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13065
13066 long strnlen_user(const char __user *str, long n);
13067 -unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13068 -unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13069 +unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13070 +unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13071
13072 #endif /* _ASM_X86_UACCESS_32_H */
13073 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
13074 index fcd4b6f..ef04f8f 100644
13075 --- a/arch/x86/include/asm/uaccess_64.h
13076 +++ b/arch/x86/include/asm/uaccess_64.h
13077 @@ -10,6 +10,9 @@
13078 #include <asm/alternative.h>
13079 #include <asm/cpufeature.h>
13080 #include <asm/page.h>
13081 +#include <asm/pgtable.h>
13082 +
13083 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
13084
13085 /*
13086 * Copy To/From Userspace
13087 @@ -17,12 +20,14 @@
13088
13089 /* Handles exceptions in both to and from, but doesn't do access_ok */
13090 __must_check unsigned long
13091 -copy_user_generic_string(void *to, const void *from, unsigned len);
13092 +copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
13093 __must_check unsigned long
13094 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
13095 +copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
13096
13097 static __always_inline __must_check unsigned long
13098 -copy_user_generic(void *to, const void *from, unsigned len)
13099 +copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13100 +static __always_inline __must_check unsigned long
13101 +copy_user_generic(void *to, const void *from, unsigned long len)
13102 {
13103 unsigned ret;
13104
13105 @@ -32,142 +37,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
13106 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13107 "=d" (len)),
13108 "1" (to), "2" (from), "3" (len)
13109 - : "memory", "rcx", "r8", "r9", "r10", "r11");
13110 + : "memory", "rcx", "r8", "r9", "r11");
13111 return ret;
13112 }
13113
13114 +static __always_inline __must_check unsigned long
13115 +__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
13116 +static __always_inline __must_check unsigned long
13117 +__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
13118 __must_check unsigned long
13119 -_copy_to_user(void __user *to, const void *from, unsigned len);
13120 -__must_check unsigned long
13121 -_copy_from_user(void *to, const void __user *from, unsigned len);
13122 -__must_check unsigned long
13123 -copy_in_user(void __user *to, const void __user *from, unsigned len);
13124 +copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
13125 +
13126 +extern void copy_to_user_overflow(void)
13127 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13128 + __compiletime_error("copy_to_user() buffer size is not provably correct")
13129 +#else
13130 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
13131 +#endif
13132 +;
13133 +
13134 +extern void copy_from_user_overflow(void)
13135 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13136 + __compiletime_error("copy_from_user() buffer size is not provably correct")
13137 +#else
13138 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
13139 +#endif
13140 +;
13141
13142 static inline unsigned long __must_check copy_from_user(void *to,
13143 const void __user *from,
13144 unsigned long n)
13145 {
13146 - int sz = __compiletime_object_size(to);
13147 -
13148 might_fault();
13149 - if (likely(sz == -1 || sz >= n))
13150 - n = _copy_from_user(to, from, n);
13151 -#ifdef CONFIG_DEBUG_VM
13152 - else
13153 - WARN(1, "Buffer overflow detected!\n");
13154 -#endif
13155 +
13156 + if (access_ok(VERIFY_READ, from, n))
13157 + n = __copy_from_user(to, from, n);
13158 + else if (n < INT_MAX) {
13159 + if (!__builtin_constant_p(n))
13160 + check_object_size(to, n, false);
13161 + memset(to, 0, n);
13162 + }
13163 return n;
13164 }
13165
13166 static __always_inline __must_check
13167 -int copy_to_user(void __user *dst, const void *src, unsigned size)
13168 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
13169 {
13170 might_fault();
13171
13172 - return _copy_to_user(dst, src, size);
13173 + if (access_ok(VERIFY_WRITE, dst, size))
13174 + size = __copy_to_user(dst, src, size);
13175 + return size;
13176 }
13177
13178 static __always_inline __must_check
13179 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
13180 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
13181 {
13182 - int ret = 0;
13183 + size_t sz = __compiletime_object_size(dst);
13184 + unsigned ret = 0;
13185
13186 might_fault();
13187 - if (!__builtin_constant_p(size))
13188 - return copy_user_generic(dst, (__force void *)src, size);
13189 +
13190 + if (size > INT_MAX)
13191 + return size;
13192 +
13193 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13194 + if (!__access_ok(VERIFY_READ, src, size))
13195 + return size;
13196 +#endif
13197 +
13198 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13199 + copy_from_user_overflow();
13200 + return size;
13201 + }
13202 +
13203 + if (!__builtin_constant_p(size)) {
13204 + check_object_size(dst, size, false);
13205 +
13206 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13207 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13208 + src += PAX_USER_SHADOW_BASE;
13209 +#endif
13210 +
13211 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13212 + }
13213 switch (size) {
13214 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13215 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
13216 ret, "b", "b", "=q", 1);
13217 return ret;
13218 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13219 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13220 ret, "w", "w", "=r", 2);
13221 return ret;
13222 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13223 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13224 ret, "l", "k", "=r", 4);
13225 return ret;
13226 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13227 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13228 ret, "q", "", "=r", 8);
13229 return ret;
13230 case 10:
13231 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13232 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13233 ret, "q", "", "=r", 10);
13234 if (unlikely(ret))
13235 return ret;
13236 __get_user_asm(*(u16 *)(8 + (char *)dst),
13237 - (u16 __user *)(8 + (char __user *)src),
13238 + (const u16 __user *)(8 + (const char __user *)src),
13239 ret, "w", "w", "=r", 2);
13240 return ret;
13241 case 16:
13242 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13243 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13244 ret, "q", "", "=r", 16);
13245 if (unlikely(ret))
13246 return ret;
13247 __get_user_asm(*(u64 *)(8 + (char *)dst),
13248 - (u64 __user *)(8 + (char __user *)src),
13249 + (const u64 __user *)(8 + (const char __user *)src),
13250 ret, "q", "", "=r", 8);
13251 return ret;
13252 default:
13253 - return copy_user_generic(dst, (__force void *)src, size);
13254 +
13255 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13256 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13257 + src += PAX_USER_SHADOW_BASE;
13258 +#endif
13259 +
13260 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13261 }
13262 }
13263
13264 static __always_inline __must_check
13265 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
13266 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
13267 {
13268 - int ret = 0;
13269 + size_t sz = __compiletime_object_size(src);
13270 + unsigned ret = 0;
13271
13272 might_fault();
13273 - if (!__builtin_constant_p(size))
13274 - return copy_user_generic((__force void *)dst, src, size);
13275 +
13276 + if (size > INT_MAX)
13277 + return size;
13278 +
13279 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13280 + if (!__access_ok(VERIFY_WRITE, dst, size))
13281 + return size;
13282 +#endif
13283 +
13284 + if (unlikely(sz != (size_t)-1 && sz < size)) {
13285 + copy_to_user_overflow();
13286 + return size;
13287 + }
13288 +
13289 + if (!__builtin_constant_p(size)) {
13290 + check_object_size(src, size, true);
13291 +
13292 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13293 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13294 + dst += PAX_USER_SHADOW_BASE;
13295 +#endif
13296 +
13297 + return copy_user_generic((__force_kernel void *)dst, src, size);
13298 + }
13299 switch (size) {
13300 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13301 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
13302 ret, "b", "b", "iq", 1);
13303 return ret;
13304 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13305 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13306 ret, "w", "w", "ir", 2);
13307 return ret;
13308 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13309 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13310 ret, "l", "k", "ir", 4);
13311 return ret;
13312 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13313 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13314 ret, "q", "", "er", 8);
13315 return ret;
13316 case 10:
13317 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13318 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13319 ret, "q", "", "er", 10);
13320 if (unlikely(ret))
13321 return ret;
13322 asm("":::"memory");
13323 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13324 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13325 ret, "w", "w", "ir", 2);
13326 return ret;
13327 case 16:
13328 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13329 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13330 ret, "q", "", "er", 16);
13331 if (unlikely(ret))
13332 return ret;
13333 asm("":::"memory");
13334 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13335 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
13336 ret, "q", "", "er", 8);
13337 return ret;
13338 default:
13339 - return copy_user_generic((__force void *)dst, src, size);
13340 +
13341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13342 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13343 + dst += PAX_USER_SHADOW_BASE;
13344 +#endif
13345 +
13346 + return copy_user_generic((__force_kernel void *)dst, src, size);
13347 }
13348 }
13349
13350 static __always_inline __must_check
13351 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13352 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
13353 {
13354 - int ret = 0;
13355 + unsigned ret = 0;
13356
13357 might_fault();
13358 - if (!__builtin_constant_p(size))
13359 - return copy_user_generic((__force void *)dst,
13360 - (__force void *)src, size);
13361 +
13362 + if (size > INT_MAX)
13363 + return size;
13364 +
13365 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13366 + if (!__access_ok(VERIFY_READ, src, size))
13367 + return size;
13368 + if (!__access_ok(VERIFY_WRITE, dst, size))
13369 + return size;
13370 +#endif
13371 +
13372 + if (!__builtin_constant_p(size)) {
13373 +
13374 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13375 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13376 + src += PAX_USER_SHADOW_BASE;
13377 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13378 + dst += PAX_USER_SHADOW_BASE;
13379 +#endif
13380 +
13381 + return copy_user_generic((__force_kernel void *)dst,
13382 + (__force_kernel const void *)src, size);
13383 + }
13384 switch (size) {
13385 case 1: {
13386 u8 tmp;
13387 - __get_user_asm(tmp, (u8 __user *)src,
13388 + __get_user_asm(tmp, (const u8 __user *)src,
13389 ret, "b", "b", "=q", 1);
13390 if (likely(!ret))
13391 __put_user_asm(tmp, (u8 __user *)dst,
13392 @@ -176,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13393 }
13394 case 2: {
13395 u16 tmp;
13396 - __get_user_asm(tmp, (u16 __user *)src,
13397 + __get_user_asm(tmp, (const u16 __user *)src,
13398 ret, "w", "w", "=r", 2);
13399 if (likely(!ret))
13400 __put_user_asm(tmp, (u16 __user *)dst,
13401 @@ -186,7 +287,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13402
13403 case 4: {
13404 u32 tmp;
13405 - __get_user_asm(tmp, (u32 __user *)src,
13406 + __get_user_asm(tmp, (const u32 __user *)src,
13407 ret, "l", "k", "=r", 4);
13408 if (likely(!ret))
13409 __put_user_asm(tmp, (u32 __user *)dst,
13410 @@ -195,7 +296,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13411 }
13412 case 8: {
13413 u64 tmp;
13414 - __get_user_asm(tmp, (u64 __user *)src,
13415 + __get_user_asm(tmp, (const u64 __user *)src,
13416 ret, "q", "", "=r", 8);
13417 if (likely(!ret))
13418 __put_user_asm(tmp, (u64 __user *)dst,
13419 @@ -203,47 +304,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
13420 return ret;
13421 }
13422 default:
13423 - return copy_user_generic((__force void *)dst,
13424 - (__force void *)src, size);
13425 +
13426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13427 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13428 + src += PAX_USER_SHADOW_BASE;
13429 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13430 + dst += PAX_USER_SHADOW_BASE;
13431 +#endif
13432 +
13433 + return copy_user_generic((__force_kernel void *)dst,
13434 + (__force_kernel const void *)src, size);
13435 }
13436 }
13437
13438 __must_check long strnlen_user(const char __user *str, long n);
13439 __must_check long __strnlen_user(const char __user *str, long n);
13440 __must_check long strlen_user(const char __user *str);
13441 -__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13442 -__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13443 +__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13444 +__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13445
13446 static __must_check __always_inline int
13447 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13448 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
13449 {
13450 - return copy_user_generic(dst, (__force const void *)src, size);
13451 + if (size > INT_MAX)
13452 + return size;
13453 +
13454 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13455 + if (!__access_ok(VERIFY_READ, src, size))
13456 + return size;
13457 +
13458 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13459 + src += PAX_USER_SHADOW_BASE;
13460 +#endif
13461 +
13462 + return copy_user_generic(dst, (__force_kernel const void *)src, size);
13463 }
13464
13465 -static __must_check __always_inline int
13466 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
13467 +static __must_check __always_inline unsigned long
13468 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
13469 {
13470 - return copy_user_generic((__force void *)dst, src, size);
13471 + if (size > INT_MAX)
13472 + return size;
13473 +
13474 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13475 + if (!__access_ok(VERIFY_WRITE, dst, size))
13476 + return size;
13477 +
13478 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13479 + dst += PAX_USER_SHADOW_BASE;
13480 +#endif
13481 +
13482 + return copy_user_generic((__force_kernel void *)dst, src, size);
13483 }
13484
13485 -extern long __copy_user_nocache(void *dst, const void __user *src,
13486 - unsigned size, int zerorest);
13487 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
13488 + unsigned long size, int zerorest) __size_overflow(3);
13489
13490 -static inline int
13491 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
13492 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
13493 {
13494 might_sleep();
13495 +
13496 + if (size > INT_MAX)
13497 + return size;
13498 +
13499 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13500 + if (!__access_ok(VERIFY_READ, src, size))
13501 + return size;
13502 +#endif
13503 +
13504 return __copy_user_nocache(dst, src, size, 1);
13505 }
13506
13507 -static inline int
13508 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13509 - unsigned size)
13510 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
13511 + unsigned long size)
13512 {
13513 + if (size > INT_MAX)
13514 + return size;
13515 +
13516 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13517 + if (!__access_ok(VERIFY_READ, src, size))
13518 + return size;
13519 +#endif
13520 +
13521 return __copy_user_nocache(dst, src, size, 0);
13522 }
13523
13524 -unsigned long
13525 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
13526 +extern unsigned long
13527 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
13528
13529 #endif /* _ASM_X86_UACCESS_64_H */
13530 diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13531 index bb05228..d763d5b 100644
13532 --- a/arch/x86/include/asm/vdso.h
13533 +++ b/arch/x86/include/asm/vdso.h
13534 @@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13535 #define VDSO32_SYMBOL(base, name) \
13536 ({ \
13537 extern const char VDSO32_##name[]; \
13538 - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13539 + (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13540 })
13541 #endif
13542
13543 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13544 index 764b66a..ad3cfc8 100644
13545 --- a/arch/x86/include/asm/x86_init.h
13546 +++ b/arch/x86/include/asm/x86_init.h
13547 @@ -29,7 +29,7 @@ struct x86_init_mpparse {
13548 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13549 void (*find_smp_config)(void);
13550 void (*get_smp_config)(unsigned int early);
13551 -};
13552 +} __no_const;
13553
13554 /**
13555 * struct x86_init_resources - platform specific resource related ops
13556 @@ -43,7 +43,7 @@ struct x86_init_resources {
13557 void (*probe_roms)(void);
13558 void (*reserve_resources)(void);
13559 char *(*memory_setup)(void);
13560 -};
13561 +} __no_const;
13562
13563 /**
13564 * struct x86_init_irqs - platform specific interrupt setup
13565 @@ -56,7 +56,7 @@ struct x86_init_irqs {
13566 void (*pre_vector_init)(void);
13567 void (*intr_init)(void);
13568 void (*trap_init)(void);
13569 -};
13570 +} __no_const;
13571
13572 /**
13573 * struct x86_init_oem - oem platform specific customizing functions
13574 @@ -66,7 +66,7 @@ struct x86_init_irqs {
13575 struct x86_init_oem {
13576 void (*arch_setup)(void);
13577 void (*banner)(void);
13578 -};
13579 +} __no_const;
13580
13581 /**
13582 * struct x86_init_mapping - platform specific initial kernel pagetable setup
13583 @@ -77,7 +77,7 @@ struct x86_init_oem {
13584 */
13585 struct x86_init_mapping {
13586 void (*pagetable_reserve)(u64 start, u64 end);
13587 -};
13588 +} __no_const;
13589
13590 /**
13591 * struct x86_init_paging - platform specific paging functions
13592 @@ -87,7 +87,7 @@ struct x86_init_mapping {
13593 struct x86_init_paging {
13594 void (*pagetable_setup_start)(pgd_t *base);
13595 void (*pagetable_setup_done)(pgd_t *base);
13596 -};
13597 +} __no_const;
13598
13599 /**
13600 * struct x86_init_timers - platform specific timer setup
13601 @@ -102,7 +102,7 @@ struct x86_init_timers {
13602 void (*tsc_pre_init)(void);
13603 void (*timer_init)(void);
13604 void (*wallclock_init)(void);
13605 -};
13606 +} __no_const;
13607
13608 /**
13609 * struct x86_init_iommu - platform specific iommu setup
13610 @@ -110,7 +110,7 @@ struct x86_init_timers {
13611 */
13612 struct x86_init_iommu {
13613 int (*iommu_init)(void);
13614 -};
13615 +} __no_const;
13616
13617 /**
13618 * struct x86_init_pci - platform specific pci init functions
13619 @@ -124,7 +124,7 @@ struct x86_init_pci {
13620 int (*init)(void);
13621 void (*init_irq)(void);
13622 void (*fixup_irqs)(void);
13623 -};
13624 +} __no_const;
13625
13626 /**
13627 * struct x86_init_ops - functions for platform specific setup
13628 @@ -140,7 +140,7 @@ struct x86_init_ops {
13629 struct x86_init_timers timers;
13630 struct x86_init_iommu iommu;
13631 struct x86_init_pci pci;
13632 -};
13633 +} __no_const;
13634
13635 /**
13636 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13637 @@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
13638 void (*setup_percpu_clockev)(void);
13639 void (*early_percpu_clock_init)(void);
13640 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
13641 -};
13642 +} __no_const;
13643
13644 /**
13645 * struct x86_platform_ops - platform specific runtime functions
13646 @@ -177,7 +177,7 @@ struct x86_platform_ops {
13647 int (*i8042_detect)(void);
13648 void (*save_sched_clock_state)(void);
13649 void (*restore_sched_clock_state)(void);
13650 -};
13651 +} __no_const;
13652
13653 struct pci_dev;
13654
13655 @@ -186,7 +186,7 @@ struct x86_msi_ops {
13656 void (*teardown_msi_irq)(unsigned int irq);
13657 void (*teardown_msi_irqs)(struct pci_dev *dev);
13658 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
13659 -};
13660 +} __no_const;
13661
13662 extern struct x86_init_ops x86_init;
13663 extern struct x86_cpuinit_ops x86_cpuinit;
13664 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13665 index c6ce245..ffbdab7 100644
13666 --- a/arch/x86/include/asm/xsave.h
13667 +++ b/arch/x86/include/asm/xsave.h
13668 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13669 {
13670 int err;
13671
13672 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13673 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13674 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13675 +#endif
13676 +
13677 /*
13678 * Clear the xsave header first, so that reserved fields are
13679 * initialized to zero.
13680 @@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13681 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13682 {
13683 int err;
13684 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13685 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13686 u32 lmask = mask;
13687 u32 hmask = mask >> 32;
13688
13689 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13690 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13691 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13692 +#endif
13693 +
13694 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13695 "2:\n"
13696 ".section .fixup,\"ax\"\n"
13697 diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13698 index 6a564ac..9b1340c 100644
13699 --- a/arch/x86/kernel/acpi/realmode/Makefile
13700 +++ b/arch/x86/kernel/acpi/realmode/Makefile
13701 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13702 $(call cc-option, -fno-stack-protector) \
13703 $(call cc-option, -mpreferred-stack-boundary=2)
13704 KBUILD_CFLAGS += $(call cc-option, -m32)
13705 +ifdef CONSTIFY_PLUGIN
13706 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13707 +endif
13708 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13709 GCOV_PROFILE := n
13710
13711 diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13712 index b4fd836..4358fe3 100644
13713 --- a/arch/x86/kernel/acpi/realmode/wakeup.S
13714 +++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13715 @@ -108,6 +108,9 @@ wakeup_code:
13716 /* Do any other stuff... */
13717
13718 #ifndef CONFIG_64BIT
13719 + /* Recheck NX bit overrides (64bit path does this in trampoline */
13720 + call verify_cpu
13721 +
13722 /* This could also be done in C code... */
13723 movl pmode_cr3, %eax
13724 movl %eax, %cr3
13725 @@ -131,6 +134,7 @@ wakeup_code:
13726 movl pmode_cr0, %eax
13727 movl %eax, %cr0
13728 jmp pmode_return
13729 +# include "../../verify_cpu.S"
13730 #else
13731 pushw $0
13732 pushw trampoline_segment
13733 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13734 index 146a49c..1b5338b 100644
13735 --- a/arch/x86/kernel/acpi/sleep.c
13736 +++ b/arch/x86/kernel/acpi/sleep.c
13737 @@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
13738 header->trampoline_segment = trampoline_address() >> 4;
13739 #ifdef CONFIG_SMP
13740 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13741 +
13742 + pax_open_kernel();
13743 early_gdt_descr.address =
13744 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13745 + pax_close_kernel();
13746 +
13747 initial_gs = per_cpu_offset(smp_processor_id());
13748 #endif
13749 initial_code = (unsigned long)wakeup_long64;
13750 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13751 index 7261083..5c12053 100644
13752 --- a/arch/x86/kernel/acpi/wakeup_32.S
13753 +++ b/arch/x86/kernel/acpi/wakeup_32.S
13754 @@ -30,13 +30,11 @@ wakeup_pmode_return:
13755 # and restore the stack ... but you need gdt for this to work
13756 movl saved_context_esp, %esp
13757
13758 - movl %cs:saved_magic, %eax
13759 - cmpl $0x12345678, %eax
13760 + cmpl $0x12345678, saved_magic
13761 jne bogus_magic
13762
13763 # jump to place where we left off
13764 - movl saved_eip, %eax
13765 - jmp *%eax
13766 + jmp *(saved_eip)
13767
13768 bogus_magic:
13769 jmp bogus_magic
13770 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13771 index 1f84794..e23f862 100644
13772 --- a/arch/x86/kernel/alternative.c
13773 +++ b/arch/x86/kernel/alternative.c
13774 @@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
13775 */
13776 for (a = start; a < end; a++) {
13777 instr = (u8 *)&a->instr_offset + a->instr_offset;
13778 +
13779 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13780 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13781 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13782 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13783 +#endif
13784 +
13785 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13786 BUG_ON(a->replacementlen > a->instrlen);
13787 BUG_ON(a->instrlen > sizeof(insnbuf));
13788 @@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
13789 for (poff = start; poff < end; poff++) {
13790 u8 *ptr = (u8 *)poff + *poff;
13791
13792 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13793 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13794 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13795 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13796 +#endif
13797 +
13798 if (!*poff || ptr < text || ptr >= text_end)
13799 continue;
13800 /* turn DS segment override prefix into lock prefix */
13801 - if (*ptr == 0x3e)
13802 + if (*ktla_ktva(ptr) == 0x3e)
13803 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13804 };
13805 mutex_unlock(&text_mutex);
13806 @@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
13807 for (poff = start; poff < end; poff++) {
13808 u8 *ptr = (u8 *)poff + *poff;
13809
13810 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13811 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13812 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13813 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13814 +#endif
13815 +
13816 if (!*poff || ptr < text || ptr >= text_end)
13817 continue;
13818 /* turn lock prefix into DS segment override prefix */
13819 - if (*ptr == 0xf0)
13820 + if (*ktla_ktva(ptr) == 0xf0)
13821 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13822 };
13823 mutex_unlock(&text_mutex);
13824 @@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13825
13826 BUG_ON(p->len > MAX_PATCH_LEN);
13827 /* prep the buffer with the original instructions */
13828 - memcpy(insnbuf, p->instr, p->len);
13829 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13830 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13831 (unsigned long)p->instr, p->len);
13832
13833 @@ -568,7 +587,7 @@ void __init alternative_instructions(void)
13834 if (smp_alt_once)
13835 free_init_pages("SMP alternatives",
13836 (unsigned long)__smp_locks,
13837 - (unsigned long)__smp_locks_end);
13838 + PAGE_ALIGN((unsigned long)__smp_locks_end));
13839
13840 restart_nmi();
13841 }
13842 @@ -585,13 +604,17 @@ void __init alternative_instructions(void)
13843 * instructions. And on the local CPU you need to be protected again NMI or MCE
13844 * handlers seeing an inconsistent instruction while you patch.
13845 */
13846 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
13847 +void *__kprobes text_poke_early(void *addr, const void *opcode,
13848 size_t len)
13849 {
13850 unsigned long flags;
13851 local_irq_save(flags);
13852 - memcpy(addr, opcode, len);
13853 +
13854 + pax_open_kernel();
13855 + memcpy(ktla_ktva(addr), opcode, len);
13856 sync_core();
13857 + pax_close_kernel();
13858 +
13859 local_irq_restore(flags);
13860 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13861 that causes hangs on some VIA CPUs. */
13862 @@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
13863 */
13864 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13865 {
13866 - unsigned long flags;
13867 - char *vaddr;
13868 + unsigned char *vaddr = ktla_ktva(addr);
13869 struct page *pages[2];
13870 - int i;
13871 + size_t i;
13872
13873 if (!core_kernel_text((unsigned long)addr)) {
13874 - pages[0] = vmalloc_to_page(addr);
13875 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13876 + pages[0] = vmalloc_to_page(vaddr);
13877 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13878 } else {
13879 - pages[0] = virt_to_page(addr);
13880 + pages[0] = virt_to_page(vaddr);
13881 WARN_ON(!PageReserved(pages[0]));
13882 - pages[1] = virt_to_page(addr + PAGE_SIZE);
13883 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13884 }
13885 BUG_ON(!pages[0]);
13886 - local_irq_save(flags);
13887 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13888 - if (pages[1])
13889 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13890 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13891 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13892 - clear_fixmap(FIX_TEXT_POKE0);
13893 - if (pages[1])
13894 - clear_fixmap(FIX_TEXT_POKE1);
13895 - local_flush_tlb();
13896 - sync_core();
13897 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
13898 - that causes hangs on some VIA CPUs. */
13899 + text_poke_early(addr, opcode, len);
13900 for (i = 0; i < len; i++)
13901 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13902 - local_irq_restore(flags);
13903 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13904 return addr;
13905 }
13906
13907 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13908 index edc2448..553e7c5 100644
13909 --- a/arch/x86/kernel/apic/apic.c
13910 +++ b/arch/x86/kernel/apic/apic.c
13911 @@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
13912 /*
13913 * Debug level, exported for io_apic.c
13914 */
13915 -unsigned int apic_verbosity;
13916 +int apic_verbosity;
13917
13918 int pic_mode;
13919
13920 @@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13921 apic_write(APIC_ESR, 0);
13922 v1 = apic_read(APIC_ESR);
13923 ack_APIC_irq();
13924 - atomic_inc(&irq_err_count);
13925 + atomic_inc_unchecked(&irq_err_count);
13926
13927 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13928 smp_processor_id(), v0 , v1);
13929 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13930 index e88300d..cd5a87a 100644
13931 --- a/arch/x86/kernel/apic/io_apic.c
13932 +++ b/arch/x86/kernel/apic/io_apic.c
13933 @@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13934
13935 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13936 {
13937 - io_apic_ops = *ops;
13938 + pax_open_kernel();
13939 + memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13940 + pax_close_kernel();
13941 }
13942
13943 /*
13944 @@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13945 }
13946 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13947
13948 -void lock_vector_lock(void)
13949 +void lock_vector_lock(void) __acquires(vector_lock)
13950 {
13951 /* Used to the online set of cpus does not change
13952 * during assign_irq_vector.
13953 @@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
13954 raw_spin_lock(&vector_lock);
13955 }
13956
13957 -void unlock_vector_lock(void)
13958 +void unlock_vector_lock(void) __releases(vector_lock)
13959 {
13960 raw_spin_unlock(&vector_lock);
13961 }
13962 @@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
13963 ack_APIC_irq();
13964 }
13965
13966 -atomic_t irq_mis_count;
13967 +atomic_unchecked_t irq_mis_count;
13968
13969 #ifdef CONFIG_GENERIC_PENDING_IRQ
13970 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13971 @@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
13972 * at the cpu.
13973 */
13974 if (!(v & (1 << (i & 0x1f)))) {
13975 - atomic_inc(&irq_mis_count);
13976 + atomic_inc_unchecked(&irq_mis_count);
13977
13978 eoi_ioapic_irq(irq, cfg);
13979 }
13980 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13981 index 459e78c..f037006 100644
13982 --- a/arch/x86/kernel/apm_32.c
13983 +++ b/arch/x86/kernel/apm_32.c
13984 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
13985 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13986 * even though they are called in protected mode.
13987 */
13988 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13989 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13990 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13991
13992 static const char driver_version[] = "1.16ac"; /* no spaces */
13993 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13994 BUG_ON(cpu != 0);
13995 gdt = get_cpu_gdt_table(cpu);
13996 save_desc_40 = gdt[0x40 / 8];
13997 +
13998 + pax_open_kernel();
13999 gdt[0x40 / 8] = bad_bios_desc;
14000 + pax_close_kernel();
14001
14002 apm_irq_save(flags);
14003 APM_DO_SAVE_SEGS;
14004 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14005 &call->esi);
14006 APM_DO_RESTORE_SEGS;
14007 apm_irq_restore(flags);
14008 +
14009 + pax_open_kernel();
14010 gdt[0x40 / 8] = save_desc_40;
14011 + pax_close_kernel();
14012 +
14013 put_cpu();
14014
14015 return call->eax & 0xff;
14016 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14017 BUG_ON(cpu != 0);
14018 gdt = get_cpu_gdt_table(cpu);
14019 save_desc_40 = gdt[0x40 / 8];
14020 +
14021 + pax_open_kernel();
14022 gdt[0x40 / 8] = bad_bios_desc;
14023 + pax_close_kernel();
14024
14025 apm_irq_save(flags);
14026 APM_DO_SAVE_SEGS;
14027 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14028 &call->eax);
14029 APM_DO_RESTORE_SEGS;
14030 apm_irq_restore(flags);
14031 +
14032 + pax_open_kernel();
14033 gdt[0x40 / 8] = save_desc_40;
14034 + pax_close_kernel();
14035 +
14036 put_cpu();
14037 return error;
14038 }
14039 @@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14040 * code to that CPU.
14041 */
14042 gdt = get_cpu_gdt_table(0);
14043 +
14044 + pax_open_kernel();
14045 set_desc_base(&gdt[APM_CS >> 3],
14046 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14047 set_desc_base(&gdt[APM_CS_16 >> 3],
14048 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14049 set_desc_base(&gdt[APM_DS >> 3],
14050 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14051 + pax_close_kernel();
14052
14053 proc_create("apm", 0, NULL, &apm_file_ops);
14054
14055 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
14056 index 68de2dc..1f3c720 100644
14057 --- a/arch/x86/kernel/asm-offsets.c
14058 +++ b/arch/x86/kernel/asm-offsets.c
14059 @@ -33,6 +33,8 @@ void common(void) {
14060 OFFSET(TI_status, thread_info, status);
14061 OFFSET(TI_addr_limit, thread_info, addr_limit);
14062 OFFSET(TI_preempt_count, thread_info, preempt_count);
14063 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14064 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
14065
14066 BLANK();
14067 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
14068 @@ -53,8 +55,26 @@ void common(void) {
14069 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
14070 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14071 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14072 +
14073 +#ifdef CONFIG_PAX_KERNEXEC
14074 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14075 #endif
14076
14077 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14078 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14079 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14080 +#ifdef CONFIG_X86_64
14081 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14082 +#endif
14083 +#endif
14084 +
14085 +#endif
14086 +
14087 + BLANK();
14088 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14089 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
14090 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
14091 +
14092 #ifdef CONFIG_XEN
14093 BLANK();
14094 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
14095 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
14096 index 1b4754f..fbb4227 100644
14097 --- a/arch/x86/kernel/asm-offsets_64.c
14098 +++ b/arch/x86/kernel/asm-offsets_64.c
14099 @@ -76,6 +76,7 @@ int main(void)
14100 BLANK();
14101 #undef ENTRY
14102
14103 + DEFINE(TSS_size, sizeof(struct tss_struct));
14104 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14105 BLANK();
14106
14107 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
14108 index 6ab6aa2..8f71507 100644
14109 --- a/arch/x86/kernel/cpu/Makefile
14110 +++ b/arch/x86/kernel/cpu/Makefile
14111 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14112 CFLAGS_REMOVE_perf_event.o = -pg
14113 endif
14114
14115 -# Make sure load_percpu_segment has no stackprotector
14116 -nostackp := $(call cc-option, -fno-stack-protector)
14117 -CFLAGS_common.o := $(nostackp)
14118 -
14119 obj-y := intel_cacheinfo.o scattered.o topology.o
14120 obj-y += proc.o capflags.o powerflags.o common.o
14121 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14122 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
14123 index 146bb62..ac9c74a 100644
14124 --- a/arch/x86/kernel/cpu/amd.c
14125 +++ b/arch/x86/kernel/cpu/amd.c
14126 @@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14127 unsigned int size)
14128 {
14129 /* AMD errata T13 (order #21922) */
14130 - if ((c->x86 == 6)) {
14131 + if (c->x86 == 6) {
14132 /* Duron Rev A0 */
14133 if (c->x86_model == 3 && c->x86_mask == 0)
14134 size = 64;
14135 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
14136 index cf79302..b1b28ae 100644
14137 --- a/arch/x86/kernel/cpu/common.c
14138 +++ b/arch/x86/kernel/cpu/common.c
14139 @@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14140
14141 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14142
14143 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14144 -#ifdef CONFIG_X86_64
14145 - /*
14146 - * We need valid kernel segments for data and code in long mode too
14147 - * IRET will check the segment types kkeil 2000/10/28
14148 - * Also sysret mandates a special GDT layout
14149 - *
14150 - * TLS descriptors are currently at a different place compared to i386.
14151 - * Hopefully nobody expects them at a fixed place (Wine?)
14152 - */
14153 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14154 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14155 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14156 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14157 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14158 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
14159 -#else
14160 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14161 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14162 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14163 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14164 - /*
14165 - * Segments used for calling PnP BIOS have byte granularity.
14166 - * They code segments and data segments have fixed 64k limits,
14167 - * the transfer segment sizes are set at run time.
14168 - */
14169 - /* 32-bit code */
14170 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14171 - /* 16-bit code */
14172 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14173 - /* 16-bit data */
14174 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
14175 - /* 16-bit data */
14176 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
14177 - /* 16-bit data */
14178 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14179 - /*
14180 - * The APM segments have byte granularity and their bases
14181 - * are set at run time. All have 64k limits.
14182 - */
14183 - /* 32-bit code */
14184 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
14185 - /* 16-bit code */
14186 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
14187 - /* data */
14188 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
14189 -
14190 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14191 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14192 - GDT_STACK_CANARY_INIT
14193 -#endif
14194 -} };
14195 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14196 -
14197 static int __init x86_xsave_setup(char *s)
14198 {
14199 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
14200 @@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14201 {
14202 struct desc_ptr gdt_descr;
14203
14204 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14205 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14206 gdt_descr.size = GDT_SIZE - 1;
14207 load_gdt(&gdt_descr);
14208 /* Reload the per-cpu base */
14209 @@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14210 /* Filter out anything that depends on CPUID levels we don't have */
14211 filter_cpuid_features(c, true);
14212
14213 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14214 + setup_clear_cpu_cap(X86_FEATURE_SEP);
14215 +#endif
14216 +
14217 /* If the model name is still unset, do table lookup. */
14218 if (!c->x86_model_id[0]) {
14219 const char *p;
14220 @@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14221 }
14222 __setup("clearcpuid=", setup_disablecpuid);
14223
14224 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14225 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
14226 +
14227 #ifdef CONFIG_X86_64
14228 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14229 -struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14230 - (unsigned long) nmi_idt_table };
14231 +struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
14232
14233 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14234 irq_stack_union) __aligned(PAGE_SIZE);
14235 @@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14236 EXPORT_PER_CPU_SYMBOL(current_task);
14237
14238 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14239 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
14240 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14241 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14242
14243 DEFINE_PER_CPU(char *, irq_stack_ptr) =
14244 @@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14245 {
14246 memset(regs, 0, sizeof(struct pt_regs));
14247 regs->fs = __KERNEL_PERCPU;
14248 - regs->gs = __KERNEL_STACK_CANARY;
14249 + savesegment(gs, regs->gs);
14250
14251 return regs;
14252 }
14253 @@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14254 int i;
14255
14256 cpu = stack_smp_processor_id();
14257 - t = &per_cpu(init_tss, cpu);
14258 + t = init_tss + cpu;
14259 oist = &per_cpu(orig_ist, cpu);
14260
14261 #ifdef CONFIG_NUMA
14262 @@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14263 switch_to_new_gdt(cpu);
14264 loadsegment(fs, 0);
14265
14266 - load_idt((const struct desc_ptr *)&idt_descr);
14267 + load_idt(&idt_descr);
14268
14269 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14270 syscall_init();
14271 @@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14272 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14273 barrier();
14274
14275 - x86_configure_nx();
14276 if (cpu != 0)
14277 enable_x2apic();
14278
14279 @@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14280 {
14281 int cpu = smp_processor_id();
14282 struct task_struct *curr = current;
14283 - struct tss_struct *t = &per_cpu(init_tss, cpu);
14284 + struct tss_struct *t = init_tss + cpu;
14285 struct thread_struct *thread = &curr->thread;
14286
14287 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
14288 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
14289 index 3e6ff6c..54b4992 100644
14290 --- a/arch/x86/kernel/cpu/intel.c
14291 +++ b/arch/x86/kernel/cpu/intel.c
14292 @@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14293 * Update the IDT descriptor and reload the IDT so that
14294 * it uses the read-only mapped virtual address.
14295 */
14296 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14297 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14298 load_idt(&idt_descr);
14299 }
14300 #endif
14301 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
14302 index 61604ae..98250a5 100644
14303 --- a/arch/x86/kernel/cpu/mcheck/mce.c
14304 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
14305 @@ -42,6 +42,7 @@
14306 #include <asm/processor.h>
14307 #include <asm/mce.h>
14308 #include <asm/msr.h>
14309 +#include <asm/local.h>
14310
14311 #include "mce-internal.h"
14312
14313 @@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14314 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14315 m->cs, m->ip);
14316
14317 - if (m->cs == __KERNEL_CS)
14318 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14319 print_symbol("{%s}", m->ip);
14320 pr_cont("\n");
14321 }
14322 @@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14323
14324 #define PANIC_TIMEOUT 5 /* 5 seconds */
14325
14326 -static atomic_t mce_paniced;
14327 +static atomic_unchecked_t mce_paniced;
14328
14329 static int fake_panic;
14330 -static atomic_t mce_fake_paniced;
14331 +static atomic_unchecked_t mce_fake_paniced;
14332
14333 /* Panic in progress. Enable interrupts and wait for final IPI */
14334 static void wait_for_panic(void)
14335 @@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14336 /*
14337 * Make sure only one CPU runs in machine check panic
14338 */
14339 - if (atomic_inc_return(&mce_paniced) > 1)
14340 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14341 wait_for_panic();
14342 barrier();
14343
14344 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14345 console_verbose();
14346 } else {
14347 /* Don't log too much for fake panic */
14348 - if (atomic_inc_return(&mce_fake_paniced) > 1)
14349 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14350 return;
14351 }
14352 /* First print corrected ones that are still unlogged */
14353 @@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14354 * might have been modified by someone else.
14355 */
14356 rmb();
14357 - if (atomic_read(&mce_paniced))
14358 + if (atomic_read_unchecked(&mce_paniced))
14359 wait_for_panic();
14360 if (!monarch_timeout)
14361 goto out;
14362 @@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14363 }
14364
14365 /* Call the installed machine check handler for this CPU setup. */
14366 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
14367 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14368 unexpected_machine_check;
14369
14370 /*
14371 @@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14372 return;
14373 }
14374
14375 + pax_open_kernel();
14376 machine_check_vector = do_machine_check;
14377 + pax_close_kernel();
14378
14379 __mcheck_cpu_init_generic();
14380 __mcheck_cpu_init_vendor(c);
14381 @@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14382 */
14383
14384 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14385 -static int mce_chrdev_open_count; /* #times opened */
14386 +static local_t mce_chrdev_open_count; /* #times opened */
14387 static int mce_chrdev_open_exclu; /* already open exclusive? */
14388
14389 static int mce_chrdev_open(struct inode *inode, struct file *file)
14390 @@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14391 spin_lock(&mce_chrdev_state_lock);
14392
14393 if (mce_chrdev_open_exclu ||
14394 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14395 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14396 spin_unlock(&mce_chrdev_state_lock);
14397
14398 return -EBUSY;
14399 @@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14400
14401 if (file->f_flags & O_EXCL)
14402 mce_chrdev_open_exclu = 1;
14403 - mce_chrdev_open_count++;
14404 + local_inc(&mce_chrdev_open_count);
14405
14406 spin_unlock(&mce_chrdev_state_lock);
14407
14408 @@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
14409 {
14410 spin_lock(&mce_chrdev_state_lock);
14411
14412 - mce_chrdev_open_count--;
14413 + local_dec(&mce_chrdev_open_count);
14414 mce_chrdev_open_exclu = 0;
14415
14416 spin_unlock(&mce_chrdev_state_lock);
14417 @@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14418 static void mce_reset(void)
14419 {
14420 cpu_missing = 0;
14421 - atomic_set(&mce_fake_paniced, 0);
14422 + atomic_set_unchecked(&mce_fake_paniced, 0);
14423 atomic_set(&mce_executing, 0);
14424 atomic_set(&mce_callin, 0);
14425 atomic_set(&global_nwo, 0);
14426 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
14427 index 2d5454c..51987eb 100644
14428 --- a/arch/x86/kernel/cpu/mcheck/p5.c
14429 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
14430 @@ -11,6 +11,7 @@
14431 #include <asm/processor.h>
14432 #include <asm/mce.h>
14433 #include <asm/msr.h>
14434 +#include <asm/pgtable.h>
14435
14436 /* By default disabled */
14437 int mce_p5_enabled __read_mostly;
14438 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14439 if (!cpu_has(c, X86_FEATURE_MCE))
14440 return;
14441
14442 + pax_open_kernel();
14443 machine_check_vector = pentium_machine_check;
14444 + pax_close_kernel();
14445 /* Make sure the vector pointer is visible before we enable MCEs: */
14446 wmb();
14447
14448 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
14449 index 2d7998f..17c9de1 100644
14450 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
14451 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14452 @@ -10,6 +10,7 @@
14453 #include <asm/processor.h>
14454 #include <asm/mce.h>
14455 #include <asm/msr.h>
14456 +#include <asm/pgtable.h>
14457
14458 /* Machine check handler for WinChip C6: */
14459 static void winchip_machine_check(struct pt_regs *regs, long error_code)
14460 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14461 {
14462 u32 lo, hi;
14463
14464 + pax_open_kernel();
14465 machine_check_vector = winchip_machine_check;
14466 + pax_close_kernel();
14467 /* Make sure the vector pointer is visible before we enable MCEs: */
14468 wmb();
14469
14470 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14471 index 6b96110..0da73eb 100644
14472 --- a/arch/x86/kernel/cpu/mtrr/main.c
14473 +++ b/arch/x86/kernel/cpu/mtrr/main.c
14474 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14475 u64 size_or_mask, size_and_mask;
14476 static bool mtrr_aps_delayed_init;
14477
14478 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14479 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14480
14481 const struct mtrr_ops *mtrr_if;
14482
14483 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14484 index df5e41f..816c719 100644
14485 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14486 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14487 @@ -25,7 +25,7 @@ struct mtrr_ops {
14488 int (*validate_add_page)(unsigned long base, unsigned long size,
14489 unsigned int type);
14490 int (*have_wrcomb)(void);
14491 -};
14492 +} __do_const;
14493
14494 extern int generic_get_free_region(unsigned long base, unsigned long size,
14495 int replace_reg);
14496 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14497 index bb8e034..fb9020b 100644
14498 --- a/arch/x86/kernel/cpu/perf_event.c
14499 +++ b/arch/x86/kernel/cpu/perf_event.c
14500 @@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14501 break;
14502
14503 perf_callchain_store(entry, frame.return_address);
14504 - fp = frame.next_frame;
14505 + fp = (const void __force_user *)frame.next_frame;
14506 }
14507 }
14508
14509 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14510 index 13ad899..f642b9a 100644
14511 --- a/arch/x86/kernel/crash.c
14512 +++ b/arch/x86/kernel/crash.c
14513 @@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14514 {
14515 #ifdef CONFIG_X86_32
14516 struct pt_regs fixed_regs;
14517 -#endif
14518
14519 -#ifdef CONFIG_X86_32
14520 - if (!user_mode_vm(regs)) {
14521 + if (!user_mode(regs)) {
14522 crash_fixup_ss_esp(&fixed_regs, regs);
14523 regs = &fixed_regs;
14524 }
14525 diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14526 index 37250fe..bf2ec74 100644
14527 --- a/arch/x86/kernel/doublefault_32.c
14528 +++ b/arch/x86/kernel/doublefault_32.c
14529 @@ -11,7 +11,7 @@
14530
14531 #define DOUBLEFAULT_STACKSIZE (1024)
14532 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14533 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14534 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14535
14536 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14537
14538 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
14539 unsigned long gdt, tss;
14540
14541 store_gdt(&gdt_desc);
14542 - gdt = gdt_desc.address;
14543 + gdt = (unsigned long)gdt_desc.address;
14544
14545 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14546
14547 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14548 /* 0x2 bit is always set */
14549 .flags = X86_EFLAGS_SF | 0x2,
14550 .sp = STACK_START,
14551 - .es = __USER_DS,
14552 + .es = __KERNEL_DS,
14553 .cs = __KERNEL_CS,
14554 .ss = __KERNEL_DS,
14555 - .ds = __USER_DS,
14556 + .ds = __KERNEL_DS,
14557 .fs = __KERNEL_PERCPU,
14558
14559 .__cr3 = __pa_nodebug(swapper_pg_dir),
14560 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14561 index 1b81839..0b4e7b0 100644
14562 --- a/arch/x86/kernel/dumpstack.c
14563 +++ b/arch/x86/kernel/dumpstack.c
14564 @@ -2,6 +2,9 @@
14565 * Copyright (C) 1991, 1992 Linus Torvalds
14566 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14567 */
14568 +#ifdef CONFIG_GRKERNSEC_HIDESYM
14569 +#define __INCLUDED_BY_HIDESYM 1
14570 +#endif
14571 #include <linux/kallsyms.h>
14572 #include <linux/kprobes.h>
14573 #include <linux/uaccess.h>
14574 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14575 static void
14576 print_ftrace_graph_addr(unsigned long addr, void *data,
14577 const struct stacktrace_ops *ops,
14578 - struct thread_info *tinfo, int *graph)
14579 + struct task_struct *task, int *graph)
14580 {
14581 - struct task_struct *task;
14582 unsigned long ret_addr;
14583 int index;
14584
14585 if (addr != (unsigned long)return_to_handler)
14586 return;
14587
14588 - task = tinfo->task;
14589 index = task->curr_ret_stack;
14590
14591 if (!task->ret_stack || index < *graph)
14592 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14593 static inline void
14594 print_ftrace_graph_addr(unsigned long addr, void *data,
14595 const struct stacktrace_ops *ops,
14596 - struct thread_info *tinfo, int *graph)
14597 + struct task_struct *task, int *graph)
14598 { }
14599 #endif
14600
14601 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14602 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14603 */
14604
14605 -static inline int valid_stack_ptr(struct thread_info *tinfo,
14606 - void *p, unsigned int size, void *end)
14607 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14608 {
14609 - void *t = tinfo;
14610 if (end) {
14611 if (p < end && p >= (end-THREAD_SIZE))
14612 return 1;
14613 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14614 }
14615
14616 unsigned long
14617 -print_context_stack(struct thread_info *tinfo,
14618 +print_context_stack(struct task_struct *task, void *stack_start,
14619 unsigned long *stack, unsigned long bp,
14620 const struct stacktrace_ops *ops, void *data,
14621 unsigned long *end, int *graph)
14622 {
14623 struct stack_frame *frame = (struct stack_frame *)bp;
14624
14625 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14626 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14627 unsigned long addr;
14628
14629 addr = *stack;
14630 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14631 } else {
14632 ops->address(data, addr, 0);
14633 }
14634 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14635 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14636 }
14637 stack++;
14638 }
14639 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14640 EXPORT_SYMBOL_GPL(print_context_stack);
14641
14642 unsigned long
14643 -print_context_stack_bp(struct thread_info *tinfo,
14644 +print_context_stack_bp(struct task_struct *task, void *stack_start,
14645 unsigned long *stack, unsigned long bp,
14646 const struct stacktrace_ops *ops, void *data,
14647 unsigned long *end, int *graph)
14648 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14649 struct stack_frame *frame = (struct stack_frame *)bp;
14650 unsigned long *ret_addr = &frame->return_address;
14651
14652 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14653 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14654 unsigned long addr = *ret_addr;
14655
14656 if (!__kernel_text_address(addr))
14657 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14658 ops->address(data, addr, 1);
14659 frame = frame->next_frame;
14660 ret_addr = &frame->return_address;
14661 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14662 + print_ftrace_graph_addr(addr, data, ops, task, graph);
14663 }
14664
14665 return (unsigned long)frame;
14666 @@ -189,7 +188,7 @@ void dump_stack(void)
14667
14668 bp = stack_frame(current, NULL);
14669 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14670 - current->pid, current->comm, print_tainted(),
14671 + task_pid_nr(current), current->comm, print_tainted(),
14672 init_utsname()->release,
14673 (int)strcspn(init_utsname()->version, " "),
14674 init_utsname()->version);
14675 @@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14676 }
14677 EXPORT_SYMBOL_GPL(oops_begin);
14678
14679 +extern void gr_handle_kernel_exploit(void);
14680 +
14681 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14682 {
14683 if (regs && kexec_should_crash(current))
14684 @@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14685 panic("Fatal exception in interrupt");
14686 if (panic_on_oops)
14687 panic("Fatal exception");
14688 - do_exit(signr);
14689 +
14690 + gr_handle_kernel_exploit();
14691 +
14692 + do_group_exit(signr);
14693 }
14694
14695 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14696 @@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14697
14698 show_registers(regs);
14699 #ifdef CONFIG_X86_32
14700 - if (user_mode_vm(regs)) {
14701 + if (user_mode(regs)) {
14702 sp = regs->sp;
14703 ss = regs->ss & 0xffff;
14704 } else {
14705 @@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14706 unsigned long flags = oops_begin();
14707 int sig = SIGSEGV;
14708
14709 - if (!user_mode_vm(regs))
14710 + if (!user_mode(regs))
14711 report_bug(regs->ip, regs);
14712
14713 if (__die(str, regs, err))
14714 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14715 index 88ec912..e95e935 100644
14716 --- a/arch/x86/kernel/dumpstack_32.c
14717 +++ b/arch/x86/kernel/dumpstack_32.c
14718 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14719 bp = stack_frame(task, regs);
14720
14721 for (;;) {
14722 - struct thread_info *context;
14723 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14724
14725 - context = (struct thread_info *)
14726 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14727 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14728 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14729
14730 - stack = (unsigned long *)context->previous_esp;
14731 - if (!stack)
14732 + if (stack_start == task_stack_page(task))
14733 break;
14734 + stack = *(unsigned long **)stack_start;
14735 if (ops->stack(data, "IRQ") < 0)
14736 break;
14737 touch_nmi_watchdog();
14738 @@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14739 int i;
14740
14741 print_modules();
14742 - __show_regs(regs, !user_mode_vm(regs));
14743 + __show_regs(regs, !user_mode(regs));
14744
14745 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14746 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14747 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14748 * When in-kernel, we also print out the stack and code at the
14749 * time of the fault..
14750 */
14751 - if (!user_mode_vm(regs)) {
14752 + if (!user_mode(regs)) {
14753 unsigned int code_prologue = code_bytes * 43 / 64;
14754 unsigned int code_len = code_bytes;
14755 unsigned char c;
14756 u8 *ip;
14757 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14758
14759 printk(KERN_EMERG "Stack:\n");
14760 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14761
14762 printk(KERN_EMERG "Code: ");
14763
14764 - ip = (u8 *)regs->ip - code_prologue;
14765 + ip = (u8 *)regs->ip - code_prologue + cs_base;
14766 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14767 /* try starting at IP */
14768 - ip = (u8 *)regs->ip;
14769 + ip = (u8 *)regs->ip + cs_base;
14770 code_len = code_len - code_prologue + 1;
14771 }
14772 for (i = 0; i < code_len; i++, ip++) {
14773 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
14774 printk(KERN_CONT " Bad EIP value.");
14775 break;
14776 }
14777 - if (ip == (u8 *)regs->ip)
14778 + if (ip == (u8 *)regs->ip + cs_base)
14779 printk(KERN_CONT "<%02x> ", c);
14780 else
14781 printk(KERN_CONT "%02x ", c);
14782 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14783 {
14784 unsigned short ud2;
14785
14786 + ip = ktla_ktva(ip);
14787 if (ip < PAGE_OFFSET)
14788 return 0;
14789 if (probe_kernel_address((unsigned short *)ip, ud2))
14790 @@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14791
14792 return ud2 == 0x0b0f;
14793 }
14794 +
14795 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14796 +void pax_check_alloca(unsigned long size)
14797 +{
14798 + unsigned long sp = (unsigned long)&sp, stack_left;
14799 +
14800 + /* all kernel stacks are of the same size */
14801 + stack_left = sp & (THREAD_SIZE - 1);
14802 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14803 +}
14804 +EXPORT_SYMBOL(pax_check_alloca);
14805 +#endif
14806 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14807 index 17107bd..9623722 100644
14808 --- a/arch/x86/kernel/dumpstack_64.c
14809 +++ b/arch/x86/kernel/dumpstack_64.c
14810 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14811 unsigned long *irq_stack_end =
14812 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14813 unsigned used = 0;
14814 - struct thread_info *tinfo;
14815 int graph = 0;
14816 unsigned long dummy;
14817 + void *stack_start;
14818
14819 if (!task)
14820 task = current;
14821 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14822 * current stack address. If the stacks consist of nested
14823 * exceptions
14824 */
14825 - tinfo = task_thread_info(task);
14826 for (;;) {
14827 char *id;
14828 unsigned long *estack_end;
14829 +
14830 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14831 &used, &id);
14832
14833 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14834 if (ops->stack(data, id) < 0)
14835 break;
14836
14837 - bp = ops->walk_stack(tinfo, stack, bp, ops,
14838 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14839 data, estack_end, &graph);
14840 ops->stack(data, "<EOE>");
14841 /*
14842 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14843 * second-to-last pointer (index -2 to end) in the
14844 * exception stack:
14845 */
14846 + if ((u16)estack_end[-1] != __KERNEL_DS)
14847 + goto out;
14848 stack = (unsigned long *) estack_end[-2];
14849 continue;
14850 }
14851 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14852 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14853 if (ops->stack(data, "IRQ") < 0)
14854 break;
14855 - bp = ops->walk_stack(tinfo, stack, bp,
14856 + bp = ops->walk_stack(task, irq_stack, stack, bp,
14857 ops, data, irq_stack_end, &graph);
14858 /*
14859 * We link to the next stack (which would be
14860 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14861 /*
14862 * This handles the process stack:
14863 */
14864 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14865 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14866 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14867 +out:
14868 put_cpu();
14869 }
14870 EXPORT_SYMBOL(dump_trace);
14871 @@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14872
14873 return ud2 == 0x0b0f;
14874 }
14875 +
14876 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14877 +void pax_check_alloca(unsigned long size)
14878 +{
14879 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14880 + unsigned cpu, used;
14881 + char *id;
14882 +
14883 + /* check the process stack first */
14884 + stack_start = (unsigned long)task_stack_page(current);
14885 + stack_end = stack_start + THREAD_SIZE;
14886 + if (likely(stack_start <= sp && sp < stack_end)) {
14887 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
14888 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14889 + return;
14890 + }
14891 +
14892 + cpu = get_cpu();
14893 +
14894 + /* check the irq stacks */
14895 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14896 + stack_start = stack_end - IRQ_STACK_SIZE;
14897 + if (stack_start <= sp && sp < stack_end) {
14898 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14899 + put_cpu();
14900 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14901 + return;
14902 + }
14903 +
14904 + /* check the exception stacks */
14905 + used = 0;
14906 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14907 + stack_start = stack_end - EXCEPTION_STKSZ;
14908 + if (stack_end && stack_start <= sp && sp < stack_end) {
14909 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14910 + put_cpu();
14911 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
14912 + return;
14913 + }
14914 +
14915 + put_cpu();
14916 +
14917 + /* unknown stack */
14918 + BUG();
14919 +}
14920 +EXPORT_SYMBOL(pax_check_alloca);
14921 +#endif
14922 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14923 index 9b9f18b..9fcaa04 100644
14924 --- a/arch/x86/kernel/early_printk.c
14925 +++ b/arch/x86/kernel/early_printk.c
14926 @@ -7,6 +7,7 @@
14927 #include <linux/pci_regs.h>
14928 #include <linux/pci_ids.h>
14929 #include <linux/errno.h>
14930 +#include <linux/sched.h>
14931 #include <asm/io.h>
14932 #include <asm/processor.h>
14933 #include <asm/fcntl.h>
14934 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14935 index 7b784f4..db6b628 100644
14936 --- a/arch/x86/kernel/entry_32.S
14937 +++ b/arch/x86/kernel/entry_32.S
14938 @@ -179,13 +179,146 @@
14939 /*CFI_REL_OFFSET gs, PT_GS*/
14940 .endm
14941 .macro SET_KERNEL_GS reg
14942 +
14943 +#ifdef CONFIG_CC_STACKPROTECTOR
14944 movl $(__KERNEL_STACK_CANARY), \reg
14945 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14946 + movl $(__USER_DS), \reg
14947 +#else
14948 + xorl \reg, \reg
14949 +#endif
14950 +
14951 movl \reg, %gs
14952 .endm
14953
14954 #endif /* CONFIG_X86_32_LAZY_GS */
14955
14956 -.macro SAVE_ALL
14957 +.macro pax_enter_kernel
14958 +#ifdef CONFIG_PAX_KERNEXEC
14959 + call pax_enter_kernel
14960 +#endif
14961 +.endm
14962 +
14963 +.macro pax_exit_kernel
14964 +#ifdef CONFIG_PAX_KERNEXEC
14965 + call pax_exit_kernel
14966 +#endif
14967 +.endm
14968 +
14969 +#ifdef CONFIG_PAX_KERNEXEC
14970 +ENTRY(pax_enter_kernel)
14971 +#ifdef CONFIG_PARAVIRT
14972 + pushl %eax
14973 + pushl %ecx
14974 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14975 + mov %eax, %esi
14976 +#else
14977 + mov %cr0, %esi
14978 +#endif
14979 + bts $16, %esi
14980 + jnc 1f
14981 + mov %cs, %esi
14982 + cmp $__KERNEL_CS, %esi
14983 + jz 3f
14984 + ljmp $__KERNEL_CS, $3f
14985 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14986 +2:
14987 +#ifdef CONFIG_PARAVIRT
14988 + mov %esi, %eax
14989 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14990 +#else
14991 + mov %esi, %cr0
14992 +#endif
14993 +3:
14994 +#ifdef CONFIG_PARAVIRT
14995 + popl %ecx
14996 + popl %eax
14997 +#endif
14998 + ret
14999 +ENDPROC(pax_enter_kernel)
15000 +
15001 +ENTRY(pax_exit_kernel)
15002 +#ifdef CONFIG_PARAVIRT
15003 + pushl %eax
15004 + pushl %ecx
15005 +#endif
15006 + mov %cs, %esi
15007 + cmp $__KERNEXEC_KERNEL_CS, %esi
15008 + jnz 2f
15009 +#ifdef CONFIG_PARAVIRT
15010 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15011 + mov %eax, %esi
15012 +#else
15013 + mov %cr0, %esi
15014 +#endif
15015 + btr $16, %esi
15016 + ljmp $__KERNEL_CS, $1f
15017 +1:
15018 +#ifdef CONFIG_PARAVIRT
15019 + mov %esi, %eax
15020 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15021 +#else
15022 + mov %esi, %cr0
15023 +#endif
15024 +2:
15025 +#ifdef CONFIG_PARAVIRT
15026 + popl %ecx
15027 + popl %eax
15028 +#endif
15029 + ret
15030 +ENDPROC(pax_exit_kernel)
15031 +#endif
15032 +
15033 +.macro pax_erase_kstack
15034 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15035 + call pax_erase_kstack
15036 +#endif
15037 +.endm
15038 +
15039 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15040 +/*
15041 + * ebp: thread_info
15042 + * ecx, edx: can be clobbered
15043 + */
15044 +ENTRY(pax_erase_kstack)
15045 + pushl %edi
15046 + pushl %eax
15047 +
15048 + mov TI_lowest_stack(%ebp), %edi
15049 + mov $-0xBEEF, %eax
15050 + std
15051 +
15052 +1: mov %edi, %ecx
15053 + and $THREAD_SIZE_asm - 1, %ecx
15054 + shr $2, %ecx
15055 + repne scasl
15056 + jecxz 2f
15057 +
15058 + cmp $2*16, %ecx
15059 + jc 2f
15060 +
15061 + mov $2*16, %ecx
15062 + repe scasl
15063 + jecxz 2f
15064 + jne 1b
15065 +
15066 +2: cld
15067 + mov %esp, %ecx
15068 + sub %edi, %ecx
15069 + shr $2, %ecx
15070 + rep stosl
15071 +
15072 + mov TI_task_thread_sp0(%ebp), %edi
15073 + sub $128, %edi
15074 + mov %edi, TI_lowest_stack(%ebp)
15075 +
15076 + popl %eax
15077 + popl %edi
15078 + ret
15079 +ENDPROC(pax_erase_kstack)
15080 +#endif
15081 +
15082 +.macro __SAVE_ALL _DS
15083 cld
15084 PUSH_GS
15085 pushl_cfi %fs
15086 @@ -208,7 +341,7 @@
15087 CFI_REL_OFFSET ecx, 0
15088 pushl_cfi %ebx
15089 CFI_REL_OFFSET ebx, 0
15090 - movl $(__USER_DS), %edx
15091 + movl $\_DS, %edx
15092 movl %edx, %ds
15093 movl %edx, %es
15094 movl $(__KERNEL_PERCPU), %edx
15095 @@ -216,6 +349,15 @@
15096 SET_KERNEL_GS %edx
15097 .endm
15098
15099 +.macro SAVE_ALL
15100 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15101 + __SAVE_ALL __KERNEL_DS
15102 + pax_enter_kernel
15103 +#else
15104 + __SAVE_ALL __USER_DS
15105 +#endif
15106 +.endm
15107 +
15108 .macro RESTORE_INT_REGS
15109 popl_cfi %ebx
15110 CFI_RESTORE ebx
15111 @@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15112 popfl_cfi
15113 jmp syscall_exit
15114 CFI_ENDPROC
15115 -END(ret_from_fork)
15116 +ENDPROC(ret_from_fork)
15117
15118 /*
15119 * Interrupt exit functions should be protected against kprobes
15120 @@ -335,7 +477,15 @@ resume_userspace_sig:
15121 andl $SEGMENT_RPL_MASK, %eax
15122 #endif
15123 cmpl $USER_RPL, %eax
15124 +
15125 +#ifdef CONFIG_PAX_KERNEXEC
15126 + jae resume_userspace
15127 +
15128 + pax_exit_kernel
15129 + jmp resume_kernel
15130 +#else
15131 jb resume_kernel # not returning to v8086 or userspace
15132 +#endif
15133
15134 ENTRY(resume_userspace)
15135 LOCKDEP_SYS_EXIT
15136 @@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15137 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15138 # int/exception return?
15139 jne work_pending
15140 - jmp restore_all
15141 -END(ret_from_exception)
15142 + jmp restore_all_pax
15143 +ENDPROC(ret_from_exception)
15144
15145 #ifdef CONFIG_PREEMPT
15146 ENTRY(resume_kernel)
15147 @@ -363,7 +513,7 @@ need_resched:
15148 jz restore_all
15149 call preempt_schedule_irq
15150 jmp need_resched
15151 -END(resume_kernel)
15152 +ENDPROC(resume_kernel)
15153 #endif
15154 CFI_ENDPROC
15155 /*
15156 @@ -397,23 +547,34 @@ sysenter_past_esp:
15157 /*CFI_REL_OFFSET cs, 0*/
15158 /*
15159 * Push current_thread_info()->sysenter_return to the stack.
15160 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15161 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
15162 */
15163 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
15164 + pushl_cfi $0
15165 CFI_REL_OFFSET eip, 0
15166
15167 pushl_cfi %eax
15168 SAVE_ALL
15169 + GET_THREAD_INFO(%ebp)
15170 + movl TI_sysenter_return(%ebp),%ebp
15171 + movl %ebp,PT_EIP(%esp)
15172 ENABLE_INTERRUPTS(CLBR_NONE)
15173
15174 /*
15175 * Load the potential sixth argument from user stack.
15176 * Careful about security.
15177 */
15178 + movl PT_OLDESP(%esp),%ebp
15179 +
15180 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15181 + mov PT_OLDSS(%esp),%ds
15182 +1: movl %ds:(%ebp),%ebp
15183 + push %ss
15184 + pop %ds
15185 +#else
15186 cmpl $__PAGE_OFFSET-3,%ebp
15187 jae syscall_fault
15188 1: movl (%ebp),%ebp
15189 +#endif
15190 +
15191 movl %ebp,PT_EBP(%esp)
15192 .section __ex_table,"a"
15193 .align 4
15194 @@ -436,12 +597,24 @@ sysenter_do_call:
15195 testl $_TIF_ALLWORK_MASK, %ecx
15196 jne sysexit_audit
15197 sysenter_exit:
15198 +
15199 +#ifdef CONFIG_PAX_RANDKSTACK
15200 + pushl_cfi %eax
15201 + movl %esp, %eax
15202 + call pax_randomize_kstack
15203 + popl_cfi %eax
15204 +#endif
15205 +
15206 + pax_erase_kstack
15207 +
15208 /* if something modifies registers it must also disable sysexit */
15209 movl PT_EIP(%esp), %edx
15210 movl PT_OLDESP(%esp), %ecx
15211 xorl %ebp,%ebp
15212 TRACE_IRQS_ON
15213 1: mov PT_FS(%esp), %fs
15214 +2: mov PT_DS(%esp), %ds
15215 +3: mov PT_ES(%esp), %es
15216 PTGS_TO_GS
15217 ENABLE_INTERRUPTS_SYSEXIT
15218
15219 @@ -458,6 +631,9 @@ sysenter_audit:
15220 movl %eax,%edx /* 2nd arg: syscall number */
15221 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15222 call __audit_syscall_entry
15223 +
15224 + pax_erase_kstack
15225 +
15226 pushl_cfi %ebx
15227 movl PT_EAX(%esp),%eax /* reload syscall number */
15228 jmp sysenter_do_call
15229 @@ -483,11 +659,17 @@ sysexit_audit:
15230
15231 CFI_ENDPROC
15232 .pushsection .fixup,"ax"
15233 -2: movl $0,PT_FS(%esp)
15234 +4: movl $0,PT_FS(%esp)
15235 + jmp 1b
15236 +5: movl $0,PT_DS(%esp)
15237 + jmp 1b
15238 +6: movl $0,PT_ES(%esp)
15239 jmp 1b
15240 .section __ex_table,"a"
15241 .align 4
15242 - .long 1b,2b
15243 + .long 1b,4b
15244 + .long 2b,5b
15245 + .long 3b,6b
15246 .popsection
15247 PTGS_TO_GS_EX
15248 ENDPROC(ia32_sysenter_target)
15249 @@ -520,6 +702,15 @@ syscall_exit:
15250 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15251 jne syscall_exit_work
15252
15253 +restore_all_pax:
15254 +
15255 +#ifdef CONFIG_PAX_RANDKSTACK
15256 + movl %esp, %eax
15257 + call pax_randomize_kstack
15258 +#endif
15259 +
15260 + pax_erase_kstack
15261 +
15262 restore_all:
15263 TRACE_IRQS_IRET
15264 restore_all_notrace:
15265 @@ -579,14 +770,34 @@ ldt_ss:
15266 * compensating for the offset by changing to the ESPFIX segment with
15267 * a base address that matches for the difference.
15268 */
15269 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15270 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15271 mov %esp, %edx /* load kernel esp */
15272 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15273 mov %dx, %ax /* eax: new kernel esp */
15274 sub %eax, %edx /* offset (low word is 0) */
15275 +#ifdef CONFIG_SMP
15276 + movl PER_CPU_VAR(cpu_number), %ebx
15277 + shll $PAGE_SHIFT_asm, %ebx
15278 + addl $cpu_gdt_table, %ebx
15279 +#else
15280 + movl $cpu_gdt_table, %ebx
15281 +#endif
15282 shr $16, %edx
15283 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15284 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15285 +
15286 +#ifdef CONFIG_PAX_KERNEXEC
15287 + mov %cr0, %esi
15288 + btr $16, %esi
15289 + mov %esi, %cr0
15290 +#endif
15291 +
15292 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15293 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15294 +
15295 +#ifdef CONFIG_PAX_KERNEXEC
15296 + bts $16, %esi
15297 + mov %esi, %cr0
15298 +#endif
15299 +
15300 pushl_cfi $__ESPFIX_SS
15301 pushl_cfi %eax /* new kernel esp */
15302 /* Disable interrupts, but do not irqtrace this section: we
15303 @@ -615,38 +826,30 @@ work_resched:
15304 movl TI_flags(%ebp), %ecx
15305 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15306 # than syscall tracing?
15307 - jz restore_all
15308 + jz restore_all_pax
15309 testb $_TIF_NEED_RESCHED, %cl
15310 jnz work_resched
15311
15312 work_notifysig: # deal with pending signals and
15313 # notify-resume requests
15314 + movl %esp, %eax
15315 #ifdef CONFIG_VM86
15316 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15317 - movl %esp, %eax
15318 - jne work_notifysig_v86 # returning to kernel-space or
15319 + jz 1f # returning to kernel-space or
15320 # vm86-space
15321 - TRACE_IRQS_ON
15322 - ENABLE_INTERRUPTS(CLBR_NONE)
15323 - xorl %edx, %edx
15324 - call do_notify_resume
15325 - jmp resume_userspace_sig
15326
15327 - ALIGN
15328 -work_notifysig_v86:
15329 pushl_cfi %ecx # save ti_flags for do_notify_resume
15330 call save_v86_state # %eax contains pt_regs pointer
15331 popl_cfi %ecx
15332 movl %eax, %esp
15333 -#else
15334 - movl %esp, %eax
15335 +1:
15336 #endif
15337 TRACE_IRQS_ON
15338 ENABLE_INTERRUPTS(CLBR_NONE)
15339 xorl %edx, %edx
15340 call do_notify_resume
15341 jmp resume_userspace_sig
15342 -END(work_pending)
15343 +ENDPROC(work_pending)
15344
15345 # perform syscall exit tracing
15346 ALIGN
15347 @@ -654,11 +857,14 @@ syscall_trace_entry:
15348 movl $-ENOSYS,PT_EAX(%esp)
15349 movl %esp, %eax
15350 call syscall_trace_enter
15351 +
15352 + pax_erase_kstack
15353 +
15354 /* What it returned is what we'll actually use. */
15355 cmpl $(NR_syscalls), %eax
15356 jnae syscall_call
15357 jmp syscall_exit
15358 -END(syscall_trace_entry)
15359 +ENDPROC(syscall_trace_entry)
15360
15361 # perform syscall exit tracing
15362 ALIGN
15363 @@ -671,20 +877,24 @@ syscall_exit_work:
15364 movl %esp, %eax
15365 call syscall_trace_leave
15366 jmp resume_userspace
15367 -END(syscall_exit_work)
15368 +ENDPROC(syscall_exit_work)
15369 CFI_ENDPROC
15370
15371 RING0_INT_FRAME # can't unwind into user space anyway
15372 syscall_fault:
15373 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15374 + push %ss
15375 + pop %ds
15376 +#endif
15377 GET_THREAD_INFO(%ebp)
15378 movl $-EFAULT,PT_EAX(%esp)
15379 jmp resume_userspace
15380 -END(syscall_fault)
15381 +ENDPROC(syscall_fault)
15382
15383 syscall_badsys:
15384 movl $-ENOSYS,PT_EAX(%esp)
15385 jmp resume_userspace
15386 -END(syscall_badsys)
15387 +ENDPROC(syscall_badsys)
15388 CFI_ENDPROC
15389 /*
15390 * End of kprobes section
15391 @@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
15392 CFI_ENDPROC
15393 ENDPROC(ptregs_clone)
15394
15395 + ALIGN;
15396 +ENTRY(kernel_execve)
15397 + CFI_STARTPROC
15398 + pushl_cfi %ebp
15399 + sub $PT_OLDSS+4,%esp
15400 + pushl_cfi %edi
15401 + pushl_cfi %ecx
15402 + pushl_cfi %eax
15403 + lea 3*4(%esp),%edi
15404 + mov $PT_OLDSS/4+1,%ecx
15405 + xorl %eax,%eax
15406 + rep stosl
15407 + popl_cfi %eax
15408 + popl_cfi %ecx
15409 + popl_cfi %edi
15410 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15411 + pushl_cfi %esp
15412 + call sys_execve
15413 + add $4,%esp
15414 + CFI_ADJUST_CFA_OFFSET -4
15415 + GET_THREAD_INFO(%ebp)
15416 + test %eax,%eax
15417 + jz syscall_exit
15418 + add $PT_OLDSS+4,%esp
15419 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15420 + popl_cfi %ebp
15421 + ret
15422 + CFI_ENDPROC
15423 +ENDPROC(kernel_execve)
15424 +
15425 .macro FIXUP_ESPFIX_STACK
15426 /*
15427 * Switch back for ESPFIX stack to the normal zerobased stack
15428 @@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15429 * normal stack and adjusts ESP with the matching offset.
15430 */
15431 /* fixup the stack */
15432 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15433 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
15434 +#ifdef CONFIG_SMP
15435 + movl PER_CPU_VAR(cpu_number), %ebx
15436 + shll $PAGE_SHIFT_asm, %ebx
15437 + addl $cpu_gdt_table, %ebx
15438 +#else
15439 + movl $cpu_gdt_table, %ebx
15440 +#endif
15441 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15442 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
15443 shl $16, %eax
15444 addl %esp, %eax /* the adjusted stack pointer */
15445 pushl_cfi $__KERNEL_DS
15446 @@ -819,7 +1066,7 @@ vector=vector+1
15447 .endr
15448 2: jmp common_interrupt
15449 .endr
15450 -END(irq_entries_start)
15451 +ENDPROC(irq_entries_start)
15452
15453 .previous
15454 END(interrupt)
15455 @@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
15456 pushl_cfi $do_coprocessor_error
15457 jmp error_code
15458 CFI_ENDPROC
15459 -END(coprocessor_error)
15460 +ENDPROC(coprocessor_error)
15461
15462 ENTRY(simd_coprocessor_error)
15463 RING0_INT_FRAME
15464 @@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
15465 #endif
15466 jmp error_code
15467 CFI_ENDPROC
15468 -END(simd_coprocessor_error)
15469 +ENDPROC(simd_coprocessor_error)
15470
15471 ENTRY(device_not_available)
15472 RING0_INT_FRAME
15473 @@ -896,7 +1143,7 @@ ENTRY(device_not_available)
15474 pushl_cfi $do_device_not_available
15475 jmp error_code
15476 CFI_ENDPROC
15477 -END(device_not_available)
15478 +ENDPROC(device_not_available)
15479
15480 #ifdef CONFIG_PARAVIRT
15481 ENTRY(native_iret)
15482 @@ -905,12 +1152,12 @@ ENTRY(native_iret)
15483 .align 4
15484 .long native_iret, iret_exc
15485 .previous
15486 -END(native_iret)
15487 +ENDPROC(native_iret)
15488
15489 ENTRY(native_irq_enable_sysexit)
15490 sti
15491 sysexit
15492 -END(native_irq_enable_sysexit)
15493 +ENDPROC(native_irq_enable_sysexit)
15494 #endif
15495
15496 ENTRY(overflow)
15497 @@ -919,7 +1166,7 @@ ENTRY(overflow)
15498 pushl_cfi $do_overflow
15499 jmp error_code
15500 CFI_ENDPROC
15501 -END(overflow)
15502 +ENDPROC(overflow)
15503
15504 ENTRY(bounds)
15505 RING0_INT_FRAME
15506 @@ -927,7 +1174,7 @@ ENTRY(bounds)
15507 pushl_cfi $do_bounds
15508 jmp error_code
15509 CFI_ENDPROC
15510 -END(bounds)
15511 +ENDPROC(bounds)
15512
15513 ENTRY(invalid_op)
15514 RING0_INT_FRAME
15515 @@ -935,7 +1182,7 @@ ENTRY(invalid_op)
15516 pushl_cfi $do_invalid_op
15517 jmp error_code
15518 CFI_ENDPROC
15519 -END(invalid_op)
15520 +ENDPROC(invalid_op)
15521
15522 ENTRY(coprocessor_segment_overrun)
15523 RING0_INT_FRAME
15524 @@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
15525 pushl_cfi $do_coprocessor_segment_overrun
15526 jmp error_code
15527 CFI_ENDPROC
15528 -END(coprocessor_segment_overrun)
15529 +ENDPROC(coprocessor_segment_overrun)
15530
15531 ENTRY(invalid_TSS)
15532 RING0_EC_FRAME
15533 pushl_cfi $do_invalid_TSS
15534 jmp error_code
15535 CFI_ENDPROC
15536 -END(invalid_TSS)
15537 +ENDPROC(invalid_TSS)
15538
15539 ENTRY(segment_not_present)
15540 RING0_EC_FRAME
15541 pushl_cfi $do_segment_not_present
15542 jmp error_code
15543 CFI_ENDPROC
15544 -END(segment_not_present)
15545 +ENDPROC(segment_not_present)
15546
15547 ENTRY(stack_segment)
15548 RING0_EC_FRAME
15549 pushl_cfi $do_stack_segment
15550 jmp error_code
15551 CFI_ENDPROC
15552 -END(stack_segment)
15553 +ENDPROC(stack_segment)
15554
15555 ENTRY(alignment_check)
15556 RING0_EC_FRAME
15557 pushl_cfi $do_alignment_check
15558 jmp error_code
15559 CFI_ENDPROC
15560 -END(alignment_check)
15561 +ENDPROC(alignment_check)
15562
15563 ENTRY(divide_error)
15564 RING0_INT_FRAME
15565 @@ -979,7 +1226,7 @@ ENTRY(divide_error)
15566 pushl_cfi $do_divide_error
15567 jmp error_code
15568 CFI_ENDPROC
15569 -END(divide_error)
15570 +ENDPROC(divide_error)
15571
15572 #ifdef CONFIG_X86_MCE
15573 ENTRY(machine_check)
15574 @@ -988,7 +1235,7 @@ ENTRY(machine_check)
15575 pushl_cfi machine_check_vector
15576 jmp error_code
15577 CFI_ENDPROC
15578 -END(machine_check)
15579 +ENDPROC(machine_check)
15580 #endif
15581
15582 ENTRY(spurious_interrupt_bug)
15583 @@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
15584 pushl_cfi $do_spurious_interrupt_bug
15585 jmp error_code
15586 CFI_ENDPROC
15587 -END(spurious_interrupt_bug)
15588 +ENDPROC(spurious_interrupt_bug)
15589 /*
15590 * End of kprobes section
15591 */
15592 @@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
15593
15594 ENTRY(mcount)
15595 ret
15596 -END(mcount)
15597 +ENDPROC(mcount)
15598
15599 ENTRY(ftrace_caller)
15600 cmpl $0, function_trace_stop
15601 @@ -1141,7 +1388,7 @@ ftrace_graph_call:
15602 .globl ftrace_stub
15603 ftrace_stub:
15604 ret
15605 -END(ftrace_caller)
15606 +ENDPROC(ftrace_caller)
15607
15608 #else /* ! CONFIG_DYNAMIC_FTRACE */
15609
15610 @@ -1177,7 +1424,7 @@ trace:
15611 popl %ecx
15612 popl %eax
15613 jmp ftrace_stub
15614 -END(mcount)
15615 +ENDPROC(mcount)
15616 #endif /* CONFIG_DYNAMIC_FTRACE */
15617 #endif /* CONFIG_FUNCTION_TRACER */
15618
15619 @@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
15620 popl %ecx
15621 popl %eax
15622 ret
15623 -END(ftrace_graph_caller)
15624 +ENDPROC(ftrace_graph_caller)
15625
15626 .globl return_to_handler
15627 return_to_handler:
15628 @@ -1253,15 +1500,18 @@ error_code:
15629 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15630 REG_TO_PTGS %ecx
15631 SET_KERNEL_GS %ecx
15632 - movl $(__USER_DS), %ecx
15633 + movl $(__KERNEL_DS), %ecx
15634 movl %ecx, %ds
15635 movl %ecx, %es
15636 +
15637 + pax_enter_kernel
15638 +
15639 TRACE_IRQS_OFF
15640 movl %esp,%eax # pt_regs pointer
15641 call *%edi
15642 jmp ret_from_exception
15643 CFI_ENDPROC
15644 -END(page_fault)
15645 +ENDPROC(page_fault)
15646
15647 /*
15648 * Debug traps and NMI can happen at the one SYSENTER instruction
15649 @@ -1303,7 +1553,7 @@ debug_stack_correct:
15650 call do_debug
15651 jmp ret_from_exception
15652 CFI_ENDPROC
15653 -END(debug)
15654 +ENDPROC(debug)
15655
15656 /*
15657 * NMI is doubly nasty. It can happen _while_ we're handling
15658 @@ -1340,6 +1590,9 @@ nmi_stack_correct:
15659 xorl %edx,%edx # zero error code
15660 movl %esp,%eax # pt_regs pointer
15661 call do_nmi
15662 +
15663 + pax_exit_kernel
15664 +
15665 jmp restore_all_notrace
15666 CFI_ENDPROC
15667
15668 @@ -1376,12 +1629,15 @@ nmi_espfix_stack:
15669 FIXUP_ESPFIX_STACK # %eax == %esp
15670 xorl %edx,%edx # zero error code
15671 call do_nmi
15672 +
15673 + pax_exit_kernel
15674 +
15675 RESTORE_REGS
15676 lss 12+4(%esp), %esp # back to espfix stack
15677 CFI_ADJUST_CFA_OFFSET -24
15678 jmp irq_return
15679 CFI_ENDPROC
15680 -END(nmi)
15681 +ENDPROC(nmi)
15682
15683 ENTRY(int3)
15684 RING0_INT_FRAME
15685 @@ -1393,14 +1649,14 @@ ENTRY(int3)
15686 call do_int3
15687 jmp ret_from_exception
15688 CFI_ENDPROC
15689 -END(int3)
15690 +ENDPROC(int3)
15691
15692 ENTRY(general_protection)
15693 RING0_EC_FRAME
15694 pushl_cfi $do_general_protection
15695 jmp error_code
15696 CFI_ENDPROC
15697 -END(general_protection)
15698 +ENDPROC(general_protection)
15699
15700 #ifdef CONFIG_KVM_GUEST
15701 ENTRY(async_page_fault)
15702 @@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
15703 pushl_cfi $do_async_page_fault
15704 jmp error_code
15705 CFI_ENDPROC
15706 -END(async_page_fault)
15707 +ENDPROC(async_page_fault)
15708 #endif
15709
15710 /*
15711 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15712 index cdc79b5..4710a75 100644
15713 --- a/arch/x86/kernel/entry_64.S
15714 +++ b/arch/x86/kernel/entry_64.S
15715 @@ -56,6 +56,8 @@
15716 #include <asm/ftrace.h>
15717 #include <asm/percpu.h>
15718 #include <linux/err.h>
15719 +#include <asm/pgtable.h>
15720 +#include <asm/alternative-asm.h>
15721
15722 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15723 #include <linux/elf-em.h>
15724 @@ -69,8 +71,9 @@
15725 #ifdef CONFIG_FUNCTION_TRACER
15726 #ifdef CONFIG_DYNAMIC_FTRACE
15727 ENTRY(mcount)
15728 + pax_force_retaddr
15729 retq
15730 -END(mcount)
15731 +ENDPROC(mcount)
15732
15733 ENTRY(ftrace_caller)
15734 cmpl $0, function_trace_stop
15735 @@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
15736 #endif
15737
15738 GLOBAL(ftrace_stub)
15739 + pax_force_retaddr
15740 retq
15741 -END(ftrace_caller)
15742 +ENDPROC(ftrace_caller)
15743
15744 #else /* ! CONFIG_DYNAMIC_FTRACE */
15745 ENTRY(mcount)
15746 @@ -113,6 +117,7 @@ ENTRY(mcount)
15747 #endif
15748
15749 GLOBAL(ftrace_stub)
15750 + pax_force_retaddr
15751 retq
15752
15753 trace:
15754 @@ -122,12 +127,13 @@ trace:
15755 movq 8(%rbp), %rsi
15756 subq $MCOUNT_INSN_SIZE, %rdi
15757
15758 + pax_force_fptr ftrace_trace_function
15759 call *ftrace_trace_function
15760
15761 MCOUNT_RESTORE_FRAME
15762
15763 jmp ftrace_stub
15764 -END(mcount)
15765 +ENDPROC(mcount)
15766 #endif /* CONFIG_DYNAMIC_FTRACE */
15767 #endif /* CONFIG_FUNCTION_TRACER */
15768
15769 @@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
15770
15771 MCOUNT_RESTORE_FRAME
15772
15773 + pax_force_retaddr
15774 retq
15775 -END(ftrace_graph_caller)
15776 +ENDPROC(ftrace_graph_caller)
15777
15778 GLOBAL(return_to_handler)
15779 subq $24, %rsp
15780 @@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
15781 movq 8(%rsp), %rdx
15782 movq (%rsp), %rax
15783 addq $24, %rsp
15784 + pax_force_fptr %rdi
15785 jmp *%rdi
15786 #endif
15787
15788 @@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
15789 ENDPROC(native_usergs_sysret64)
15790 #endif /* CONFIG_PARAVIRT */
15791
15792 + .macro ljmpq sel, off
15793 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15794 + .byte 0x48; ljmp *1234f(%rip)
15795 + .pushsection .rodata
15796 + .align 16
15797 + 1234: .quad \off; .word \sel
15798 + .popsection
15799 +#else
15800 + pushq $\sel
15801 + pushq $\off
15802 + lretq
15803 +#endif
15804 + .endm
15805 +
15806 + .macro pax_enter_kernel
15807 + pax_set_fptr_mask
15808 +#ifdef CONFIG_PAX_KERNEXEC
15809 + call pax_enter_kernel
15810 +#endif
15811 + .endm
15812 +
15813 + .macro pax_exit_kernel
15814 +#ifdef CONFIG_PAX_KERNEXEC
15815 + call pax_exit_kernel
15816 +#endif
15817 + .endm
15818 +
15819 +#ifdef CONFIG_PAX_KERNEXEC
15820 +ENTRY(pax_enter_kernel)
15821 + pushq %rdi
15822 +
15823 +#ifdef CONFIG_PARAVIRT
15824 + PV_SAVE_REGS(CLBR_RDI)
15825 +#endif
15826 +
15827 + GET_CR0_INTO_RDI
15828 + bts $16,%rdi
15829 + jnc 3f
15830 + mov %cs,%edi
15831 + cmp $__KERNEL_CS,%edi
15832 + jnz 2f
15833 +1:
15834 +
15835 +#ifdef CONFIG_PARAVIRT
15836 + PV_RESTORE_REGS(CLBR_RDI)
15837 +#endif
15838 +
15839 + popq %rdi
15840 + pax_force_retaddr
15841 + retq
15842 +
15843 +2: ljmpq __KERNEL_CS,1f
15844 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
15845 +4: SET_RDI_INTO_CR0
15846 + jmp 1b
15847 +ENDPROC(pax_enter_kernel)
15848 +
15849 +ENTRY(pax_exit_kernel)
15850 + pushq %rdi
15851 +
15852 +#ifdef CONFIG_PARAVIRT
15853 + PV_SAVE_REGS(CLBR_RDI)
15854 +#endif
15855 +
15856 + mov %cs,%rdi
15857 + cmp $__KERNEXEC_KERNEL_CS,%edi
15858 + jz 2f
15859 +1:
15860 +
15861 +#ifdef CONFIG_PARAVIRT
15862 + PV_RESTORE_REGS(CLBR_RDI);
15863 +#endif
15864 +
15865 + popq %rdi
15866 + pax_force_retaddr
15867 + retq
15868 +
15869 +2: GET_CR0_INTO_RDI
15870 + btr $16,%rdi
15871 + ljmpq __KERNEL_CS,3f
15872 +3: SET_RDI_INTO_CR0
15873 + jmp 1b
15874 +#ifdef CONFIG_PARAVIRT
15875 + PV_RESTORE_REGS(CLBR_RDI);
15876 +#endif
15877 +
15878 + popq %rdi
15879 + pax_force_retaddr
15880 + retq
15881 +ENDPROC(pax_exit_kernel)
15882 +#endif
15883 +
15884 + .macro pax_enter_kernel_user
15885 + pax_set_fptr_mask
15886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15887 + call pax_enter_kernel_user
15888 +#endif
15889 + .endm
15890 +
15891 + .macro pax_exit_kernel_user
15892 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15893 + call pax_exit_kernel_user
15894 +#endif
15895 +#ifdef CONFIG_PAX_RANDKSTACK
15896 + pushq %rax
15897 + call pax_randomize_kstack
15898 + popq %rax
15899 +#endif
15900 + .endm
15901 +
15902 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15903 +ENTRY(pax_enter_kernel_user)
15904 + pushq %rdi
15905 + pushq %rbx
15906 +
15907 +#ifdef CONFIG_PARAVIRT
15908 + PV_SAVE_REGS(CLBR_RDI)
15909 +#endif
15910 +
15911 + GET_CR3_INTO_RDI
15912 + mov %rdi,%rbx
15913 + add $__START_KERNEL_map,%rbx
15914 + sub phys_base(%rip),%rbx
15915 +
15916 +#ifdef CONFIG_PARAVIRT
15917 + pushq %rdi
15918 + cmpl $0, pv_info+PARAVIRT_enabled
15919 + jz 1f
15920 + i = 0
15921 + .rept USER_PGD_PTRS
15922 + mov i*8(%rbx),%rsi
15923 + mov $0,%sil
15924 + lea i*8(%rbx),%rdi
15925 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15926 + i = i + 1
15927 + .endr
15928 + jmp 2f
15929 +1:
15930 +#endif
15931 +
15932 + i = 0
15933 + .rept USER_PGD_PTRS
15934 + movb $0,i*8(%rbx)
15935 + i = i + 1
15936 + .endr
15937 +
15938 +#ifdef CONFIG_PARAVIRT
15939 +2: popq %rdi
15940 +#endif
15941 + SET_RDI_INTO_CR3
15942 +
15943 +#ifdef CONFIG_PAX_KERNEXEC
15944 + GET_CR0_INTO_RDI
15945 + bts $16,%rdi
15946 + SET_RDI_INTO_CR0
15947 +#endif
15948 +
15949 +#ifdef CONFIG_PARAVIRT
15950 + PV_RESTORE_REGS(CLBR_RDI)
15951 +#endif
15952 +
15953 + popq %rbx
15954 + popq %rdi
15955 + pax_force_retaddr
15956 + retq
15957 +ENDPROC(pax_enter_kernel_user)
15958 +
15959 +ENTRY(pax_exit_kernel_user)
15960 + push %rdi
15961 +
15962 +#ifdef CONFIG_PARAVIRT
15963 + pushq %rbx
15964 + PV_SAVE_REGS(CLBR_RDI)
15965 +#endif
15966 +
15967 +#ifdef CONFIG_PAX_KERNEXEC
15968 + GET_CR0_INTO_RDI
15969 + btr $16,%rdi
15970 + SET_RDI_INTO_CR0
15971 +#endif
15972 +
15973 + GET_CR3_INTO_RDI
15974 + add $__START_KERNEL_map,%rdi
15975 + sub phys_base(%rip),%rdi
15976 +
15977 +#ifdef CONFIG_PARAVIRT
15978 + cmpl $0, pv_info+PARAVIRT_enabled
15979 + jz 1f
15980 + mov %rdi,%rbx
15981 + i = 0
15982 + .rept USER_PGD_PTRS
15983 + mov i*8(%rbx),%rsi
15984 + mov $0x67,%sil
15985 + lea i*8(%rbx),%rdi
15986 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15987 + i = i + 1
15988 + .endr
15989 + jmp 2f
15990 +1:
15991 +#endif
15992 +
15993 + i = 0
15994 + .rept USER_PGD_PTRS
15995 + movb $0x67,i*8(%rdi)
15996 + i = i + 1
15997 + .endr
15998 +
15999 +#ifdef CONFIG_PARAVIRT
16000 +2: PV_RESTORE_REGS(CLBR_RDI)
16001 + popq %rbx
16002 +#endif
16003 +
16004 + popq %rdi
16005 + pax_force_retaddr
16006 + retq
16007 +ENDPROC(pax_exit_kernel_user)
16008 +#endif
16009 +
16010 +.macro pax_erase_kstack
16011 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16012 + call pax_erase_kstack
16013 +#endif
16014 +.endm
16015 +
16016 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16017 +/*
16018 + * r11: thread_info
16019 + * rcx, rdx: can be clobbered
16020 + */
16021 +ENTRY(pax_erase_kstack)
16022 + pushq %rdi
16023 + pushq %rax
16024 + pushq %r11
16025 +
16026 + GET_THREAD_INFO(%r11)
16027 + mov TI_lowest_stack(%r11), %rdi
16028 + mov $-0xBEEF, %rax
16029 + std
16030 +
16031 +1: mov %edi, %ecx
16032 + and $THREAD_SIZE_asm - 1, %ecx
16033 + shr $3, %ecx
16034 + repne scasq
16035 + jecxz 2f
16036 +
16037 + cmp $2*8, %ecx
16038 + jc 2f
16039 +
16040 + mov $2*8, %ecx
16041 + repe scasq
16042 + jecxz 2f
16043 + jne 1b
16044 +
16045 +2: cld
16046 + mov %esp, %ecx
16047 + sub %edi, %ecx
16048 +
16049 + cmp $THREAD_SIZE_asm, %rcx
16050 + jb 3f
16051 + ud2
16052 +3:
16053 +
16054 + shr $3, %ecx
16055 + rep stosq
16056 +
16057 + mov TI_task_thread_sp0(%r11), %rdi
16058 + sub $256, %rdi
16059 + mov %rdi, TI_lowest_stack(%r11)
16060 +
16061 + popq %r11
16062 + popq %rax
16063 + popq %rdi
16064 + pax_force_retaddr
16065 + ret
16066 +ENDPROC(pax_erase_kstack)
16067 +#endif
16068
16069 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16070 #ifdef CONFIG_TRACE_IRQFLAGS
16071 @@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
16072 .endm
16073
16074 .macro UNFAKE_STACK_FRAME
16075 - addq $8*6, %rsp
16076 - CFI_ADJUST_CFA_OFFSET -(6*8)
16077 + addq $8*6 + ARG_SKIP, %rsp
16078 + CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16079 .endm
16080
16081 /*
16082 @@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
16083 movq %rsp, %rsi
16084
16085 leaq -RBP(%rsp),%rdi /* arg1 for handler */
16086 - testl $3, CS-RBP(%rsi)
16087 + testb $3, CS-RBP(%rsi)
16088 je 1f
16089 SWAPGS
16090 /*
16091 @@ -355,9 +639,10 @@ ENTRY(save_rest)
16092 movq_cfi r15, R15+16
16093 movq %r11, 8(%rsp) /* return address */
16094 FIXUP_TOP_OF_STACK %r11, 16
16095 + pax_force_retaddr
16096 ret
16097 CFI_ENDPROC
16098 -END(save_rest)
16099 +ENDPROC(save_rest)
16100
16101 /* save complete stack frame */
16102 .pushsection .kprobes.text, "ax"
16103 @@ -386,9 +671,10 @@ ENTRY(save_paranoid)
16104 js 1f /* negative -> in kernel */
16105 SWAPGS
16106 xorl %ebx,%ebx
16107 -1: ret
16108 +1: pax_force_retaddr_bts
16109 + ret
16110 CFI_ENDPROC
16111 -END(save_paranoid)
16112 +ENDPROC(save_paranoid)
16113 .popsection
16114
16115 /*
16116 @@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
16117
16118 RESTORE_REST
16119
16120 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16121 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16122 jz retint_restore_args
16123
16124 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16125 @@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
16126 jmp ret_from_sys_call # go to the SYSRET fastpath
16127
16128 CFI_ENDPROC
16129 -END(ret_from_fork)
16130 +ENDPROC(ret_from_fork)
16131
16132 /*
16133 * System call entry. Up to 6 arguments in registers are supported.
16134 @@ -456,7 +742,7 @@ END(ret_from_fork)
16135 ENTRY(system_call)
16136 CFI_STARTPROC simple
16137 CFI_SIGNAL_FRAME
16138 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16139 + CFI_DEF_CFA rsp,0
16140 CFI_REGISTER rip,rcx
16141 /*CFI_REGISTER rflags,r11*/
16142 SWAPGS_UNSAFE_STACK
16143 @@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
16144
16145 movq %rsp,PER_CPU_VAR(old_rsp)
16146 movq PER_CPU_VAR(kernel_stack),%rsp
16147 + SAVE_ARGS 8*6,0
16148 + pax_enter_kernel_user
16149 /*
16150 * No need to follow this irqs off/on section - it's straight
16151 * and short:
16152 */
16153 ENABLE_INTERRUPTS(CLBR_NONE)
16154 - SAVE_ARGS 8,0
16155 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16156 movq %rcx,RIP-ARGOFFSET(%rsp)
16157 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16158 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16159 + GET_THREAD_INFO(%rcx)
16160 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16161 jnz tracesys
16162 system_call_fastpath:
16163 #if __SYSCALL_MASK == ~0
16164 @@ -488,7 +776,7 @@ system_call_fastpath:
16165 cmpl $__NR_syscall_max,%eax
16166 #endif
16167 ja badsys
16168 - movq %r10,%rcx
16169 + movq R10-ARGOFFSET(%rsp),%rcx
16170 call *sys_call_table(,%rax,8) # XXX: rip relative
16171 movq %rax,RAX-ARGOFFSET(%rsp)
16172 /*
16173 @@ -502,10 +790,13 @@ sysret_check:
16174 LOCKDEP_SYS_EXIT
16175 DISABLE_INTERRUPTS(CLBR_NONE)
16176 TRACE_IRQS_OFF
16177 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16178 + GET_THREAD_INFO(%rcx)
16179 + movl TI_flags(%rcx),%edx
16180 andl %edi,%edx
16181 jnz sysret_careful
16182 CFI_REMEMBER_STATE
16183 + pax_exit_kernel_user
16184 + pax_erase_kstack
16185 /*
16186 * sysretq will re-enable interrupts:
16187 */
16188 @@ -557,14 +848,18 @@ badsys:
16189 * jump back to the normal fast path.
16190 */
16191 auditsys:
16192 - movq %r10,%r9 /* 6th arg: 4th syscall arg */
16193 + movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16194 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16195 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16196 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16197 movq %rax,%rsi /* 2nd arg: syscall number */
16198 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16199 call __audit_syscall_entry
16200 +
16201 + pax_erase_kstack
16202 +
16203 LOAD_ARGS 0 /* reload call-clobbered registers */
16204 + pax_set_fptr_mask
16205 jmp system_call_fastpath
16206
16207 /*
16208 @@ -585,7 +880,7 @@ sysret_audit:
16209 /* Do syscall tracing */
16210 tracesys:
16211 #ifdef CONFIG_AUDITSYSCALL
16212 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16213 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16214 jz auditsys
16215 #endif
16216 SAVE_REST
16217 @@ -593,12 +888,16 @@ tracesys:
16218 FIXUP_TOP_OF_STACK %rdi
16219 movq %rsp,%rdi
16220 call syscall_trace_enter
16221 +
16222 + pax_erase_kstack
16223 +
16224 /*
16225 * Reload arg registers from stack in case ptrace changed them.
16226 * We don't reload %rax because syscall_trace_enter() returned
16227 * the value it wants us to use in the table lookup.
16228 */
16229 LOAD_ARGS ARGOFFSET, 1
16230 + pax_set_fptr_mask
16231 RESTORE_REST
16232 #if __SYSCALL_MASK == ~0
16233 cmpq $__NR_syscall_max,%rax
16234 @@ -607,7 +906,7 @@ tracesys:
16235 cmpl $__NR_syscall_max,%eax
16236 #endif
16237 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16238 - movq %r10,%rcx /* fixup for C */
16239 + movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16240 call *sys_call_table(,%rax,8)
16241 movq %rax,RAX-ARGOFFSET(%rsp)
16242 /* Use IRET because user could have changed frame */
16243 @@ -628,6 +927,7 @@ GLOBAL(int_with_check)
16244 andl %edi,%edx
16245 jnz int_careful
16246 andl $~TS_COMPAT,TI_status(%rcx)
16247 + pax_erase_kstack
16248 jmp retint_swapgs
16249
16250 /* Either reschedule or signal or syscall exit tracking needed. */
16251 @@ -674,7 +974,7 @@ int_restore_rest:
16252 TRACE_IRQS_OFF
16253 jmp int_with_check
16254 CFI_ENDPROC
16255 -END(system_call)
16256 +ENDPROC(system_call)
16257
16258 /*
16259 * Certain special system calls that need to save a complete full stack frame.
16260 @@ -690,7 +990,7 @@ ENTRY(\label)
16261 call \func
16262 jmp ptregscall_common
16263 CFI_ENDPROC
16264 -END(\label)
16265 +ENDPROC(\label)
16266 .endm
16267
16268 PTREGSCALL stub_clone, sys_clone, %r8
16269 @@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
16270 movq_cfi_restore R12+8, r12
16271 movq_cfi_restore RBP+8, rbp
16272 movq_cfi_restore RBX+8, rbx
16273 + pax_force_retaddr
16274 ret $REST_SKIP /* pop extended registers */
16275 CFI_ENDPROC
16276 -END(ptregscall_common)
16277 +ENDPROC(ptregscall_common)
16278
16279 ENTRY(stub_execve)
16280 CFI_STARTPROC
16281 @@ -725,7 +1026,7 @@ ENTRY(stub_execve)
16282 RESTORE_REST
16283 jmp int_ret_from_sys_call
16284 CFI_ENDPROC
16285 -END(stub_execve)
16286 +ENDPROC(stub_execve)
16287
16288 /*
16289 * sigreturn is special because it needs to restore all registers on return.
16290 @@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16291 RESTORE_REST
16292 jmp int_ret_from_sys_call
16293 CFI_ENDPROC
16294 -END(stub_rt_sigreturn)
16295 +ENDPROC(stub_rt_sigreturn)
16296
16297 #ifdef CONFIG_X86_X32_ABI
16298 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16299 @@ -812,7 +1113,7 @@ vector=vector+1
16300 2: jmp common_interrupt
16301 .endr
16302 CFI_ENDPROC
16303 -END(irq_entries_start)
16304 +ENDPROC(irq_entries_start)
16305
16306 .previous
16307 END(interrupt)
16308 @@ -832,6 +1133,16 @@ END(interrupt)
16309 subq $ORIG_RAX-RBP, %rsp
16310 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
16311 SAVE_ARGS_IRQ
16312 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16313 + testb $3, CS(%rdi)
16314 + jnz 1f
16315 + pax_enter_kernel
16316 + jmp 2f
16317 +1: pax_enter_kernel_user
16318 +2:
16319 +#else
16320 + pax_enter_kernel
16321 +#endif
16322 call \func
16323 .endm
16324
16325 @@ -863,7 +1174,7 @@ ret_from_intr:
16326
16327 exit_intr:
16328 GET_THREAD_INFO(%rcx)
16329 - testl $3,CS-ARGOFFSET(%rsp)
16330 + testb $3,CS-ARGOFFSET(%rsp)
16331 je retint_kernel
16332
16333 /* Interrupt came from user space */
16334 @@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
16335 * The iretq could re-enable interrupts:
16336 */
16337 DISABLE_INTERRUPTS(CLBR_ANY)
16338 + pax_exit_kernel_user
16339 TRACE_IRQS_IRETQ
16340 SWAPGS
16341 jmp restore_args
16342
16343 retint_restore_args: /* return to kernel space */
16344 DISABLE_INTERRUPTS(CLBR_ANY)
16345 + pax_exit_kernel
16346 + pax_force_retaddr RIP-ARGOFFSET
16347 /*
16348 * The iretq could re-enable interrupts:
16349 */
16350 @@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
16351 #endif
16352
16353 CFI_ENDPROC
16354 -END(common_interrupt)
16355 +ENDPROC(common_interrupt)
16356 /*
16357 * End of kprobes section
16358 */
16359 @@ -996,7 +1310,7 @@ ENTRY(\sym)
16360 interrupt \do_sym
16361 jmp ret_from_intr
16362 CFI_ENDPROC
16363 -END(\sym)
16364 +ENDPROC(\sym)
16365 .endm
16366
16367 #ifdef CONFIG_SMP
16368 @@ -1069,12 +1383,22 @@ ENTRY(\sym)
16369 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16370 call error_entry
16371 DEFAULT_FRAME 0
16372 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16373 + testb $3, CS(%rsp)
16374 + jnz 1f
16375 + pax_enter_kernel
16376 + jmp 2f
16377 +1: pax_enter_kernel_user
16378 +2:
16379 +#else
16380 + pax_enter_kernel
16381 +#endif
16382 movq %rsp,%rdi /* pt_regs pointer */
16383 xorl %esi,%esi /* no error code */
16384 call \do_sym
16385 jmp error_exit /* %ebx: no swapgs flag */
16386 CFI_ENDPROC
16387 -END(\sym)
16388 +ENDPROC(\sym)
16389 .endm
16390
16391 .macro paranoidzeroentry sym do_sym
16392 @@ -1086,15 +1410,25 @@ ENTRY(\sym)
16393 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16394 call save_paranoid
16395 TRACE_IRQS_OFF
16396 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16397 + testb $3, CS(%rsp)
16398 + jnz 1f
16399 + pax_enter_kernel
16400 + jmp 2f
16401 +1: pax_enter_kernel_user
16402 +2:
16403 +#else
16404 + pax_enter_kernel
16405 +#endif
16406 movq %rsp,%rdi /* pt_regs pointer */
16407 xorl %esi,%esi /* no error code */
16408 call \do_sym
16409 jmp paranoid_exit /* %ebx: no swapgs flag */
16410 CFI_ENDPROC
16411 -END(\sym)
16412 +ENDPROC(\sym)
16413 .endm
16414
16415 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16416 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16417 .macro paranoidzeroentry_ist sym do_sym ist
16418 ENTRY(\sym)
16419 INTR_FRAME
16420 @@ -1104,14 +1438,30 @@ ENTRY(\sym)
16421 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16422 call save_paranoid
16423 TRACE_IRQS_OFF
16424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16425 + testb $3, CS(%rsp)
16426 + jnz 1f
16427 + pax_enter_kernel
16428 + jmp 2f
16429 +1: pax_enter_kernel_user
16430 +2:
16431 +#else
16432 + pax_enter_kernel
16433 +#endif
16434 movq %rsp,%rdi /* pt_regs pointer */
16435 xorl %esi,%esi /* no error code */
16436 +#ifdef CONFIG_SMP
16437 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16438 + lea init_tss(%r12), %r12
16439 +#else
16440 + lea init_tss(%rip), %r12
16441 +#endif
16442 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16443 call \do_sym
16444 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
16445 jmp paranoid_exit /* %ebx: no swapgs flag */
16446 CFI_ENDPROC
16447 -END(\sym)
16448 +ENDPROC(\sym)
16449 .endm
16450
16451 .macro errorentry sym do_sym
16452 @@ -1122,13 +1472,23 @@ ENTRY(\sym)
16453 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
16454 call error_entry
16455 DEFAULT_FRAME 0
16456 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16457 + testb $3, CS(%rsp)
16458 + jnz 1f
16459 + pax_enter_kernel
16460 + jmp 2f
16461 +1: pax_enter_kernel_user
16462 +2:
16463 +#else
16464 + pax_enter_kernel
16465 +#endif
16466 movq %rsp,%rdi /* pt_regs pointer */
16467 movq ORIG_RAX(%rsp),%rsi /* get error code */
16468 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16469 call \do_sym
16470 jmp error_exit /* %ebx: no swapgs flag */
16471 CFI_ENDPROC
16472 -END(\sym)
16473 +ENDPROC(\sym)
16474 .endm
16475
16476 /* error code is on the stack already */
16477 @@ -1141,13 +1501,23 @@ ENTRY(\sym)
16478 call save_paranoid
16479 DEFAULT_FRAME 0
16480 TRACE_IRQS_OFF
16481 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16482 + testb $3, CS(%rsp)
16483 + jnz 1f
16484 + pax_enter_kernel
16485 + jmp 2f
16486 +1: pax_enter_kernel_user
16487 +2:
16488 +#else
16489 + pax_enter_kernel
16490 +#endif
16491 movq %rsp,%rdi /* pt_regs pointer */
16492 movq ORIG_RAX(%rsp),%rsi /* get error code */
16493 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16494 call \do_sym
16495 jmp paranoid_exit /* %ebx: no swapgs flag */
16496 CFI_ENDPROC
16497 -END(\sym)
16498 +ENDPROC(\sym)
16499 .endm
16500
16501 zeroentry divide_error do_divide_error
16502 @@ -1177,9 +1547,10 @@ gs_change:
16503 2: mfence /* workaround */
16504 SWAPGS
16505 popfq_cfi
16506 + pax_force_retaddr
16507 ret
16508 CFI_ENDPROC
16509 -END(native_load_gs_index)
16510 +ENDPROC(native_load_gs_index)
16511
16512 .section __ex_table,"a"
16513 .align 8
16514 @@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
16515 * Here we are in the child and the registers are set as they were
16516 * at kernel_thread() invocation in the parent.
16517 */
16518 + pax_force_fptr %rsi
16519 call *%rsi
16520 # exit
16521 mov %eax, %edi
16522 call do_exit
16523 ud2 # padding for call trace
16524 CFI_ENDPROC
16525 -END(kernel_thread_helper)
16526 +ENDPROC(kernel_thread_helper)
16527
16528 /*
16529 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16530 @@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
16531 RESTORE_REST
16532 testq %rax,%rax
16533 je int_ret_from_sys_call
16534 - RESTORE_ARGS
16535 UNFAKE_STACK_FRAME
16536 + pax_force_retaddr
16537 ret
16538 CFI_ENDPROC
16539 -END(kernel_execve)
16540 +ENDPROC(kernel_execve)
16541
16542 /* Call softirq on interrupt stack. Interrupts are off. */
16543 ENTRY(call_softirq)
16544 @@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
16545 CFI_DEF_CFA_REGISTER rsp
16546 CFI_ADJUST_CFA_OFFSET -8
16547 decl PER_CPU_VAR(irq_count)
16548 + pax_force_retaddr
16549 ret
16550 CFI_ENDPROC
16551 -END(call_softirq)
16552 +ENDPROC(call_softirq)
16553
16554 #ifdef CONFIG_XEN
16555 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16556 @@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16557 decl PER_CPU_VAR(irq_count)
16558 jmp error_exit
16559 CFI_ENDPROC
16560 -END(xen_do_hypervisor_callback)
16561 +ENDPROC(xen_do_hypervisor_callback)
16562
16563 /*
16564 * Hypervisor uses this for application faults while it executes.
16565 @@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
16566 SAVE_ALL
16567 jmp error_exit
16568 CFI_ENDPROC
16569 -END(xen_failsafe_callback)
16570 +ENDPROC(xen_failsafe_callback)
16571
16572 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16573 xen_hvm_callback_vector xen_evtchn_do_upcall
16574 @@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
16575 TRACE_IRQS_OFF
16576 testl %ebx,%ebx /* swapgs needed? */
16577 jnz paranoid_restore
16578 - testl $3,CS(%rsp)
16579 + testb $3,CS(%rsp)
16580 jnz paranoid_userspace
16581 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16582 + pax_exit_kernel
16583 + TRACE_IRQS_IRETQ 0
16584 + SWAPGS_UNSAFE_STACK
16585 + RESTORE_ALL 8
16586 + pax_force_retaddr_bts
16587 + jmp irq_return
16588 +#endif
16589 paranoid_swapgs:
16590 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16591 + pax_exit_kernel_user
16592 +#else
16593 + pax_exit_kernel
16594 +#endif
16595 TRACE_IRQS_IRETQ 0
16596 SWAPGS_UNSAFE_STACK
16597 RESTORE_ALL 8
16598 jmp irq_return
16599 paranoid_restore:
16600 + pax_exit_kernel
16601 TRACE_IRQS_IRETQ 0
16602 RESTORE_ALL 8
16603 + pax_force_retaddr_bts
16604 jmp irq_return
16605 paranoid_userspace:
16606 GET_THREAD_INFO(%rcx)
16607 @@ -1442,7 +1830,7 @@ paranoid_schedule:
16608 TRACE_IRQS_OFF
16609 jmp paranoid_userspace
16610 CFI_ENDPROC
16611 -END(paranoid_exit)
16612 +ENDPROC(paranoid_exit)
16613
16614 /*
16615 * Exception entry point. This expects an error code/orig_rax on the stack.
16616 @@ -1469,12 +1857,13 @@ ENTRY(error_entry)
16617 movq_cfi r14, R14+8
16618 movq_cfi r15, R15+8
16619 xorl %ebx,%ebx
16620 - testl $3,CS+8(%rsp)
16621 + testb $3,CS+8(%rsp)
16622 je error_kernelspace
16623 error_swapgs:
16624 SWAPGS
16625 error_sti:
16626 TRACE_IRQS_OFF
16627 + pax_force_retaddr_bts
16628 ret
16629
16630 /*
16631 @@ -1501,7 +1890,7 @@ bstep_iret:
16632 movq %rcx,RIP+8(%rsp)
16633 jmp error_swapgs
16634 CFI_ENDPROC
16635 -END(error_entry)
16636 +ENDPROC(error_entry)
16637
16638
16639 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16640 @@ -1521,7 +1910,7 @@ ENTRY(error_exit)
16641 jnz retint_careful
16642 jmp retint_swapgs
16643 CFI_ENDPROC
16644 -END(error_exit)
16645 +ENDPROC(error_exit)
16646
16647 /*
16648 * Test if a given stack is an NMI stack or not.
16649 @@ -1579,9 +1968,11 @@ ENTRY(nmi)
16650 * If %cs was not the kernel segment, then the NMI triggered in user
16651 * space, which means it is definitely not nested.
16652 */
16653 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16654 + je 1f
16655 cmpl $__KERNEL_CS, 16(%rsp)
16656 jne first_nmi
16657 -
16658 +1:
16659 /*
16660 * Check the special variable on the stack to see if NMIs are
16661 * executing.
16662 @@ -1728,6 +2119,16 @@ end_repeat_nmi:
16663 */
16664 call save_paranoid
16665 DEFAULT_FRAME 0
16666 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16667 + testb $3, CS(%rsp)
16668 + jnz 1f
16669 + pax_enter_kernel
16670 + jmp 2f
16671 +1: pax_enter_kernel_user
16672 +2:
16673 +#else
16674 + pax_enter_kernel
16675 +#endif
16676 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16677 movq %rsp,%rdi
16678 movq $-1,%rsi
16679 @@ -1735,21 +2136,32 @@ end_repeat_nmi:
16680 testl %ebx,%ebx /* swapgs needed? */
16681 jnz nmi_restore
16682 nmi_swapgs:
16683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16684 + pax_exit_kernel_user
16685 +#else
16686 + pax_exit_kernel
16687 +#endif
16688 SWAPGS_UNSAFE_STACK
16689 + RESTORE_ALL 8
16690 + /* Clear the NMI executing stack variable */
16691 + movq $0, 10*8(%rsp)
16692 + jmp irq_return
16693 nmi_restore:
16694 + pax_exit_kernel
16695 RESTORE_ALL 8
16696 + pax_force_retaddr_bts
16697 /* Clear the NMI executing stack variable */
16698 movq $0, 10*8(%rsp)
16699 jmp irq_return
16700 CFI_ENDPROC
16701 -END(nmi)
16702 +ENDPROC(nmi)
16703
16704 ENTRY(ignore_sysret)
16705 CFI_STARTPROC
16706 mov $-ENOSYS,%eax
16707 sysret
16708 CFI_ENDPROC
16709 -END(ignore_sysret)
16710 +ENDPROC(ignore_sysret)
16711
16712 /*
16713 * End of kprobes section
16714 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16715 index c9a281f..ce2f317 100644
16716 --- a/arch/x86/kernel/ftrace.c
16717 +++ b/arch/x86/kernel/ftrace.c
16718 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16719 static const void *mod_code_newcode; /* holds the text to write to the IP */
16720
16721 static unsigned nmi_wait_count;
16722 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
16723 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16724
16725 int ftrace_arch_read_dyn_info(char *buf, int size)
16726 {
16727 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16728
16729 r = snprintf(buf, size, "%u %u",
16730 nmi_wait_count,
16731 - atomic_read(&nmi_update_count));
16732 + atomic_read_unchecked(&nmi_update_count));
16733 return r;
16734 }
16735
16736 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
16737
16738 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16739 smp_rmb();
16740 + pax_open_kernel();
16741 ftrace_mod_code();
16742 - atomic_inc(&nmi_update_count);
16743 + pax_close_kernel();
16744 + atomic_inc_unchecked(&nmi_update_count);
16745 }
16746 /* Must have previous changes seen before executions */
16747 smp_mb();
16748 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
16749 {
16750 unsigned char replaced[MCOUNT_INSN_SIZE];
16751
16752 + ip = ktla_ktva(ip);
16753 +
16754 /*
16755 * Note: Due to modules and __init, code can
16756 * disappear and change, we need to protect against faulting
16757 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16758 unsigned char old[MCOUNT_INSN_SIZE], *new;
16759 int ret;
16760
16761 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16762 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16763 new = ftrace_call_replace(ip, (unsigned long)func);
16764 ret = ftrace_modify_code(ip, old, new);
16765
16766 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16767 {
16768 unsigned char code[MCOUNT_INSN_SIZE];
16769
16770 + ip = ktla_ktva(ip);
16771 +
16772 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16773 return -EFAULT;
16774
16775 diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16776 index 51ff186..9e77418 100644
16777 --- a/arch/x86/kernel/head32.c
16778 +++ b/arch/x86/kernel/head32.c
16779 @@ -19,6 +19,7 @@
16780 #include <asm/io_apic.h>
16781 #include <asm/bios_ebda.h>
16782 #include <asm/tlbflush.h>
16783 +#include <asm/boot.h>
16784
16785 static void __init i386_default_early_setup(void)
16786 {
16787 @@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
16788
16789 void __init i386_start_kernel(void)
16790 {
16791 - memblock_reserve(__pa_symbol(&_text),
16792 - __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16793 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
16794
16795 #ifdef CONFIG_BLK_DEV_INITRD
16796 /* Reserve INITRD */
16797 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16798 index ce0be7c..c41476e 100644
16799 --- a/arch/x86/kernel/head_32.S
16800 +++ b/arch/x86/kernel/head_32.S
16801 @@ -25,6 +25,12 @@
16802 /* Physical address */
16803 #define pa(X) ((X) - __PAGE_OFFSET)
16804
16805 +#ifdef CONFIG_PAX_KERNEXEC
16806 +#define ta(X) (X)
16807 +#else
16808 +#define ta(X) ((X) - __PAGE_OFFSET)
16809 +#endif
16810 +
16811 /*
16812 * References to members of the new_cpu_data structure.
16813 */
16814 @@ -54,11 +60,7 @@
16815 * and small than max_low_pfn, otherwise will waste some page table entries
16816 */
16817
16818 -#if PTRS_PER_PMD > 1
16819 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16820 -#else
16821 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16822 -#endif
16823 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16824
16825 /* Number of possible pages in the lowmem region */
16826 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
16827 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
16828 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16829
16830 /*
16831 + * Real beginning of normal "text" segment
16832 + */
16833 +ENTRY(stext)
16834 +ENTRY(_stext)
16835 +
16836 +/*
16837 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16838 * %esi points to the real-mode code as a 32-bit pointer.
16839 * CS and DS must be 4 GB flat segments, but we don't depend on
16840 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16841 * can.
16842 */
16843 __HEAD
16844 +
16845 +#ifdef CONFIG_PAX_KERNEXEC
16846 + jmp startup_32
16847 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16848 +.fill PAGE_SIZE-5,1,0xcc
16849 +#endif
16850 +
16851 ENTRY(startup_32)
16852 movl pa(stack_start),%ecx
16853
16854 @@ -105,6 +120,57 @@ ENTRY(startup_32)
16855 2:
16856 leal -__PAGE_OFFSET(%ecx),%esp
16857
16858 +#ifdef CONFIG_SMP
16859 + movl $pa(cpu_gdt_table),%edi
16860 + movl $__per_cpu_load,%eax
16861 + movw %ax,__KERNEL_PERCPU + 2(%edi)
16862 + rorl $16,%eax
16863 + movb %al,__KERNEL_PERCPU + 4(%edi)
16864 + movb %ah,__KERNEL_PERCPU + 7(%edi)
16865 + movl $__per_cpu_end - 1,%eax
16866 + subl $__per_cpu_start,%eax
16867 + movw %ax,__KERNEL_PERCPU + 0(%edi)
16868 +#endif
16869 +
16870 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16871 + movl $NR_CPUS,%ecx
16872 + movl $pa(cpu_gdt_table),%edi
16873 +1:
16874 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16875 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16876 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16877 + addl $PAGE_SIZE_asm,%edi
16878 + loop 1b
16879 +#endif
16880 +
16881 +#ifdef CONFIG_PAX_KERNEXEC
16882 + movl $pa(boot_gdt),%edi
16883 + movl $__LOAD_PHYSICAL_ADDR,%eax
16884 + movw %ax,__BOOT_CS + 2(%edi)
16885 + rorl $16,%eax
16886 + movb %al,__BOOT_CS + 4(%edi)
16887 + movb %ah,__BOOT_CS + 7(%edi)
16888 + rorl $16,%eax
16889 +
16890 + ljmp $(__BOOT_CS),$1f
16891 +1:
16892 +
16893 + movl $NR_CPUS,%ecx
16894 + movl $pa(cpu_gdt_table),%edi
16895 + addl $__PAGE_OFFSET,%eax
16896 +1:
16897 + movw %ax,__KERNEL_CS + 2(%edi)
16898 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16899 + rorl $16,%eax
16900 + movb %al,__KERNEL_CS + 4(%edi)
16901 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16902 + movb %ah,__KERNEL_CS + 7(%edi)
16903 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16904 + rorl $16,%eax
16905 + addl $PAGE_SIZE_asm,%edi
16906 + loop 1b
16907 +#endif
16908 +
16909 /*
16910 * Clear BSS first so that there are no surprises...
16911 */
16912 @@ -195,8 +261,11 @@ ENTRY(startup_32)
16913 movl %eax, pa(max_pfn_mapped)
16914
16915 /* Do early initialization of the fixmap area */
16916 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16917 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
16918 +#ifdef CONFIG_COMPAT_VDSO
16919 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
16920 +#else
16921 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
16922 +#endif
16923 #else /* Not PAE */
16924
16925 page_pde_offset = (__PAGE_OFFSET >> 20);
16926 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16927 movl %eax, pa(max_pfn_mapped)
16928
16929 /* Do early initialization of the fixmap area */
16930 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16931 - movl %eax,pa(initial_page_table+0xffc)
16932 +#ifdef CONFIG_COMPAT_VDSO
16933 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
16934 +#else
16935 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
16936 +#endif
16937 #endif
16938
16939 #ifdef CONFIG_PARAVIRT
16940 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16941 cmpl $num_subarch_entries, %eax
16942 jae bad_subarch
16943
16944 - movl pa(subarch_entries)(,%eax,4), %eax
16945 - subl $__PAGE_OFFSET, %eax
16946 - jmp *%eax
16947 + jmp *pa(subarch_entries)(,%eax,4)
16948
16949 bad_subarch:
16950 WEAK(lguest_entry)
16951 @@ -255,10 +325,10 @@ WEAK(xen_entry)
16952 __INITDATA
16953
16954 subarch_entries:
16955 - .long default_entry /* normal x86/PC */
16956 - .long lguest_entry /* lguest hypervisor */
16957 - .long xen_entry /* Xen hypervisor */
16958 - .long default_entry /* Moorestown MID */
16959 + .long ta(default_entry) /* normal x86/PC */
16960 + .long ta(lguest_entry) /* lguest hypervisor */
16961 + .long ta(xen_entry) /* Xen hypervisor */
16962 + .long ta(default_entry) /* Moorestown MID */
16963 num_subarch_entries = (. - subarch_entries) / 4
16964 .previous
16965 #else
16966 @@ -312,6 +382,7 @@ default_entry:
16967 orl %edx,%eax
16968 movl %eax,%cr4
16969
16970 +#ifdef CONFIG_X86_PAE
16971 testb $X86_CR4_PAE, %al # check if PAE is enabled
16972 jz 6f
16973
16974 @@ -340,6 +411,9 @@ default_entry:
16975 /* Make changes effective */
16976 wrmsr
16977
16978 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16979 +#endif
16980 +
16981 6:
16982
16983 /*
16984 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
16985 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
16986 movl %eax,%ss # after changing gdt.
16987
16988 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
16989 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
16990 movl %eax,%ds
16991 movl %eax,%es
16992
16993 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
16994 */
16995 cmpb $0,ready
16996 jne 1f
16997 - movl $gdt_page,%eax
16998 + movl $cpu_gdt_table,%eax
16999 movl $stack_canary,%ecx
17000 +#ifdef CONFIG_SMP
17001 + addl $__per_cpu_load,%ecx
17002 +#endif
17003 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17004 shrl $16, %ecx
17005 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17006 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17007 1:
17008 -#endif
17009 movl $(__KERNEL_STACK_CANARY),%eax
17010 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17011 + movl $(__USER_DS),%eax
17012 +#else
17013 + xorl %eax,%eax
17014 +#endif
17015 movl %eax,%gs
17016
17017 xorl %eax,%eax # Clear LDT
17018 @@ -558,22 +639,22 @@ early_page_fault:
17019 jmp early_fault
17020
17021 early_fault:
17022 - cld
17023 #ifdef CONFIG_PRINTK
17024 + cmpl $1,%ss:early_recursion_flag
17025 + je hlt_loop
17026 + incl %ss:early_recursion_flag
17027 + cld
17028 pusha
17029 movl $(__KERNEL_DS),%eax
17030 movl %eax,%ds
17031 movl %eax,%es
17032 - cmpl $2,early_recursion_flag
17033 - je hlt_loop
17034 - incl early_recursion_flag
17035 movl %cr2,%eax
17036 pushl %eax
17037 pushl %edx /* trapno */
17038 pushl $fault_msg
17039 call printk
17040 +; call dump_stack
17041 #endif
17042 - call dump_stack
17043 hlt_loop:
17044 hlt
17045 jmp hlt_loop
17046 @@ -581,8 +662,11 @@ hlt_loop:
17047 /* This is the default interrupt "handler" :-) */
17048 ALIGN
17049 ignore_int:
17050 - cld
17051 #ifdef CONFIG_PRINTK
17052 + cmpl $2,%ss:early_recursion_flag
17053 + je hlt_loop
17054 + incl %ss:early_recursion_flag
17055 + cld
17056 pushl %eax
17057 pushl %ecx
17058 pushl %edx
17059 @@ -591,9 +675,6 @@ ignore_int:
17060 movl $(__KERNEL_DS),%eax
17061 movl %eax,%ds
17062 movl %eax,%es
17063 - cmpl $2,early_recursion_flag
17064 - je hlt_loop
17065 - incl early_recursion_flag
17066 pushl 16(%esp)
17067 pushl 24(%esp)
17068 pushl 32(%esp)
17069 @@ -622,29 +703,43 @@ ENTRY(initial_code)
17070 /*
17071 * BSS section
17072 */
17073 -__PAGE_ALIGNED_BSS
17074 - .align PAGE_SIZE
17075 #ifdef CONFIG_X86_PAE
17076 +.section .initial_pg_pmd,"a",@progbits
17077 initial_pg_pmd:
17078 .fill 1024*KPMDS,4,0
17079 #else
17080 +.section .initial_page_table,"a",@progbits
17081 ENTRY(initial_page_table)
17082 .fill 1024,4,0
17083 #endif
17084 +.section .initial_pg_fixmap,"a",@progbits
17085 initial_pg_fixmap:
17086 .fill 1024,4,0
17087 +.section .empty_zero_page,"a",@progbits
17088 ENTRY(empty_zero_page)
17089 .fill 4096,1,0
17090 +.section .swapper_pg_dir,"a",@progbits
17091 ENTRY(swapper_pg_dir)
17092 +#ifdef CONFIG_X86_PAE
17093 + .fill 4,8,0
17094 +#else
17095 .fill 1024,4,0
17096 +#endif
17097 +
17098 +/*
17099 + * The IDT has to be page-aligned to simplify the Pentium
17100 + * F0 0F bug workaround.. We have a special link segment
17101 + * for this.
17102 + */
17103 +.section .idt,"a",@progbits
17104 +ENTRY(idt_table)
17105 + .fill 256,8,0
17106
17107 /*
17108 * This starts the data section.
17109 */
17110 #ifdef CONFIG_X86_PAE
17111 -__PAGE_ALIGNED_DATA
17112 - /* Page-aligned for the benefit of paravirt? */
17113 - .align PAGE_SIZE
17114 +.section .initial_page_table,"a",@progbits
17115 ENTRY(initial_page_table)
17116 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17117 # if KPMDS == 3
17118 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
17119 # error "Kernel PMDs should be 1, 2 or 3"
17120 # endif
17121 .align PAGE_SIZE /* needs to be page-sized too */
17122 +
17123 +#ifdef CONFIG_PAX_PER_CPU_PGD
17124 +ENTRY(cpu_pgd)
17125 + .rept NR_CPUS
17126 + .fill 4,8,0
17127 + .endr
17128 +#endif
17129 +
17130 #endif
17131
17132 .data
17133 .balign 4
17134 ENTRY(stack_start)
17135 - .long init_thread_union+THREAD_SIZE
17136 + .long init_thread_union+THREAD_SIZE-8
17137
17138 +ready: .byte 0
17139 +
17140 +.section .rodata,"a",@progbits
17141 early_recursion_flag:
17142 .long 0
17143
17144 -ready: .byte 0
17145 -
17146 int_msg:
17147 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17148
17149 @@ -707,7 +811,7 @@ fault_msg:
17150 .word 0 # 32 bit align gdt_desc.address
17151 boot_gdt_descr:
17152 .word __BOOT_DS+7
17153 - .long boot_gdt - __PAGE_OFFSET
17154 + .long pa(boot_gdt)
17155
17156 .word 0 # 32-bit align idt_desc.address
17157 idt_descr:
17158 @@ -718,7 +822,7 @@ idt_descr:
17159 .word 0 # 32 bit align gdt_desc.address
17160 ENTRY(early_gdt_descr)
17161 .word GDT_ENTRIES*8-1
17162 - .long gdt_page /* Overwritten for secondary CPUs */
17163 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
17164
17165 /*
17166 * The boot_gdt must mirror the equivalent in setup.S and is
17167 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
17168 .align L1_CACHE_BYTES
17169 ENTRY(boot_gdt)
17170 .fill GDT_ENTRY_BOOT_CS,8,0
17171 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17172 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17173 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17174 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17175 +
17176 + .align PAGE_SIZE_asm
17177 +ENTRY(cpu_gdt_table)
17178 + .rept NR_CPUS
17179 + .quad 0x0000000000000000 /* NULL descriptor */
17180 + .quad 0x0000000000000000 /* 0x0b reserved */
17181 + .quad 0x0000000000000000 /* 0x13 reserved */
17182 + .quad 0x0000000000000000 /* 0x1b reserved */
17183 +
17184 +#ifdef CONFIG_PAX_KERNEXEC
17185 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17186 +#else
17187 + .quad 0x0000000000000000 /* 0x20 unused */
17188 +#endif
17189 +
17190 + .quad 0x0000000000000000 /* 0x28 unused */
17191 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17192 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17193 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17194 + .quad 0x0000000000000000 /* 0x4b reserved */
17195 + .quad 0x0000000000000000 /* 0x53 reserved */
17196 + .quad 0x0000000000000000 /* 0x5b reserved */
17197 +
17198 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17199 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17200 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17201 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17202 +
17203 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17204 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17205 +
17206 + /*
17207 + * Segments used for calling PnP BIOS have byte granularity.
17208 + * The code segments and data segments have fixed 64k limits,
17209 + * the transfer segment sizes are set at run time.
17210 + */
17211 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
17212 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
17213 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
17214 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
17215 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
17216 +
17217 + /*
17218 + * The APM segments have byte granularity and their bases
17219 + * are set at run time. All have 64k limits.
17220 + */
17221 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17222 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17223 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
17224 +
17225 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17226 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17227 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17228 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17229 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17230 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17231 +
17232 + /* Be sure this is zeroed to avoid false validations in Xen */
17233 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17234 + .endr
17235 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17236 index 40f4eb3..6d24d9d 100644
17237 --- a/arch/x86/kernel/head_64.S
17238 +++ b/arch/x86/kernel/head_64.S
17239 @@ -19,6 +19,8 @@
17240 #include <asm/cache.h>
17241 #include <asm/processor-flags.h>
17242 #include <asm/percpu.h>
17243 +#include <asm/cpufeature.h>
17244 +#include <asm/alternative-asm.h>
17245
17246 #ifdef CONFIG_PARAVIRT
17247 #include <asm/asm-offsets.h>
17248 @@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17249 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17250 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17251 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17252 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
17253 +L3_VMALLOC_START = pud_index(VMALLOC_START)
17254 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
17255 +L3_VMALLOC_END = pud_index(VMALLOC_END)
17256 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17257 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17258
17259 .text
17260 __HEAD
17261 @@ -85,35 +93,23 @@ startup_64:
17262 */
17263 addq %rbp, init_level4_pgt + 0(%rip)
17264 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17265 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17266 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17267 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17268 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17269
17270 addq %rbp, level3_ident_pgt + 0(%rip)
17271 +#ifndef CONFIG_XEN
17272 + addq %rbp, level3_ident_pgt + 8(%rip)
17273 +#endif
17274
17275 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17276 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17277 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17278 +
17279 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17280 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17281
17282 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17283 -
17284 - /* Add an Identity mapping if I am above 1G */
17285 - leaq _text(%rip), %rdi
17286 - andq $PMD_PAGE_MASK, %rdi
17287 -
17288 - movq %rdi, %rax
17289 - shrq $PUD_SHIFT, %rax
17290 - andq $(PTRS_PER_PUD - 1), %rax
17291 - jz ident_complete
17292 -
17293 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17294 - leaq level3_ident_pgt(%rip), %rbx
17295 - movq %rdx, 0(%rbx, %rax, 8)
17296 -
17297 - movq %rdi, %rax
17298 - shrq $PMD_SHIFT, %rax
17299 - andq $(PTRS_PER_PMD - 1), %rax
17300 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17301 - leaq level2_spare_pgt(%rip), %rbx
17302 - movq %rdx, 0(%rbx, %rax, 8)
17303 -ident_complete:
17304 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17305
17306 /*
17307 * Fixup the kernel text+data virtual addresses. Note that
17308 @@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
17309 * after the boot processor executes this code.
17310 */
17311
17312 - /* Enable PAE mode and PGE */
17313 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17314 + /* Enable PAE mode and PSE/PGE */
17315 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17316 movq %rax, %cr4
17317
17318 /* Setup early boot stage 4 level pagetables. */
17319 @@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
17320 movl $MSR_EFER, %ecx
17321 rdmsr
17322 btsl $_EFER_SCE, %eax /* Enable System Call */
17323 - btl $20,%edi /* No Execute supported? */
17324 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17325 jnc 1f
17326 btsl $_EFER_NX, %eax
17327 + leaq init_level4_pgt(%rip), %rdi
17328 +#ifndef CONFIG_EFI
17329 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17330 +#endif
17331 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17332 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17333 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17334 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
17335 1: wrmsr /* Make changes effective */
17336
17337 /* Setup cr0 */
17338 @@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17339 * jump. In addition we need to ensure %cs is set so we make this
17340 * a far return.
17341 */
17342 + pax_set_fptr_mask
17343 movq initial_code(%rip),%rax
17344 pushq $0 # fake return address to stop unwinder
17345 pushq $__KERNEL_CS # set correct cs
17346 @@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
17347 bad_address:
17348 jmp bad_address
17349
17350 - .section ".init.text","ax"
17351 + __INIT
17352 #ifdef CONFIG_EARLY_PRINTK
17353 .globl early_idt_handlers
17354 early_idt_handlers:
17355 @@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
17356 #endif /* EARLY_PRINTK */
17357 1: hlt
17358 jmp 1b
17359 + .previous
17360
17361 #ifdef CONFIG_EARLY_PRINTK
17362 + __INITDATA
17363 early_recursion_flag:
17364 .long 0
17365 + .previous
17366
17367 + .section .rodata,"a",@progbits
17368 early_idt_msg:
17369 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17370 early_idt_ripmsg:
17371 .asciz "RIP %s\n"
17372 + .previous
17373 #endif /* CONFIG_EARLY_PRINTK */
17374 - .previous
17375
17376 + .section .rodata,"a",@progbits
17377 #define NEXT_PAGE(name) \
17378 .balign PAGE_SIZE; \
17379 ENTRY(name)
17380 @@ -338,7 +348,6 @@ ENTRY(name)
17381 i = i + 1 ; \
17382 .endr
17383
17384 - .data
17385 /*
17386 * This default setting generates an ident mapping at address 0x100000
17387 * and a mapping for the kernel that precisely maps virtual address
17388 @@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17389 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17390 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17392 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
17393 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17394 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
17395 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17396 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17397 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17398 .org init_level4_pgt + L4_START_KERNEL*8, 0
17399 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17400 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17401
17402 +#ifdef CONFIG_PAX_PER_CPU_PGD
17403 +NEXT_PAGE(cpu_pgd)
17404 + .rept NR_CPUS
17405 + .fill 512,8,0
17406 + .endr
17407 +#endif
17408 +
17409 NEXT_PAGE(level3_ident_pgt)
17410 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17411 +#ifdef CONFIG_XEN
17412 .fill 511,8,0
17413 +#else
17414 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17415 + .fill 510,8,0
17416 +#endif
17417 +
17418 +NEXT_PAGE(level3_vmalloc_start_pgt)
17419 + .fill 512,8,0
17420 +
17421 +NEXT_PAGE(level3_vmalloc_end_pgt)
17422 + .fill 512,8,0
17423 +
17424 +NEXT_PAGE(level3_vmemmap_pgt)
17425 + .fill L3_VMEMMAP_START,8,0
17426 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17427
17428 NEXT_PAGE(level3_kernel_pgt)
17429 .fill L3_START_KERNEL,8,0
17430 @@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17431 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17432 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17433
17434 +NEXT_PAGE(level2_vmemmap_pgt)
17435 + .fill 512,8,0
17436 +
17437 NEXT_PAGE(level2_fixmap_pgt)
17438 - .fill 506,8,0
17439 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17440 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17441 - .fill 5,8,0
17442 + .fill 507,8,0
17443 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17444 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17445 + .fill 4,8,0
17446
17447 -NEXT_PAGE(level1_fixmap_pgt)
17448 +NEXT_PAGE(level1_vsyscall_pgt)
17449 .fill 512,8,0
17450
17451 -NEXT_PAGE(level2_ident_pgt)
17452 - /* Since I easily can, map the first 1G.
17453 + /* Since I easily can, map the first 2G.
17454 * Don't set NX because code runs from these pages.
17455 */
17456 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17457 +NEXT_PAGE(level2_ident_pgt)
17458 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17459
17460 NEXT_PAGE(level2_kernel_pgt)
17461 /*
17462 @@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17463 * If you want to increase this then increase MODULES_VADDR
17464 * too.)
17465 */
17466 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17467 - KERNEL_IMAGE_SIZE/PMD_SIZE)
17468 -
17469 -NEXT_PAGE(level2_spare_pgt)
17470 - .fill 512, 8, 0
17471 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17472
17473 #undef PMDS
17474 #undef NEXT_PAGE
17475
17476 - .data
17477 + .align PAGE_SIZE
17478 +ENTRY(cpu_gdt_table)
17479 + .rept NR_CPUS
17480 + .quad 0x0000000000000000 /* NULL descriptor */
17481 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17482 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
17483 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
17484 + .quad 0x00cffb000000ffff /* __USER32_CS */
17485 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17486 + .quad 0x00affb000000ffff /* __USER_CS */
17487 +
17488 +#ifdef CONFIG_PAX_KERNEXEC
17489 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17490 +#else
17491 + .quad 0x0 /* unused */
17492 +#endif
17493 +
17494 + .quad 0,0 /* TSS */
17495 + .quad 0,0 /* LDT */
17496 + .quad 0,0,0 /* three TLS descriptors */
17497 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
17498 + /* asm/segment.h:GDT_ENTRIES must match this */
17499 +
17500 + /* zero the remaining page */
17501 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17502 + .endr
17503 +
17504 .align 16
17505 .globl early_gdt_descr
17506 early_gdt_descr:
17507 .word GDT_ENTRIES*8-1
17508 early_gdt_descr_base:
17509 - .quad INIT_PER_CPU_VAR(gdt_page)
17510 + .quad cpu_gdt_table
17511
17512 ENTRY(phys_base)
17513 /* This must match the first entry in level2_kernel_pgt */
17514 .quad 0x0000000000000000
17515
17516 #include "../../x86/xen/xen-head.S"
17517 -
17518 - .section .bss, "aw", @nobits
17519 +
17520 + .section .rodata,"a",@progbits
17521 .align L1_CACHE_BYTES
17522 ENTRY(idt_table)
17523 - .skip IDT_ENTRIES * 16
17524 + .fill 512,8,0
17525
17526 .align L1_CACHE_BYTES
17527 ENTRY(nmi_idt_table)
17528 - .skip IDT_ENTRIES * 16
17529 + .fill 512,8,0
17530
17531 __PAGE_ALIGNED_BSS
17532 .align PAGE_SIZE
17533 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17534 index 9c3bd4a..e1d9b35 100644
17535 --- a/arch/x86/kernel/i386_ksyms_32.c
17536 +++ b/arch/x86/kernel/i386_ksyms_32.c
17537 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17538 EXPORT_SYMBOL(cmpxchg8b_emu);
17539 #endif
17540
17541 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
17542 +
17543 /* Networking helper routines. */
17544 EXPORT_SYMBOL(csum_partial_copy_generic);
17545 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17546 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17547
17548 EXPORT_SYMBOL(__get_user_1);
17549 EXPORT_SYMBOL(__get_user_2);
17550 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17551
17552 EXPORT_SYMBOL(csum_partial);
17553 EXPORT_SYMBOL(empty_zero_page);
17554 +
17555 +#ifdef CONFIG_PAX_KERNEXEC
17556 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17557 +#endif
17558 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
17559 index 2d6e649..df6e1af 100644
17560 --- a/arch/x86/kernel/i387.c
17561 +++ b/arch/x86/kernel/i387.c
17562 @@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17563 static inline bool interrupted_user_mode(void)
17564 {
17565 struct pt_regs *regs = get_irq_regs();
17566 - return regs && user_mode_vm(regs);
17567 + return regs && user_mode(regs);
17568 }
17569
17570 /*
17571 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17572 index 36d1853..bf25736 100644
17573 --- a/arch/x86/kernel/i8259.c
17574 +++ b/arch/x86/kernel/i8259.c
17575 @@ -209,7 +209,7 @@ spurious_8259A_irq:
17576 "spurious 8259A interrupt: IRQ%d.\n", irq);
17577 spurious_irq_mask |= irqmask;
17578 }
17579 - atomic_inc(&irq_err_count);
17580 + atomic_inc_unchecked(&irq_err_count);
17581 /*
17582 * Theoretically we do not have to handle this IRQ,
17583 * but in Linux this does not cause problems and is
17584 diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17585 index 43e9ccf..44ccf6f 100644
17586 --- a/arch/x86/kernel/init_task.c
17587 +++ b/arch/x86/kernel/init_task.c
17588 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17589 * way process stacks are handled. This is done by having a special
17590 * "init_task" linker map entry..
17591 */
17592 -union thread_union init_thread_union __init_task_data =
17593 - { INIT_THREAD_INFO(init_task) };
17594 +union thread_union init_thread_union __init_task_data;
17595
17596 /*
17597 * Initial task structure.
17598 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17599 * section. Since TSS's are completely CPU-local, we want them
17600 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17601 */
17602 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17603 -
17604 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17605 +EXPORT_SYMBOL(init_tss);
17606 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17607 index 8c96897..be66bfa 100644
17608 --- a/arch/x86/kernel/ioport.c
17609 +++ b/arch/x86/kernel/ioport.c
17610 @@ -6,6 +6,7 @@
17611 #include <linux/sched.h>
17612 #include <linux/kernel.h>
17613 #include <linux/capability.h>
17614 +#include <linux/security.h>
17615 #include <linux/errno.h>
17616 #include <linux/types.h>
17617 #include <linux/ioport.h>
17618 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17619
17620 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17621 return -EINVAL;
17622 +#ifdef CONFIG_GRKERNSEC_IO
17623 + if (turn_on && grsec_disable_privio) {
17624 + gr_handle_ioperm();
17625 + return -EPERM;
17626 + }
17627 +#endif
17628 if (turn_on && !capable(CAP_SYS_RAWIO))
17629 return -EPERM;
17630
17631 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17632 * because the ->io_bitmap_max value must match the bitmap
17633 * contents:
17634 */
17635 - tss = &per_cpu(init_tss, get_cpu());
17636 + tss = init_tss + get_cpu();
17637
17638 if (turn_on)
17639 bitmap_clear(t->io_bitmap_ptr, from, num);
17640 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17641 return -EINVAL;
17642 /* Trying to gain more privileges? */
17643 if (level > old) {
17644 +#ifdef CONFIG_GRKERNSEC_IO
17645 + if (grsec_disable_privio) {
17646 + gr_handle_iopl();
17647 + return -EPERM;
17648 + }
17649 +#endif
17650 if (!capable(CAP_SYS_RAWIO))
17651 return -EPERM;
17652 }
17653 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17654 index 3dafc60..aa8e9c4 100644
17655 --- a/arch/x86/kernel/irq.c
17656 +++ b/arch/x86/kernel/irq.c
17657 @@ -18,7 +18,7 @@
17658 #include <asm/mce.h>
17659 #include <asm/hw_irq.h>
17660
17661 -atomic_t irq_err_count;
17662 +atomic_unchecked_t irq_err_count;
17663
17664 /* Function pointer for generic interrupt vector handling */
17665 void (*x86_platform_ipi_callback)(void) = NULL;
17666 @@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17667 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17668 seq_printf(p, " Machine check polls\n");
17669 #endif
17670 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17671 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17672 #if defined(CONFIG_X86_IO_APIC)
17673 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17674 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17675 #endif
17676 return 0;
17677 }
17678 @@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17679
17680 u64 arch_irq_stat(void)
17681 {
17682 - u64 sum = atomic_read(&irq_err_count);
17683 + u64 sum = atomic_read_unchecked(&irq_err_count);
17684
17685 #ifdef CONFIG_X86_IO_APIC
17686 - sum += atomic_read(&irq_mis_count);
17687 + sum += atomic_read_unchecked(&irq_mis_count);
17688 #endif
17689 return sum;
17690 }
17691 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17692 index 58b7f27..e112d08 100644
17693 --- a/arch/x86/kernel/irq_32.c
17694 +++ b/arch/x86/kernel/irq_32.c
17695 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17696 __asm__ __volatile__("andl %%esp,%0" :
17697 "=r" (sp) : "0" (THREAD_SIZE - 1));
17698
17699 - return sp < (sizeof(struct thread_info) + STACK_WARN);
17700 + return sp < STACK_WARN;
17701 }
17702
17703 static void print_stack_overflow(void)
17704 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17705 * per-CPU IRQ handling contexts (thread information and stack)
17706 */
17707 union irq_ctx {
17708 - struct thread_info tinfo;
17709 - u32 stack[THREAD_SIZE/sizeof(u32)];
17710 + unsigned long previous_esp;
17711 + u32 stack[THREAD_SIZE/sizeof(u32)];
17712 } __attribute__((aligned(THREAD_SIZE)));
17713
17714 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17715 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17716 static inline int
17717 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17718 {
17719 - union irq_ctx *curctx, *irqctx;
17720 + union irq_ctx *irqctx;
17721 u32 *isp, arg1, arg2;
17722
17723 - curctx = (union irq_ctx *) current_thread_info();
17724 irqctx = __this_cpu_read(hardirq_ctx);
17725
17726 /*
17727 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17728 * handler) we can't do that and just have to keep using the
17729 * current stack (which is the irq stack already after all)
17730 */
17731 - if (unlikely(curctx == irqctx))
17732 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17733 return 0;
17734
17735 /* build the stack frame on the IRQ stack */
17736 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17737 - irqctx->tinfo.task = curctx->tinfo.task;
17738 - irqctx->tinfo.previous_esp = current_stack_pointer;
17739 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17740 + irqctx->previous_esp = current_stack_pointer;
17741
17742 - /* Copy the preempt_count so that the [soft]irq checks work. */
17743 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
17744 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17745 + __set_fs(MAKE_MM_SEG(0));
17746 +#endif
17747
17748 if (unlikely(overflow))
17749 call_on_stack(print_stack_overflow, isp);
17750 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17751 : "0" (irq), "1" (desc), "2" (isp),
17752 "D" (desc->handle_irq)
17753 : "memory", "cc", "ecx");
17754 +
17755 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17756 + __set_fs(current_thread_info()->addr_limit);
17757 +#endif
17758 +
17759 return 1;
17760 }
17761
17762 @@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17763 */
17764 void __cpuinit irq_ctx_init(int cpu)
17765 {
17766 - union irq_ctx *irqctx;
17767 -
17768 if (per_cpu(hardirq_ctx, cpu))
17769 return;
17770
17771 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17772 - THREAD_FLAGS,
17773 - THREAD_ORDER));
17774 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17775 - irqctx->tinfo.cpu = cpu;
17776 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17777 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17778 -
17779 - per_cpu(hardirq_ctx, cpu) = irqctx;
17780 -
17781 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17782 - THREAD_FLAGS,
17783 - THREAD_ORDER));
17784 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17785 - irqctx->tinfo.cpu = cpu;
17786 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17787 -
17788 - per_cpu(softirq_ctx, cpu) = irqctx;
17789 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17790 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17791
17792 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17793 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17794 @@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17795 asmlinkage void do_softirq(void)
17796 {
17797 unsigned long flags;
17798 - struct thread_info *curctx;
17799 union irq_ctx *irqctx;
17800 u32 *isp;
17801
17802 @@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17803 local_irq_save(flags);
17804
17805 if (local_softirq_pending()) {
17806 - curctx = current_thread_info();
17807 irqctx = __this_cpu_read(softirq_ctx);
17808 - irqctx->tinfo.task = curctx->task;
17809 - irqctx->tinfo.previous_esp = current_stack_pointer;
17810 + irqctx->previous_esp = current_stack_pointer;
17811
17812 /* build the stack frame on the softirq stack */
17813 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17814 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17815 +
17816 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17817 + __set_fs(MAKE_MM_SEG(0));
17818 +#endif
17819
17820 call_on_stack(__do_softirq, isp);
17821 +
17822 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17823 + __set_fs(current_thread_info()->addr_limit);
17824 +#endif
17825 +
17826 /*
17827 * Shouldn't happen, we returned above if in_interrupt():
17828 */
17829 @@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17830 if (unlikely(!desc))
17831 return false;
17832
17833 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17834 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17835 if (unlikely(overflow))
17836 print_stack_overflow();
17837 desc->handle_irq(irq, desc);
17838 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
17839 index d04d3ec..ea4b374 100644
17840 --- a/arch/x86/kernel/irq_64.c
17841 +++ b/arch/x86/kernel/irq_64.c
17842 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17843 u64 estack_top, estack_bottom;
17844 u64 curbase = (u64)task_stack_page(current);
17845
17846 - if (user_mode_vm(regs))
17847 + if (user_mode(regs))
17848 return;
17849
17850 if (regs->sp >= curbase + sizeof(struct thread_info) +
17851 diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
17852 index 1d5d31e..ab846ed 100644
17853 --- a/arch/x86/kernel/kdebugfs.c
17854 +++ b/arch/x86/kernel/kdebugfs.c
17855 @@ -28,6 +28,8 @@ struct setup_data_node {
17856 };
17857
17858 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17859 + size_t count, loff_t *ppos) __size_overflow(3);
17860 +static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17861 size_t count, loff_t *ppos)
17862 {
17863 struct setup_data_node *node = file->private_data;
17864 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17865 index 8bfb614..2b3b35f 100644
17866 --- a/arch/x86/kernel/kgdb.c
17867 +++ b/arch/x86/kernel/kgdb.c
17868 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
17869 #ifdef CONFIG_X86_32
17870 switch (regno) {
17871 case GDB_SS:
17872 - if (!user_mode_vm(regs))
17873 + if (!user_mode(regs))
17874 *(unsigned long *)mem = __KERNEL_DS;
17875 break;
17876 case GDB_SP:
17877 - if (!user_mode_vm(regs))
17878 + if (!user_mode(regs))
17879 *(unsigned long *)mem = kernel_stack_pointer(regs);
17880 break;
17881 case GDB_GS:
17882 @@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17883 case 'k':
17884 /* clear the trace bit */
17885 linux_regs->flags &= ~X86_EFLAGS_TF;
17886 - atomic_set(&kgdb_cpu_doing_single_step, -1);
17887 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17888
17889 /* set the trace bit if we're stepping */
17890 if (remcomInBuffer[0] == 's') {
17891 linux_regs->flags |= X86_EFLAGS_TF;
17892 - atomic_set(&kgdb_cpu_doing_single_step,
17893 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17894 raw_smp_processor_id());
17895 }
17896
17897 @@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17898
17899 switch (cmd) {
17900 case DIE_DEBUG:
17901 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17902 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17903 if (user_mode(regs))
17904 return single_step_cont(regs, args);
17905 break;
17906 diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17907 index c5e410e..da6aaf9 100644
17908 --- a/arch/x86/kernel/kprobes-opt.c
17909 +++ b/arch/x86/kernel/kprobes-opt.c
17910 @@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17911 * Verify if the address gap is in 2GB range, because this uses
17912 * a relative jump.
17913 */
17914 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17915 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17916 if (abs(rel) > 0x7fffffff)
17917 return -ERANGE;
17918
17919 @@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17920 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17921
17922 /* Set probe function call */
17923 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17924 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17925
17926 /* Set returning jmp instruction at the tail of out-of-line buffer */
17927 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17928 - (u8 *)op->kp.addr + op->optinsn.size);
17929 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17930
17931 flush_icache_range((unsigned long) buf,
17932 (unsigned long) buf + TMPL_END_IDX +
17933 @@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17934 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17935
17936 /* Backup instructions which will be replaced by jump address */
17937 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17938 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17939 RELATIVE_ADDR_SIZE);
17940
17941 insn_buf[0] = RELATIVEJUMP_OPCODE;
17942 diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17943 index e213fc8..d783ba4 100644
17944 --- a/arch/x86/kernel/kprobes.c
17945 +++ b/arch/x86/kernel/kprobes.c
17946 @@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17947 } __attribute__((packed)) *insn;
17948
17949 insn = (struct __arch_relative_insn *)from;
17950 +
17951 + pax_open_kernel();
17952 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17953 insn->op = op;
17954 + pax_close_kernel();
17955 }
17956
17957 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
17958 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17959 kprobe_opcode_t opcode;
17960 kprobe_opcode_t *orig_opcodes = opcodes;
17961
17962 - if (search_exception_tables((unsigned long)opcodes))
17963 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17964 return 0; /* Page fault may occur on this address. */
17965
17966 retry:
17967 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17968 /* Another subsystem puts a breakpoint, failed to recover */
17969 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17970 return 0;
17971 + pax_open_kernel();
17972 memcpy(dest, insn.kaddr, insn.length);
17973 + pax_close_kernel();
17974
17975 #ifdef CONFIG_X86_64
17976 if (insn_rip_relative(&insn)) {
17977 @@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17978 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17979 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17980 disp = (u8 *) dest + insn_offset_displacement(&insn);
17981 + pax_open_kernel();
17982 *(s32 *) disp = (s32) newdisp;
17983 + pax_close_kernel();
17984 }
17985 #endif
17986 return insn.length;
17987 @@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17988 * nor set current_kprobe, because it doesn't use single
17989 * stepping.
17990 */
17991 - regs->ip = (unsigned long)p->ainsn.insn;
17992 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17993 preempt_enable_no_resched();
17994 return;
17995 }
17996 @@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
17997 if (p->opcode == BREAKPOINT_INSTRUCTION)
17998 regs->ip = (unsigned long)p->addr;
17999 else
18000 - regs->ip = (unsigned long)p->ainsn.insn;
18001 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18002 }
18003
18004 /*
18005 @@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18006 setup_singlestep(p, regs, kcb, 0);
18007 return 1;
18008 }
18009 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
18010 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18011 /*
18012 * The breakpoint instruction was removed right
18013 * after we hit it. Another cpu has removed
18014 @@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18015 " movq %rax, 152(%rsp)\n"
18016 RESTORE_REGS_STRING
18017 " popfq\n"
18018 +#ifdef KERNEXEC_PLUGIN
18019 + " btsq $63,(%rsp)\n"
18020 +#endif
18021 #else
18022 " pushf\n"
18023 SAVE_REGS_STRING
18024 @@ -765,7 +775,7 @@ static void __kprobes
18025 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18026 {
18027 unsigned long *tos = stack_addr(regs);
18028 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18029 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18030 unsigned long orig_ip = (unsigned long)p->addr;
18031 kprobe_opcode_t *insn = p->ainsn.insn;
18032
18033 @@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18034 struct die_args *args = data;
18035 int ret = NOTIFY_DONE;
18036
18037 - if (args->regs && user_mode_vm(args->regs))
18038 + if (args->regs && user_mode(args->regs))
18039 return ret;
18040
18041 switch (val) {
18042 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18043 index ebc9873..1b9724b 100644
18044 --- a/arch/x86/kernel/ldt.c
18045 +++ b/arch/x86/kernel/ldt.c
18046 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18047 if (reload) {
18048 #ifdef CONFIG_SMP
18049 preempt_disable();
18050 - load_LDT(pc);
18051 + load_LDT_nolock(pc);
18052 if (!cpumask_equal(mm_cpumask(current->mm),
18053 cpumask_of(smp_processor_id())))
18054 smp_call_function(flush_ldt, current->mm, 1);
18055 preempt_enable();
18056 #else
18057 - load_LDT(pc);
18058 + load_LDT_nolock(pc);
18059 #endif
18060 }
18061 if (oldsize) {
18062 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18063 return err;
18064
18065 for (i = 0; i < old->size; i++)
18066 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18067 + write_ldt_entry(new->ldt, i, old->ldt + i);
18068 return 0;
18069 }
18070
18071 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18072 retval = copy_ldt(&mm->context, &old_mm->context);
18073 mutex_unlock(&old_mm->context.lock);
18074 }
18075 +
18076 + if (tsk == current) {
18077 + mm->context.vdso = 0;
18078 +
18079 +#ifdef CONFIG_X86_32
18080 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18081 + mm->context.user_cs_base = 0UL;
18082 + mm->context.user_cs_limit = ~0UL;
18083 +
18084 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18085 + cpus_clear(mm->context.cpu_user_cs_mask);
18086 +#endif
18087 +
18088 +#endif
18089 +#endif
18090 +
18091 + }
18092 +
18093 return retval;
18094 }
18095
18096 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18097 }
18098 }
18099
18100 +#ifdef CONFIG_PAX_SEGMEXEC
18101 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18102 + error = -EINVAL;
18103 + goto out_unlock;
18104 + }
18105 +#endif
18106 +
18107 fill_ldt(&ldt, &ldt_info);
18108 if (oldmode)
18109 ldt.avl = 0;
18110 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18111 index 5b19e4d..6476a76 100644
18112 --- a/arch/x86/kernel/machine_kexec_32.c
18113 +++ b/arch/x86/kernel/machine_kexec_32.c
18114 @@ -26,7 +26,7 @@
18115 #include <asm/cacheflush.h>
18116 #include <asm/debugreg.h>
18117
18118 -static void set_idt(void *newidt, __u16 limit)
18119 +static void set_idt(struct desc_struct *newidt, __u16 limit)
18120 {
18121 struct desc_ptr curidt;
18122
18123 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18124 }
18125
18126
18127 -static void set_gdt(void *newgdt, __u16 limit)
18128 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18129 {
18130 struct desc_ptr curgdt;
18131
18132 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18133 }
18134
18135 control_page = page_address(image->control_code_page);
18136 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18137 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18138
18139 relocate_kernel_ptr = control_page;
18140 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18141 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18142 index 0327e2b..e43737b 100644
18143 --- a/arch/x86/kernel/microcode_intel.c
18144 +++ b/arch/x86/kernel/microcode_intel.c
18145 @@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18146
18147 static int get_ucode_user(void *to, const void *from, size_t n)
18148 {
18149 - return copy_from_user(to, from, n);
18150 + return copy_from_user(to, (const void __force_user *)from, n);
18151 }
18152
18153 static enum ucode_state
18154 request_microcode_user(int cpu, const void __user *buf, size_t size)
18155 {
18156 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18157 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18158 }
18159
18160 static void microcode_fini_cpu(int cpu)
18161 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18162 index f21fd94..61565cd 100644
18163 --- a/arch/x86/kernel/module.c
18164 +++ b/arch/x86/kernel/module.c
18165 @@ -35,15 +35,60 @@
18166 #define DEBUGP(fmt...)
18167 #endif
18168
18169 -void *module_alloc(unsigned long size)
18170 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
18171 {
18172 - if (PAGE_ALIGN(size) > MODULES_LEN)
18173 + if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
18174 return NULL;
18175 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18176 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18177 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18178 -1, __builtin_return_address(0));
18179 }
18180
18181 +void *module_alloc(unsigned long size)
18182 +{
18183 +
18184 +#ifdef CONFIG_PAX_KERNEXEC
18185 + return __module_alloc(size, PAGE_KERNEL);
18186 +#else
18187 + return __module_alloc(size, PAGE_KERNEL_EXEC);
18188 +#endif
18189 +
18190 +}
18191 +
18192 +#ifdef CONFIG_PAX_KERNEXEC
18193 +#ifdef CONFIG_X86_32
18194 +void *module_alloc_exec(unsigned long size)
18195 +{
18196 + struct vm_struct *area;
18197 +
18198 + if (size == 0)
18199 + return NULL;
18200 +
18201 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18202 + return area ? area->addr : NULL;
18203 +}
18204 +EXPORT_SYMBOL(module_alloc_exec);
18205 +
18206 +void module_free_exec(struct module *mod, void *module_region)
18207 +{
18208 + vunmap(module_region);
18209 +}
18210 +EXPORT_SYMBOL(module_free_exec);
18211 +#else
18212 +void module_free_exec(struct module *mod, void *module_region)
18213 +{
18214 + module_free(mod, module_region);
18215 +}
18216 +EXPORT_SYMBOL(module_free_exec);
18217 +
18218 +void *module_alloc_exec(unsigned long size)
18219 +{
18220 + return __module_alloc(size, PAGE_KERNEL_RX);
18221 +}
18222 +EXPORT_SYMBOL(module_alloc_exec);
18223 +#endif
18224 +#endif
18225 +
18226 #ifdef CONFIG_X86_32
18227 int apply_relocate(Elf32_Shdr *sechdrs,
18228 const char *strtab,
18229 @@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18230 unsigned int i;
18231 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18232 Elf32_Sym *sym;
18233 - uint32_t *location;
18234 + uint32_t *plocation, location;
18235
18236 DEBUGP("Applying relocate section %u to %u\n", relsec,
18237 sechdrs[relsec].sh_info);
18238 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18239 /* This is where to make the change */
18240 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18241 - + rel[i].r_offset;
18242 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18243 + location = (uint32_t)plocation;
18244 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18245 + plocation = ktla_ktva((void *)plocation);
18246 /* This is the symbol it is referring to. Note that all
18247 undefined symbols have been resolved. */
18248 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18249 @@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18250 switch (ELF32_R_TYPE(rel[i].r_info)) {
18251 case R_386_32:
18252 /* We add the value into the location given */
18253 - *location += sym->st_value;
18254 + pax_open_kernel();
18255 + *plocation += sym->st_value;
18256 + pax_close_kernel();
18257 break;
18258 case R_386_PC32:
18259 /* Add the value, subtract its postition */
18260 - *location += sym->st_value - (uint32_t)location;
18261 + pax_open_kernel();
18262 + *plocation += sym->st_value - location;
18263 + pax_close_kernel();
18264 break;
18265 default:
18266 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18267 @@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18268 case R_X86_64_NONE:
18269 break;
18270 case R_X86_64_64:
18271 + pax_open_kernel();
18272 *(u64 *)loc = val;
18273 + pax_close_kernel();
18274 break;
18275 case R_X86_64_32:
18276 + pax_open_kernel();
18277 *(u32 *)loc = val;
18278 + pax_close_kernel();
18279 if (val != *(u32 *)loc)
18280 goto overflow;
18281 break;
18282 case R_X86_64_32S:
18283 + pax_open_kernel();
18284 *(s32 *)loc = val;
18285 + pax_close_kernel();
18286 if ((s64)val != *(s32 *)loc)
18287 goto overflow;
18288 break;
18289 case R_X86_64_PC32:
18290 val -= (u64)loc;
18291 + pax_open_kernel();
18292 *(u32 *)loc = val;
18293 + pax_close_kernel();
18294 +
18295 #if 0
18296 if ((s64)val != *(s32 *)loc)
18297 goto overflow;
18298 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
18299 index 32856fa..ce95eaa 100644
18300 --- a/arch/x86/kernel/nmi.c
18301 +++ b/arch/x86/kernel/nmi.c
18302 @@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18303 dotraplinkage notrace __kprobes void
18304 do_nmi(struct pt_regs *regs, long error_code)
18305 {
18306 +
18307 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18308 + if (!user_mode(regs)) {
18309 + unsigned long cs = regs->cs & 0xFFFF;
18310 + unsigned long ip = ktva_ktla(regs->ip);
18311 +
18312 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18313 + regs->ip = ip;
18314 + }
18315 +#endif
18316 +
18317 nmi_nesting_preprocess(regs);
18318
18319 nmi_enter();
18320 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18321 index 676b8c7..870ba04 100644
18322 --- a/arch/x86/kernel/paravirt-spinlocks.c
18323 +++ b/arch/x86/kernel/paravirt-spinlocks.c
18324 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18325 arch_spin_lock(lock);
18326 }
18327
18328 -struct pv_lock_ops pv_lock_ops = {
18329 +struct pv_lock_ops pv_lock_ops __read_only = {
18330 #ifdef CONFIG_SMP
18331 .spin_is_locked = __ticket_spin_is_locked,
18332 .spin_is_contended = __ticket_spin_is_contended,
18333 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18334 index ab13760..01218e0 100644
18335 --- a/arch/x86/kernel/paravirt.c
18336 +++ b/arch/x86/kernel/paravirt.c
18337 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
18338 {
18339 return x;
18340 }
18341 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18342 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18343 +#endif
18344
18345 void __init default_banner(void)
18346 {
18347 @@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18348 if (opfunc == NULL)
18349 /* If there's no function, patch it with a ud2a (BUG) */
18350 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18351 - else if (opfunc == _paravirt_nop)
18352 + else if (opfunc == (void *)_paravirt_nop)
18353 /* If the operation is a nop, then nop the callsite */
18354 ret = paravirt_patch_nop();
18355
18356 /* identity functions just return their single argument */
18357 - else if (opfunc == _paravirt_ident_32)
18358 + else if (opfunc == (void *)_paravirt_ident_32)
18359 ret = paravirt_patch_ident_32(insnbuf, len);
18360 - else if (opfunc == _paravirt_ident_64)
18361 + else if (opfunc == (void *)_paravirt_ident_64)
18362 ret = paravirt_patch_ident_64(insnbuf, len);
18363 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18364 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18365 + ret = paravirt_patch_ident_64(insnbuf, len);
18366 +#endif
18367
18368 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18369 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18370 @@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18371 if (insn_len > len || start == NULL)
18372 insn_len = len;
18373 else
18374 - memcpy(insnbuf, start, insn_len);
18375 + memcpy(insnbuf, ktla_ktva(start), insn_len);
18376
18377 return insn_len;
18378 }
18379 @@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18380 preempt_enable();
18381 }
18382
18383 -struct pv_info pv_info = {
18384 +struct pv_info pv_info __read_only = {
18385 .name = "bare hardware",
18386 .paravirt_enabled = 0,
18387 .kernel_rpl = 0,
18388 @@ -315,16 +322,16 @@ struct pv_info pv_info = {
18389 #endif
18390 };
18391
18392 -struct pv_init_ops pv_init_ops = {
18393 +struct pv_init_ops pv_init_ops __read_only = {
18394 .patch = native_patch,
18395 };
18396
18397 -struct pv_time_ops pv_time_ops = {
18398 +struct pv_time_ops pv_time_ops __read_only = {
18399 .sched_clock = native_sched_clock,
18400 .steal_clock = native_steal_clock,
18401 };
18402
18403 -struct pv_irq_ops pv_irq_ops = {
18404 +struct pv_irq_ops pv_irq_ops __read_only = {
18405 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18406 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18407 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18408 @@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18409 #endif
18410 };
18411
18412 -struct pv_cpu_ops pv_cpu_ops = {
18413 +struct pv_cpu_ops pv_cpu_ops __read_only = {
18414 .cpuid = native_cpuid,
18415 .get_debugreg = native_get_debugreg,
18416 .set_debugreg = native_set_debugreg,
18417 @@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18418 .end_context_switch = paravirt_nop,
18419 };
18420
18421 -struct pv_apic_ops pv_apic_ops = {
18422 +struct pv_apic_ops pv_apic_ops __read_only = {
18423 #ifdef CONFIG_X86_LOCAL_APIC
18424 .startup_ipi_hook = paravirt_nop,
18425 #endif
18426 };
18427
18428 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18429 +#ifdef CONFIG_X86_32
18430 +#ifdef CONFIG_X86_PAE
18431 +/* 64-bit pagetable entries */
18432 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18433 +#else
18434 /* 32-bit pagetable entries */
18435 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18436 +#endif
18437 #else
18438 /* 64-bit pagetable entries */
18439 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18440 #endif
18441
18442 -struct pv_mmu_ops pv_mmu_ops = {
18443 +struct pv_mmu_ops pv_mmu_ops __read_only = {
18444
18445 .read_cr2 = native_read_cr2,
18446 .write_cr2 = native_write_cr2,
18447 @@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18448 .make_pud = PTE_IDENT,
18449
18450 .set_pgd = native_set_pgd,
18451 + .set_pgd_batched = native_set_pgd_batched,
18452 #endif
18453 #endif /* PAGETABLE_LEVELS >= 3 */
18454
18455 @@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18456 },
18457
18458 .set_fixmap = native_set_fixmap,
18459 +
18460 +#ifdef CONFIG_PAX_KERNEXEC
18461 + .pax_open_kernel = native_pax_open_kernel,
18462 + .pax_close_kernel = native_pax_close_kernel,
18463 +#endif
18464 +
18465 };
18466
18467 EXPORT_SYMBOL_GPL(pv_time_ops);
18468 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
18469 index 35ccf75..7a15747 100644
18470 --- a/arch/x86/kernel/pci-iommu_table.c
18471 +++ b/arch/x86/kernel/pci-iommu_table.c
18472 @@ -2,7 +2,7 @@
18473 #include <asm/iommu_table.h>
18474 #include <linux/string.h>
18475 #include <linux/kallsyms.h>
18476 -
18477 +#include <linux/sched.h>
18478
18479 #define DEBUG 1
18480
18481 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18482 index 1d92a5a..7bc8c29 100644
18483 --- a/arch/x86/kernel/process.c
18484 +++ b/arch/x86/kernel/process.c
18485 @@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18486
18487 void free_thread_info(struct thread_info *ti)
18488 {
18489 - free_thread_xstate(ti->task);
18490 free_pages((unsigned long)ti, THREAD_ORDER);
18491 }
18492
18493 +static struct kmem_cache *task_struct_cachep;
18494 +
18495 void arch_task_cache_init(void)
18496 {
18497 - task_xstate_cachep =
18498 - kmem_cache_create("task_xstate", xstate_size,
18499 + /* create a slab on which task_structs can be allocated */
18500 + task_struct_cachep =
18501 + kmem_cache_create("task_struct", sizeof(struct task_struct),
18502 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18503 +
18504 + task_xstate_cachep =
18505 + kmem_cache_create("task_xstate", xstate_size,
18506 __alignof__(union thread_xstate),
18507 - SLAB_PANIC | SLAB_NOTRACK, NULL);
18508 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18509 +}
18510 +
18511 +struct task_struct *alloc_task_struct_node(int node)
18512 +{
18513 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18514 +}
18515 +
18516 +void free_task_struct(struct task_struct *task)
18517 +{
18518 + free_thread_xstate(task);
18519 + kmem_cache_free(task_struct_cachep, task);
18520 }
18521
18522 /*
18523 @@ -91,7 +108,7 @@ void exit_thread(void)
18524 unsigned long *bp = t->io_bitmap_ptr;
18525
18526 if (bp) {
18527 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18528 + struct tss_struct *tss = init_tss + get_cpu();
18529
18530 t->io_bitmap_ptr = NULL;
18531 clear_thread_flag(TIF_IO_BITMAP);
18532 @@ -127,7 +144,7 @@ void show_regs_common(void)
18533
18534 printk(KERN_CONT "\n");
18535 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18536 - current->pid, current->comm, print_tainted(),
18537 + task_pid_nr(current), current->comm, print_tainted(),
18538 init_utsname()->release,
18539 (int)strcspn(init_utsname()->version, " "),
18540 init_utsname()->version);
18541 @@ -141,6 +158,9 @@ void flush_thread(void)
18542 {
18543 struct task_struct *tsk = current;
18544
18545 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18546 + loadsegment(gs, 0);
18547 +#endif
18548 flush_ptrace_hw_breakpoint(tsk);
18549 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18550 /*
18551 @@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18552 regs.di = (unsigned long) arg;
18553
18554 #ifdef CONFIG_X86_32
18555 - regs.ds = __USER_DS;
18556 - regs.es = __USER_DS;
18557 + regs.ds = __KERNEL_DS;
18558 + regs.es = __KERNEL_DS;
18559 regs.fs = __KERNEL_PERCPU;
18560 - regs.gs = __KERNEL_STACK_CANARY;
18561 + savesegment(gs, regs.gs);
18562 #else
18563 regs.ss = __KERNEL_DS;
18564 #endif
18565 @@ -392,7 +412,7 @@ static void __exit_idle(void)
18566 void exit_idle(void)
18567 {
18568 /* idle loop has pid 0 */
18569 - if (current->pid)
18570 + if (task_pid_nr(current))
18571 return;
18572 __exit_idle();
18573 }
18574 @@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
18575
18576 return ret;
18577 }
18578 -void stop_this_cpu(void *dummy)
18579 +__noreturn void stop_this_cpu(void *dummy)
18580 {
18581 local_irq_disable();
18582 /*
18583 @@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18584 }
18585 early_param("idle", idle_setup);
18586
18587 -unsigned long arch_align_stack(unsigned long sp)
18588 +#ifdef CONFIG_PAX_RANDKSTACK
18589 +void pax_randomize_kstack(struct pt_regs *regs)
18590 {
18591 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18592 - sp -= get_random_int() % 8192;
18593 - return sp & ~0xf;
18594 -}
18595 + struct thread_struct *thread = &current->thread;
18596 + unsigned long time;
18597
18598 -unsigned long arch_randomize_brk(struct mm_struct *mm)
18599 -{
18600 - unsigned long range_end = mm->brk + 0x02000000;
18601 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18602 -}
18603 + if (!randomize_va_space)
18604 + return;
18605 +
18606 + if (v8086_mode(regs))
18607 + return;
18608
18609 + rdtscl(time);
18610 +
18611 + /* P4 seems to return a 0 LSB, ignore it */
18612 +#ifdef CONFIG_MPENTIUM4
18613 + time &= 0x3EUL;
18614 + time <<= 2;
18615 +#elif defined(CONFIG_X86_64)
18616 + time &= 0xFUL;
18617 + time <<= 4;
18618 +#else
18619 + time &= 0x1FUL;
18620 + time <<= 3;
18621 +#endif
18622 +
18623 + thread->sp0 ^= time;
18624 + load_sp0(init_tss + smp_processor_id(), thread);
18625 +
18626 +#ifdef CONFIG_X86_64
18627 + percpu_write(kernel_stack, thread->sp0);
18628 +#endif
18629 +}
18630 +#endif
18631 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18632 index ae68473..7b0bb71 100644
18633 --- a/arch/x86/kernel/process_32.c
18634 +++ b/arch/x86/kernel/process_32.c
18635 @@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18636 unsigned long thread_saved_pc(struct task_struct *tsk)
18637 {
18638 return ((unsigned long *)tsk->thread.sp)[3];
18639 +//XXX return tsk->thread.eip;
18640 }
18641
18642 void __show_regs(struct pt_regs *regs, int all)
18643 @@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18644 unsigned long sp;
18645 unsigned short ss, gs;
18646
18647 - if (user_mode_vm(regs)) {
18648 + if (user_mode(regs)) {
18649 sp = regs->sp;
18650 ss = regs->ss & 0xffff;
18651 - gs = get_user_gs(regs);
18652 } else {
18653 sp = kernel_stack_pointer(regs);
18654 savesegment(ss, ss);
18655 - savesegment(gs, gs);
18656 }
18657 + gs = get_user_gs(regs);
18658
18659 show_regs_common();
18660
18661 @@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18662 struct task_struct *tsk;
18663 int err;
18664
18665 - childregs = task_pt_regs(p);
18666 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18667 *childregs = *regs;
18668 childregs->ax = 0;
18669 childregs->sp = sp;
18670
18671 p->thread.sp = (unsigned long) childregs;
18672 p->thread.sp0 = (unsigned long) (childregs+1);
18673 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18674
18675 p->thread.ip = (unsigned long) ret_from_fork;
18676
18677 @@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18678 struct thread_struct *prev = &prev_p->thread,
18679 *next = &next_p->thread;
18680 int cpu = smp_processor_id();
18681 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18682 + struct tss_struct *tss = init_tss + cpu;
18683 fpu_switch_t fpu;
18684
18685 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18686 @@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18687 */
18688 lazy_save_gs(prev->gs);
18689
18690 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18691 + __set_fs(task_thread_info(next_p)->addr_limit);
18692 +#endif
18693 +
18694 /*
18695 * Load the per-thread Thread-Local Storage descriptor.
18696 */
18697 @@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18698 */
18699 arch_end_context_switch(next_p);
18700
18701 + percpu_write(current_task, next_p);
18702 + percpu_write(current_tinfo, &next_p->tinfo);
18703 +
18704 /*
18705 * Restore %gs if needed (which is common)
18706 */
18707 @@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18708
18709 switch_fpu_finish(next_p, fpu);
18710
18711 - percpu_write(current_task, next_p);
18712 -
18713 return prev_p;
18714 }
18715
18716 @@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18717 } while (count++ < 16);
18718 return 0;
18719 }
18720 -
18721 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18722 index 43d8b48..c45d566 100644
18723 --- a/arch/x86/kernel/process_64.c
18724 +++ b/arch/x86/kernel/process_64.c
18725 @@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18726 struct pt_regs *childregs;
18727 struct task_struct *me = current;
18728
18729 - childregs = ((struct pt_regs *)
18730 - (THREAD_SIZE + task_stack_page(p))) - 1;
18731 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18732 *childregs = *regs;
18733
18734 childregs->ax = 0;
18735 @@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18736 p->thread.sp = (unsigned long) childregs;
18737 p->thread.sp0 = (unsigned long) (childregs+1);
18738 p->thread.usersp = me->thread.usersp;
18739 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18740
18741 set_tsk_thread_flag(p, TIF_FORK);
18742
18743 @@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18744 struct thread_struct *prev = &prev_p->thread;
18745 struct thread_struct *next = &next_p->thread;
18746 int cpu = smp_processor_id();
18747 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
18748 + struct tss_struct *tss = init_tss + cpu;
18749 unsigned fsindex, gsindex;
18750 fpu_switch_t fpu;
18751
18752 @@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18753 prev->usersp = percpu_read(old_rsp);
18754 percpu_write(old_rsp, next->usersp);
18755 percpu_write(current_task, next_p);
18756 + percpu_write(current_tinfo, &next_p->tinfo);
18757
18758 - percpu_write(kernel_stack,
18759 - (unsigned long)task_stack_page(next_p) +
18760 - THREAD_SIZE - KERNEL_STACK_OFFSET);
18761 + percpu_write(kernel_stack, next->sp0);
18762
18763 /*
18764 * Now maybe reload the debug registers and handle I/O bitmaps
18765 @@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18766 if (!p || p == current || p->state == TASK_RUNNING)
18767 return 0;
18768 stack = (unsigned long)task_stack_page(p);
18769 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18770 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18771 return 0;
18772 fp = *(u64 *)(p->thread.sp);
18773 do {
18774 - if (fp < (unsigned long)stack ||
18775 - fp >= (unsigned long)stack+THREAD_SIZE)
18776 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18777 return 0;
18778 ip = *(u64 *)(fp+8);
18779 if (!in_sched_functions(ip))
18780 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18781 index cf11783..e7ce551 100644
18782 --- a/arch/x86/kernel/ptrace.c
18783 +++ b/arch/x86/kernel/ptrace.c
18784 @@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
18785 unsigned long addr, unsigned long data)
18786 {
18787 int ret;
18788 - unsigned long __user *datap = (unsigned long __user *)data;
18789 + unsigned long __user *datap = (__force unsigned long __user *)data;
18790
18791 switch (request) {
18792 /* read the word at location addr in the USER area. */
18793 @@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
18794 if ((int) addr < 0)
18795 return -EIO;
18796 ret = do_get_thread_area(child, addr,
18797 - (struct user_desc __user *)data);
18798 + (__force struct user_desc __user *) data);
18799 break;
18800
18801 case PTRACE_SET_THREAD_AREA:
18802 if ((int) addr < 0)
18803 return -EIO;
18804 ret = do_set_thread_area(child, addr,
18805 - (struct user_desc __user *)data, 0);
18806 + (__force struct user_desc __user *) data, 0);
18807 break;
18808 #endif
18809
18810 @@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18811 memset(info, 0, sizeof(*info));
18812 info->si_signo = SIGTRAP;
18813 info->si_code = si_code;
18814 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18815 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18816 }
18817
18818 void user_single_step_siginfo(struct task_struct *tsk,
18819 @@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18820 # define IS_IA32 0
18821 #endif
18822
18823 +#ifdef CONFIG_GRKERNSEC_SETXID
18824 +extern void gr_delayed_cred_worker(void);
18825 +#endif
18826 +
18827 /*
18828 * We must return the syscall number to actually look up in the table.
18829 * This can be -1L to skip running any syscall at all.
18830 @@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18831 {
18832 long ret = 0;
18833
18834 +#ifdef CONFIG_GRKERNSEC_SETXID
18835 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18836 + gr_delayed_cred_worker();
18837 +#endif
18838 +
18839 /*
18840 * If we stepped into a sysenter/syscall insn, it trapped in
18841 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
18842 @@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18843 {
18844 bool step;
18845
18846 +#ifdef CONFIG_GRKERNSEC_SETXID
18847 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18848 + gr_delayed_cred_worker();
18849 +#endif
18850 +
18851 audit_syscall_exit(regs);
18852
18853 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
18854 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18855 index 42eb330..139955c 100644
18856 --- a/arch/x86/kernel/pvclock.c
18857 +++ b/arch/x86/kernel/pvclock.c
18858 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
18859 return pv_tsc_khz;
18860 }
18861
18862 -static atomic64_t last_value = ATOMIC64_INIT(0);
18863 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18864
18865 void pvclock_resume(void)
18866 {
18867 - atomic64_set(&last_value, 0);
18868 + atomic64_set_unchecked(&last_value, 0);
18869 }
18870
18871 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18872 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
18873 * updating at the same time, and one of them could be slightly behind,
18874 * making the assumption that last_value always go forward fail to hold.
18875 */
18876 - last = atomic64_read(&last_value);
18877 + last = atomic64_read_unchecked(&last_value);
18878 do {
18879 if (ret < last)
18880 return last;
18881 - last = atomic64_cmpxchg(&last_value, last, ret);
18882 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18883 } while (unlikely(last != ret));
18884
18885 return ret;
18886 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18887 index 3034ee5..7cfbfa6 100644
18888 --- a/arch/x86/kernel/reboot.c
18889 +++ b/arch/x86/kernel/reboot.c
18890 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
18891 EXPORT_SYMBOL(pm_power_off);
18892
18893 static const struct desc_ptr no_idt = {};
18894 -static int reboot_mode;
18895 +static unsigned short reboot_mode;
18896 enum reboot_type reboot_type = BOOT_ACPI;
18897 int reboot_force;
18898
18899 @@ -335,13 +335,17 @@ core_initcall(reboot_init);
18900 extern const unsigned char machine_real_restart_asm[];
18901 extern const u64 machine_real_restart_gdt[3];
18902
18903 -void machine_real_restart(unsigned int type)
18904 +__noreturn void machine_real_restart(unsigned int type)
18905 {
18906 void *restart_va;
18907 unsigned long restart_pa;
18908 - void (*restart_lowmem)(unsigned int);
18909 + void (* __noreturn restart_lowmem)(unsigned int);
18910 u64 *lowmem_gdt;
18911
18912 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18913 + struct desc_struct *gdt;
18914 +#endif
18915 +
18916 local_irq_disable();
18917
18918 /* Write zero to CMOS register number 0x0f, which the BIOS POST
18919 @@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
18920 boot)". This seems like a fairly standard thing that gets set by
18921 REBOOT.COM programs, and the previous reset routine did this
18922 too. */
18923 - *((unsigned short *)0x472) = reboot_mode;
18924 + *(unsigned short *)(__va(0x472)) = reboot_mode;
18925
18926 /* Patch the GDT in the low memory trampoline */
18927 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18928
18929 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18930 restart_pa = virt_to_phys(restart_va);
18931 - restart_lowmem = (void (*)(unsigned int))restart_pa;
18932 + restart_lowmem = (void *)restart_pa;
18933
18934 /* GDT[0]: GDT self-pointer */
18935 lowmem_gdt[0] =
18936 @@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
18937 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18938
18939 /* Jump to the identity-mapped low memory code */
18940 +
18941 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18942 + gdt = get_cpu_gdt_table(smp_processor_id());
18943 + pax_open_kernel();
18944 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18945 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18946 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18947 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18948 +#endif
18949 +#ifdef CONFIG_PAX_KERNEXEC
18950 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18951 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18952 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18953 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18954 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18955 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18956 +#endif
18957 + pax_close_kernel();
18958 +#endif
18959 +
18960 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18961 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18962 + unreachable();
18963 +#else
18964 restart_lowmem(type);
18965 +#endif
18966 +
18967 }
18968 #ifdef CONFIG_APM_MODULE
18969 EXPORT_SYMBOL(machine_real_restart);
18970 @@ -564,7 +594,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
18971 * try to force a triple fault and then cycle between hitting the keyboard
18972 * controller and doing that
18973 */
18974 -static void native_machine_emergency_restart(void)
18975 +__noreturn static void native_machine_emergency_restart(void)
18976 {
18977 int i;
18978 int attempt = 0;
18979 @@ -688,13 +718,13 @@ void native_machine_shutdown(void)
18980 #endif
18981 }
18982
18983 -static void __machine_emergency_restart(int emergency)
18984 +static __noreturn void __machine_emergency_restart(int emergency)
18985 {
18986 reboot_emergency = emergency;
18987 machine_ops.emergency_restart();
18988 }
18989
18990 -static void native_machine_restart(char *__unused)
18991 +static __noreturn void native_machine_restart(char *__unused)
18992 {
18993 printk("machine restart\n");
18994
18995 @@ -703,7 +733,7 @@ static void native_machine_restart(char *__unused)
18996 __machine_emergency_restart(0);
18997 }
18998
18999 -static void native_machine_halt(void)
19000 +static __noreturn void native_machine_halt(void)
19001 {
19002 /* stop other cpus and apics */
19003 machine_shutdown();
19004 @@ -714,7 +744,7 @@ static void native_machine_halt(void)
19005 stop_this_cpu(NULL);
19006 }
19007
19008 -static void native_machine_power_off(void)
19009 +__noreturn static void native_machine_power_off(void)
19010 {
19011 if (pm_power_off) {
19012 if (!reboot_force)
19013 @@ -723,6 +753,7 @@ static void native_machine_power_off(void)
19014 }
19015 /* a fallback in case there is no PM info available */
19016 tboot_shutdown(TB_SHUTDOWN_HALT);
19017 + unreachable();
19018 }
19019
19020 struct machine_ops machine_ops = {
19021 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19022 index 7a6f3b3..bed145d7 100644
19023 --- a/arch/x86/kernel/relocate_kernel_64.S
19024 +++ b/arch/x86/kernel/relocate_kernel_64.S
19025 @@ -11,6 +11,7 @@
19026 #include <asm/kexec.h>
19027 #include <asm/processor-flags.h>
19028 #include <asm/pgtable_types.h>
19029 +#include <asm/alternative-asm.h>
19030
19031 /*
19032 * Must be relocatable PIC code callable as a C function
19033 @@ -160,13 +161,14 @@ identity_mapped:
19034 xorq %rbp, %rbp
19035 xorq %r8, %r8
19036 xorq %r9, %r9
19037 - xorq %r10, %r9
19038 + xorq %r10, %r10
19039 xorq %r11, %r11
19040 xorq %r12, %r12
19041 xorq %r13, %r13
19042 xorq %r14, %r14
19043 xorq %r15, %r15
19044
19045 + pax_force_retaddr 0, 1
19046 ret
19047
19048 1:
19049 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19050 index 1a29015..712f324 100644
19051 --- a/arch/x86/kernel/setup.c
19052 +++ b/arch/x86/kernel/setup.c
19053 @@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
19054
19055 switch (data->type) {
19056 case SETUP_E820_EXT:
19057 - parse_e820_ext(data);
19058 + parse_e820_ext((struct setup_data __force_kernel *)data);
19059 break;
19060 case SETUP_DTB:
19061 add_dtb(pa_data);
19062 @@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
19063 * area (640->1Mb) as ram even though it is not.
19064 * take them out.
19065 */
19066 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19067 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19068 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19069 }
19070
19071 @@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
19072
19073 if (!boot_params.hdr.root_flags)
19074 root_mountflags &= ~MS_RDONLY;
19075 - init_mm.start_code = (unsigned long) _text;
19076 - init_mm.end_code = (unsigned long) _etext;
19077 + init_mm.start_code = ktla_ktva((unsigned long) _text);
19078 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
19079 init_mm.end_data = (unsigned long) _edata;
19080 init_mm.brk = _brk_end;
19081
19082 - code_resource.start = virt_to_phys(_text);
19083 - code_resource.end = virt_to_phys(_etext)-1;
19084 - data_resource.start = virt_to_phys(_etext);
19085 + code_resource.start = virt_to_phys(ktla_ktva(_text));
19086 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19087 + data_resource.start = virt_to_phys(_sdata);
19088 data_resource.end = virt_to_phys(_edata)-1;
19089 bss_resource.start = virt_to_phys(&__bss_start);
19090 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19091 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19092 index 5a98aa2..2f9288d 100644
19093 --- a/arch/x86/kernel/setup_percpu.c
19094 +++ b/arch/x86/kernel/setup_percpu.c
19095 @@ -21,19 +21,17 @@
19096 #include <asm/cpu.h>
19097 #include <asm/stackprotector.h>
19098
19099 -DEFINE_PER_CPU(int, cpu_number);
19100 +#ifdef CONFIG_SMP
19101 +DEFINE_PER_CPU(unsigned int, cpu_number);
19102 EXPORT_PER_CPU_SYMBOL(cpu_number);
19103 +#endif
19104
19105 -#ifdef CONFIG_X86_64
19106 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19107 -#else
19108 -#define BOOT_PERCPU_OFFSET 0
19109 -#endif
19110
19111 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19112 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19113
19114 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19115 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19116 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19117 };
19118 EXPORT_SYMBOL(__per_cpu_offset);
19119 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
19120 {
19121 #ifdef CONFIG_X86_32
19122 struct desc_struct gdt;
19123 + unsigned long base = per_cpu_offset(cpu);
19124
19125 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19126 - 0x2 | DESCTYPE_S, 0x8);
19127 - gdt.s = 1;
19128 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19129 + 0x83 | DESCTYPE_S, 0xC);
19130 write_gdt_entry(get_cpu_gdt_table(cpu),
19131 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19132 #endif
19133 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
19134 /* alrighty, percpu areas up and running */
19135 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19136 for_each_possible_cpu(cpu) {
19137 +#ifdef CONFIG_CC_STACKPROTECTOR
19138 +#ifdef CONFIG_X86_32
19139 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
19140 +#endif
19141 +#endif
19142 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19143 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19144 per_cpu(cpu_number, cpu) = cpu;
19145 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
19146 */
19147 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
19148 #endif
19149 +#ifdef CONFIG_CC_STACKPROTECTOR
19150 +#ifdef CONFIG_X86_32
19151 + if (!cpu)
19152 + per_cpu(stack_canary.canary, cpu) = canary;
19153 +#endif
19154 +#endif
19155 /*
19156 * Up to this point, the boot CPU has been using .init.data
19157 * area. Reload any changed state for the boot CPU.
19158 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19159 index 115eac4..c0591d5 100644
19160 --- a/arch/x86/kernel/signal.c
19161 +++ b/arch/x86/kernel/signal.c
19162 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
19163 * Align the stack pointer according to the i386 ABI,
19164 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19165 */
19166 - sp = ((sp + 4) & -16ul) - 4;
19167 + sp = ((sp - 12) & -16ul) - 4;
19168 #else /* !CONFIG_X86_32 */
19169 sp = round_down(sp, 16) - 8;
19170 #endif
19171 @@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19172 * Return an always-bogus address instead so we will die with SIGSEGV.
19173 */
19174 if (onsigstack && !likely(on_sig_stack(sp)))
19175 - return (void __user *)-1L;
19176 + return (__force void __user *)-1L;
19177
19178 /* save i387 state */
19179 if (used_math() && save_i387_xstate(*fpstate) < 0)
19180 - return (void __user *)-1L;
19181 + return (__force void __user *)-1L;
19182
19183 return (void __user *)sp;
19184 }
19185 @@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19186 }
19187
19188 if (current->mm->context.vdso)
19189 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19190 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19191 else
19192 - restorer = &frame->retcode;
19193 + restorer = (void __user *)&frame->retcode;
19194 if (ka->sa.sa_flags & SA_RESTORER)
19195 restorer = ka->sa.sa_restorer;
19196
19197 @@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19198 * reasons and because gdb uses it as a signature to notice
19199 * signal handler stack frames.
19200 */
19201 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19202 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19203
19204 if (err)
19205 return -EFAULT;
19206 @@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19207 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19208
19209 /* Set up to return from userspace. */
19210 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19211 + if (current->mm->context.vdso)
19212 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19213 + else
19214 + restorer = (void __user *)&frame->retcode;
19215 if (ka->sa.sa_flags & SA_RESTORER)
19216 restorer = ka->sa.sa_restorer;
19217 put_user_ex(restorer, &frame->pretcode);
19218 @@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19219 * reasons and because gdb uses it as a signature to notice
19220 * signal handler stack frames.
19221 */
19222 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19223 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19224 } put_user_catch(err);
19225
19226 if (err)
19227 @@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
19228 * X86_32: vm86 regs switched out by assembly code before reaching
19229 * here, so testing against kernel CS suffices.
19230 */
19231 - if (!user_mode(regs))
19232 + if (!user_mode_novm(regs))
19233 return;
19234
19235 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
19236 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19237 index 6e1e406..edfb7cb 100644
19238 --- a/arch/x86/kernel/smpboot.c
19239 +++ b/arch/x86/kernel/smpboot.c
19240 @@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19241 set_idle_for_cpu(cpu, c_idle.idle);
19242 do_rest:
19243 per_cpu(current_task, cpu) = c_idle.idle;
19244 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19245 #ifdef CONFIG_X86_32
19246 /* Stack for startup_32 can be just as for start_secondary onwards */
19247 irq_ctx_init(cpu);
19248 #else
19249 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19250 initial_gs = per_cpu_offset(cpu);
19251 - per_cpu(kernel_stack, cpu) =
19252 - (unsigned long)task_stack_page(c_idle.idle) -
19253 - KERNEL_STACK_OFFSET + THREAD_SIZE;
19254 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19255 #endif
19256 +
19257 + pax_open_kernel();
19258 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19259 + pax_close_kernel();
19260 +
19261 initial_code = (unsigned long)start_secondary;
19262 stack_start = c_idle.idle->thread.sp;
19263
19264 @@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19265
19266 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19267
19268 +#ifdef CONFIG_PAX_PER_CPU_PGD
19269 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19270 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19271 + KERNEL_PGD_PTRS);
19272 +#endif
19273 +
19274 err = do_boot_cpu(apicid, cpu);
19275 if (err) {
19276 pr_debug("do_boot_cpu failed %d\n", err);
19277 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19278 index c346d11..d43b163 100644
19279 --- a/arch/x86/kernel/step.c
19280 +++ b/arch/x86/kernel/step.c
19281 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19282 struct desc_struct *desc;
19283 unsigned long base;
19284
19285 - seg &= ~7UL;
19286 + seg >>= 3;
19287
19288 mutex_lock(&child->mm->context.lock);
19289 - if (unlikely((seg >> 3) >= child->mm->context.size))
19290 + if (unlikely(seg >= child->mm->context.size))
19291 addr = -1L; /* bogus selector, access would fault */
19292 else {
19293 desc = child->mm->context.ldt + seg;
19294 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19295 addr += base;
19296 }
19297 mutex_unlock(&child->mm->context.lock);
19298 - }
19299 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19300 + addr = ktla_ktva(addr);
19301
19302 return addr;
19303 }
19304 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19305 unsigned char opcode[15];
19306 unsigned long addr = convert_ip_to_linear(child, regs);
19307
19308 + if (addr == -EINVAL)
19309 + return 0;
19310 +
19311 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19312 for (i = 0; i < copied; i++) {
19313 switch (opcode[i]) {
19314 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19315 index 0b0cb5f..db6b9ed 100644
19316 --- a/arch/x86/kernel/sys_i386_32.c
19317 +++ b/arch/x86/kernel/sys_i386_32.c
19318 @@ -24,17 +24,224 @@
19319
19320 #include <asm/syscalls.h>
19321
19322 -/*
19323 - * Do a system call from kernel instead of calling sys_execve so we
19324 - * end up with proper pt_regs.
19325 - */
19326 -int kernel_execve(const char *filename,
19327 - const char *const argv[],
19328 - const char *const envp[])
19329 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19330 {
19331 - long __res;
19332 - asm volatile ("int $0x80"
19333 - : "=a" (__res)
19334 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19335 - return __res;
19336 + unsigned long pax_task_size = TASK_SIZE;
19337 +
19338 +#ifdef CONFIG_PAX_SEGMEXEC
19339 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19340 + pax_task_size = SEGMEXEC_TASK_SIZE;
19341 +#endif
19342 +
19343 + if (len > pax_task_size || addr > pax_task_size - len)
19344 + return -EINVAL;
19345 +
19346 + return 0;
19347 +}
19348 +
19349 +unsigned long
19350 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
19351 + unsigned long len, unsigned long pgoff, unsigned long flags)
19352 +{
19353 + struct mm_struct *mm = current->mm;
19354 + struct vm_area_struct *vma;
19355 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19356 +
19357 +#ifdef CONFIG_PAX_SEGMEXEC
19358 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19359 + pax_task_size = SEGMEXEC_TASK_SIZE;
19360 +#endif
19361 +
19362 + pax_task_size -= PAGE_SIZE;
19363 +
19364 + if (len > pax_task_size)
19365 + return -ENOMEM;
19366 +
19367 + if (flags & MAP_FIXED)
19368 + return addr;
19369 +
19370 +#ifdef CONFIG_PAX_RANDMMAP
19371 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19372 +#endif
19373 +
19374 + if (addr) {
19375 + addr = PAGE_ALIGN(addr);
19376 + if (pax_task_size - len >= addr) {
19377 + vma = find_vma(mm, addr);
19378 + if (check_heap_stack_gap(vma, addr, len))
19379 + return addr;
19380 + }
19381 + }
19382 + if (len > mm->cached_hole_size) {
19383 + start_addr = addr = mm->free_area_cache;
19384 + } else {
19385 + start_addr = addr = mm->mmap_base;
19386 + mm->cached_hole_size = 0;
19387 + }
19388 +
19389 +#ifdef CONFIG_PAX_PAGEEXEC
19390 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19391 + start_addr = 0x00110000UL;
19392 +
19393 +#ifdef CONFIG_PAX_RANDMMAP
19394 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19395 + start_addr += mm->delta_mmap & 0x03FFF000UL;
19396 +#endif
19397 +
19398 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19399 + start_addr = addr = mm->mmap_base;
19400 + else
19401 + addr = start_addr;
19402 + }
19403 +#endif
19404 +
19405 +full_search:
19406 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19407 + /* At this point: (!vma || addr < vma->vm_end). */
19408 + if (pax_task_size - len < addr) {
19409 + /*
19410 + * Start a new search - just in case we missed
19411 + * some holes.
19412 + */
19413 + if (start_addr != mm->mmap_base) {
19414 + start_addr = addr = mm->mmap_base;
19415 + mm->cached_hole_size = 0;
19416 + goto full_search;
19417 + }
19418 + return -ENOMEM;
19419 + }
19420 + if (check_heap_stack_gap(vma, addr, len))
19421 + break;
19422 + if (addr + mm->cached_hole_size < vma->vm_start)
19423 + mm->cached_hole_size = vma->vm_start - addr;
19424 + addr = vma->vm_end;
19425 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
19426 + start_addr = addr = mm->mmap_base;
19427 + mm->cached_hole_size = 0;
19428 + goto full_search;
19429 + }
19430 + }
19431 +
19432 + /*
19433 + * Remember the place where we stopped the search:
19434 + */
19435 + mm->free_area_cache = addr + len;
19436 + return addr;
19437 +}
19438 +
19439 +unsigned long
19440 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19441 + const unsigned long len, const unsigned long pgoff,
19442 + const unsigned long flags)
19443 +{
19444 + struct vm_area_struct *vma;
19445 + struct mm_struct *mm = current->mm;
19446 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19447 +
19448 +#ifdef CONFIG_PAX_SEGMEXEC
19449 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19450 + pax_task_size = SEGMEXEC_TASK_SIZE;
19451 +#endif
19452 +
19453 + pax_task_size -= PAGE_SIZE;
19454 +
19455 + /* requested length too big for entire address space */
19456 + if (len > pax_task_size)
19457 + return -ENOMEM;
19458 +
19459 + if (flags & MAP_FIXED)
19460 + return addr;
19461 +
19462 +#ifdef CONFIG_PAX_PAGEEXEC
19463 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19464 + goto bottomup;
19465 +#endif
19466 +
19467 +#ifdef CONFIG_PAX_RANDMMAP
19468 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19469 +#endif
19470 +
19471 + /* requesting a specific address */
19472 + if (addr) {
19473 + addr = PAGE_ALIGN(addr);
19474 + if (pax_task_size - len >= addr) {
19475 + vma = find_vma(mm, addr);
19476 + if (check_heap_stack_gap(vma, addr, len))
19477 + return addr;
19478 + }
19479 + }
19480 +
19481 + /* check if free_area_cache is useful for us */
19482 + if (len <= mm->cached_hole_size) {
19483 + mm->cached_hole_size = 0;
19484 + mm->free_area_cache = mm->mmap_base;
19485 + }
19486 +
19487 + /* either no address requested or can't fit in requested address hole */
19488 + addr = mm->free_area_cache;
19489 +
19490 + /* make sure it can fit in the remaining address space */
19491 + if (addr > len) {
19492 + vma = find_vma(mm, addr-len);
19493 + if (check_heap_stack_gap(vma, addr - len, len))
19494 + /* remember the address as a hint for next time */
19495 + return (mm->free_area_cache = addr-len);
19496 + }
19497 +
19498 + if (mm->mmap_base < len)
19499 + goto bottomup;
19500 +
19501 + addr = mm->mmap_base-len;
19502 +
19503 + do {
19504 + /*
19505 + * Lookup failure means no vma is above this address,
19506 + * else if new region fits below vma->vm_start,
19507 + * return with success:
19508 + */
19509 + vma = find_vma(mm, addr);
19510 + if (check_heap_stack_gap(vma, addr, len))
19511 + /* remember the address as a hint for next time */
19512 + return (mm->free_area_cache = addr);
19513 +
19514 + /* remember the largest hole we saw so far */
19515 + if (addr + mm->cached_hole_size < vma->vm_start)
19516 + mm->cached_hole_size = vma->vm_start - addr;
19517 +
19518 + /* try just below the current vma->vm_start */
19519 + addr = skip_heap_stack_gap(vma, len);
19520 + } while (!IS_ERR_VALUE(addr));
19521 +
19522 +bottomup:
19523 + /*
19524 + * A failed mmap() very likely causes application failure,
19525 + * so fall back to the bottom-up function here. This scenario
19526 + * can happen with large stack limits and large mmap()
19527 + * allocations.
19528 + */
19529 +
19530 +#ifdef CONFIG_PAX_SEGMEXEC
19531 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19532 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19533 + else
19534 +#endif
19535 +
19536 + mm->mmap_base = TASK_UNMAPPED_BASE;
19537 +
19538 +#ifdef CONFIG_PAX_RANDMMAP
19539 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19540 + mm->mmap_base += mm->delta_mmap;
19541 +#endif
19542 +
19543 + mm->free_area_cache = mm->mmap_base;
19544 + mm->cached_hole_size = ~0UL;
19545 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19546 + /*
19547 + * Restore the topdown base:
19548 + */
19549 + mm->mmap_base = base;
19550 + mm->free_area_cache = base;
19551 + mm->cached_hole_size = ~0UL;
19552 +
19553 + return addr;
19554 }
19555 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19556 index b4d3c39..82bb73b 100644
19557 --- a/arch/x86/kernel/sys_x86_64.c
19558 +++ b/arch/x86/kernel/sys_x86_64.c
19559 @@ -95,8 +95,8 @@ out:
19560 return error;
19561 }
19562
19563 -static void find_start_end(unsigned long flags, unsigned long *begin,
19564 - unsigned long *end)
19565 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
19566 + unsigned long *begin, unsigned long *end)
19567 {
19568 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
19569 unsigned long new_begin;
19570 @@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19571 *begin = new_begin;
19572 }
19573 } else {
19574 - *begin = TASK_UNMAPPED_BASE;
19575 + *begin = mm->mmap_base;
19576 *end = TASK_SIZE;
19577 }
19578 }
19579 @@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19580 if (flags & MAP_FIXED)
19581 return addr;
19582
19583 - find_start_end(flags, &begin, &end);
19584 + find_start_end(mm, flags, &begin, &end);
19585
19586 if (len > end)
19587 return -ENOMEM;
19588
19589 +#ifdef CONFIG_PAX_RANDMMAP
19590 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19591 +#endif
19592 +
19593 if (addr) {
19594 addr = PAGE_ALIGN(addr);
19595 vma = find_vma(mm, addr);
19596 - if (end - len >= addr &&
19597 - (!vma || addr + len <= vma->vm_start))
19598 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19599 return addr;
19600 }
19601 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
19602 @@ -172,7 +175,7 @@ full_search:
19603 }
19604 return -ENOMEM;
19605 }
19606 - if (!vma || addr + len <= vma->vm_start) {
19607 + if (check_heap_stack_gap(vma, addr, len)) {
19608 /*
19609 * Remember the place where we stopped the search:
19610 */
19611 @@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19612 {
19613 struct vm_area_struct *vma;
19614 struct mm_struct *mm = current->mm;
19615 - unsigned long addr = addr0, start_addr;
19616 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
19617
19618 /* requested length too big for entire address space */
19619 if (len > TASK_SIZE)
19620 @@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19621 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
19622 goto bottomup;
19623
19624 +#ifdef CONFIG_PAX_RANDMMAP
19625 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19626 +#endif
19627 +
19628 /* requesting a specific address */
19629 if (addr) {
19630 addr = PAGE_ALIGN(addr);
19631 - vma = find_vma(mm, addr);
19632 - if (TASK_SIZE - len >= addr &&
19633 - (!vma || addr + len <= vma->vm_start))
19634 - return addr;
19635 + if (TASK_SIZE - len >= addr) {
19636 + vma = find_vma(mm, addr);
19637 + if (check_heap_stack_gap(vma, addr, len))
19638 + return addr;
19639 + }
19640 }
19641
19642 /* check if free_area_cache is useful for us */
19643 @@ -240,7 +248,7 @@ try_again:
19644 * return with success:
19645 */
19646 vma = find_vma(mm, addr);
19647 - if (!vma || addr+len <= vma->vm_start)
19648 + if (check_heap_stack_gap(vma, addr, len))
19649 /* remember the address as a hint for next time */
19650 return mm->free_area_cache = addr;
19651
19652 @@ -249,8 +257,8 @@ try_again:
19653 mm->cached_hole_size = vma->vm_start - addr;
19654
19655 /* try just below the current vma->vm_start */
19656 - addr = vma->vm_start-len;
19657 - } while (len < vma->vm_start);
19658 + addr = skip_heap_stack_gap(vma, len);
19659 + } while (!IS_ERR_VALUE(addr));
19660
19661 fail:
19662 /*
19663 @@ -270,13 +278,21 @@ bottomup:
19664 * can happen with large stack limits and large mmap()
19665 * allocations.
19666 */
19667 + mm->mmap_base = TASK_UNMAPPED_BASE;
19668 +
19669 +#ifdef CONFIG_PAX_RANDMMAP
19670 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19671 + mm->mmap_base += mm->delta_mmap;
19672 +#endif
19673 +
19674 + mm->free_area_cache = mm->mmap_base;
19675 mm->cached_hole_size = ~0UL;
19676 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19677 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19678 /*
19679 * Restore the topdown base:
19680 */
19681 - mm->free_area_cache = mm->mmap_base;
19682 + mm->mmap_base = base;
19683 + mm->free_area_cache = base;
19684 mm->cached_hole_size = ~0UL;
19685
19686 return addr;
19687 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19688 index 6410744..79758f0 100644
19689 --- a/arch/x86/kernel/tboot.c
19690 +++ b/arch/x86/kernel/tboot.c
19691 @@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
19692
19693 void tboot_shutdown(u32 shutdown_type)
19694 {
19695 - void (*shutdown)(void);
19696 + void (* __noreturn shutdown)(void);
19697
19698 if (!tboot_enabled())
19699 return;
19700 @@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
19701
19702 switch_to_tboot_pt();
19703
19704 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19705 + shutdown = (void *)tboot->shutdown_entry;
19706 shutdown();
19707
19708 /* should not reach here */
19709 @@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19710 return 0;
19711 }
19712
19713 -static atomic_t ap_wfs_count;
19714 +static atomic_unchecked_t ap_wfs_count;
19715
19716 static int tboot_wait_for_aps(int num_aps)
19717 {
19718 @@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19719 {
19720 switch (action) {
19721 case CPU_DYING:
19722 - atomic_inc(&ap_wfs_count);
19723 + atomic_inc_unchecked(&ap_wfs_count);
19724 if (num_online_cpus() == 1)
19725 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19726 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19727 return NOTIFY_BAD;
19728 break;
19729 }
19730 @@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
19731
19732 tboot_create_trampoline();
19733
19734 - atomic_set(&ap_wfs_count, 0);
19735 + atomic_set_unchecked(&ap_wfs_count, 0);
19736 register_hotcpu_notifier(&tboot_cpu_notifier);
19737
19738 acpi_os_set_prepare_sleep(&tboot_sleep);
19739 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19740 index c6eba2b..3303326 100644
19741 --- a/arch/x86/kernel/time.c
19742 +++ b/arch/x86/kernel/time.c
19743 @@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
19744 {
19745 unsigned long pc = instruction_pointer(regs);
19746
19747 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19748 + if (!user_mode(regs) && in_lock_functions(pc)) {
19749 #ifdef CONFIG_FRAME_POINTER
19750 - return *(unsigned long *)(regs->bp + sizeof(long));
19751 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19752 #else
19753 unsigned long *sp =
19754 (unsigned long *)kernel_stack_pointer(regs);
19755 @@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19756 * or above a saved flags. Eflags has bits 22-31 zero,
19757 * kernel addresses don't.
19758 */
19759 +
19760 +#ifdef CONFIG_PAX_KERNEXEC
19761 + return ktla_ktva(sp[0]);
19762 +#else
19763 if (sp[0] >> 22)
19764 return sp[0];
19765 if (sp[1] >> 22)
19766 return sp[1];
19767 #endif
19768 +
19769 +#endif
19770 }
19771 return pc;
19772 }
19773 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19774 index 9d9d2f9..ed344e4 100644
19775 --- a/arch/x86/kernel/tls.c
19776 +++ b/arch/x86/kernel/tls.c
19777 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19778 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19779 return -EINVAL;
19780
19781 +#ifdef CONFIG_PAX_SEGMEXEC
19782 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19783 + return -EINVAL;
19784 +#endif
19785 +
19786 set_tls_desc(p, idx, &info, 1);
19787
19788 return 0;
19789 diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19790 index 451c0a7..e57f551 100644
19791 --- a/arch/x86/kernel/trampoline_32.S
19792 +++ b/arch/x86/kernel/trampoline_32.S
19793 @@ -32,6 +32,12 @@
19794 #include <asm/segment.h>
19795 #include <asm/page_types.h>
19796
19797 +#ifdef CONFIG_PAX_KERNEXEC
19798 +#define ta(X) (X)
19799 +#else
19800 +#define ta(X) ((X) - __PAGE_OFFSET)
19801 +#endif
19802 +
19803 #ifdef CONFIG_SMP
19804
19805 .section ".x86_trampoline","a"
19806 @@ -62,7 +68,7 @@ r_base = .
19807 inc %ax # protected mode (PE) bit
19808 lmsw %ax # into protected mode
19809 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19810 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19811 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
19812
19813 # These need to be in the same 64K segment as the above;
19814 # hence we don't use the boot_gdt_descr defined in head.S
19815 diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19816 index 09ff517..df19fbff 100644
19817 --- a/arch/x86/kernel/trampoline_64.S
19818 +++ b/arch/x86/kernel/trampoline_64.S
19819 @@ -90,7 +90,7 @@ startup_32:
19820 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19821 movl %eax, %ds
19822
19823 - movl $X86_CR4_PAE, %eax
19824 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19825 movl %eax, %cr4 # Enable PAE mode
19826
19827 # Setup trampoline 4 level pagetables
19828 @@ -138,7 +138,7 @@ tidt:
19829 # so the kernel can live anywhere
19830 .balign 4
19831 tgdt:
19832 - .short tgdt_end - tgdt # gdt limit
19833 + .short tgdt_end - tgdt - 1 # gdt limit
19834 .long tgdt - r_base
19835 .short 0
19836 .quad 0x00cf9b000000ffff # __KERNEL32_CS
19837 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
19838 index ff9281f1..30cb4ac 100644
19839 --- a/arch/x86/kernel/traps.c
19840 +++ b/arch/x86/kernel/traps.c
19841 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
19842
19843 /* Do we ignore FPU interrupts ? */
19844 char ignore_fpu_irq;
19845 -
19846 -/*
19847 - * The IDT has to be page-aligned to simplify the Pentium
19848 - * F0 0F bug workaround.
19849 - */
19850 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
19851 #endif
19852
19853 DECLARE_BITMAP(used_vectors, NR_VECTORS);
19854 @@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
19855 }
19856
19857 static void __kprobes
19858 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19859 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19860 long error_code, siginfo_t *info)
19861 {
19862 struct task_struct *tsk = current;
19863
19864 #ifdef CONFIG_X86_32
19865 - if (regs->flags & X86_VM_MASK) {
19866 + if (v8086_mode(regs)) {
19867 /*
19868 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19869 * On nmi (interrupt 2), do_trap should not be called.
19870 @@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19871 }
19872 #endif
19873
19874 - if (!user_mode(regs))
19875 + if (!user_mode_novm(regs))
19876 goto kernel_trap;
19877
19878 #ifdef CONFIG_X86_32
19879 @@ -148,7 +142,7 @@ trap_signal:
19880 printk_ratelimit()) {
19881 printk(KERN_INFO
19882 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19883 - tsk->comm, tsk->pid, str,
19884 + tsk->comm, task_pid_nr(tsk), str,
19885 regs->ip, regs->sp, error_code);
19886 print_vma_addr(" in ", regs->ip);
19887 printk("\n");
19888 @@ -165,8 +159,20 @@ kernel_trap:
19889 if (!fixup_exception(regs)) {
19890 tsk->thread.error_code = error_code;
19891 tsk->thread.trap_nr = trapnr;
19892 +
19893 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19894 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19895 + str = "PAX: suspicious stack segment fault";
19896 +#endif
19897 +
19898 die(str, regs, error_code);
19899 }
19900 +
19901 +#ifdef CONFIG_PAX_REFCOUNT
19902 + if (trapnr == 4)
19903 + pax_report_refcount_overflow(regs);
19904 +#endif
19905 +
19906 return;
19907
19908 #ifdef CONFIG_X86_32
19909 @@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
19910 conditional_sti(regs);
19911
19912 #ifdef CONFIG_X86_32
19913 - if (regs->flags & X86_VM_MASK)
19914 + if (v8086_mode(regs))
19915 goto gp_in_vm86;
19916 #endif
19917
19918 tsk = current;
19919 - if (!user_mode(regs))
19920 + if (!user_mode_novm(regs))
19921 goto gp_in_kernel;
19922
19923 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19924 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
19925 + struct mm_struct *mm = tsk->mm;
19926 + unsigned long limit;
19927 +
19928 + down_write(&mm->mmap_sem);
19929 + limit = mm->context.user_cs_limit;
19930 + if (limit < TASK_SIZE) {
19931 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19932 + up_write(&mm->mmap_sem);
19933 + return;
19934 + }
19935 + up_write(&mm->mmap_sem);
19936 + }
19937 +#endif
19938 +
19939 tsk->thread.error_code = error_code;
19940 tsk->thread.trap_nr = X86_TRAP_GP;
19941
19942 @@ -299,6 +321,13 @@ gp_in_kernel:
19943 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19944 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
19945 return;
19946 +
19947 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19948 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
19949 + die("PAX: suspicious general protection fault", regs, error_code);
19950 + else
19951 +#endif
19952 +
19953 die("general protection fault", regs, error_code);
19954 }
19955
19956 @@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19957 /* It's safe to allow irq's after DR6 has been saved */
19958 preempt_conditional_sti(regs);
19959
19960 - if (regs->flags & X86_VM_MASK) {
19961 + if (v8086_mode(regs)) {
19962 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19963 X86_TRAP_DB);
19964 preempt_conditional_cli(regs);
19965 @@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
19966 * We already checked v86 mode above, so we can check for kernel mode
19967 * by just checking the CPL of CS.
19968 */
19969 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
19970 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19971 tsk->thread.debugreg6 &= ~DR_STEP;
19972 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19973 regs->flags &= ~X86_EFLAGS_TF;
19974 @@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
19975 return;
19976 conditional_sti(regs);
19977
19978 - if (!user_mode_vm(regs))
19979 + if (!user_mode(regs))
19980 {
19981 if (!fixup_exception(regs)) {
19982 task->thread.error_code = error_code;
19983 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
19984 index b9242ba..50c5edd 100644
19985 --- a/arch/x86/kernel/verify_cpu.S
19986 +++ b/arch/x86/kernel/verify_cpu.S
19987 @@ -20,6 +20,7 @@
19988 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
19989 * arch/x86/kernel/trampoline_64.S: secondary processor verification
19990 * arch/x86/kernel/head_32.S: processor startup
19991 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
19992 *
19993 * verify_cpu, returns the status of longmode and SSE in register %eax.
19994 * 0: Success 1: Failure
19995 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
19996 index 255f58a..5e91150 100644
19997 --- a/arch/x86/kernel/vm86_32.c
19998 +++ b/arch/x86/kernel/vm86_32.c
19999 @@ -41,6 +41,7 @@
20000 #include <linux/ptrace.h>
20001 #include <linux/audit.h>
20002 #include <linux/stddef.h>
20003 +#include <linux/grsecurity.h>
20004
20005 #include <asm/uaccess.h>
20006 #include <asm/io.h>
20007 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20008 do_exit(SIGSEGV);
20009 }
20010
20011 - tss = &per_cpu(init_tss, get_cpu());
20012 + tss = init_tss + get_cpu();
20013 current->thread.sp0 = current->thread.saved_sp0;
20014 current->thread.sysenter_cs = __KERNEL_CS;
20015 load_sp0(tss, &current->thread);
20016 @@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
20017 struct task_struct *tsk;
20018 int tmp, ret = -EPERM;
20019
20020 +#ifdef CONFIG_GRKERNSEC_VM86
20021 + if (!capable(CAP_SYS_RAWIO)) {
20022 + gr_handle_vm86();
20023 + goto out;
20024 + }
20025 +#endif
20026 +
20027 tsk = current;
20028 if (tsk->thread.saved_sp0)
20029 goto out;
20030 @@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
20031 int tmp, ret;
20032 struct vm86plus_struct __user *v86;
20033
20034 +#ifdef CONFIG_GRKERNSEC_VM86
20035 + if (!capable(CAP_SYS_RAWIO)) {
20036 + gr_handle_vm86();
20037 + ret = -EPERM;
20038 + goto out;
20039 + }
20040 +#endif
20041 +
20042 tsk = current;
20043 switch (cmd) {
20044 case VM86_REQUEST_IRQ:
20045 @@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20046 tsk->thread.saved_fs = info->regs32->fs;
20047 tsk->thread.saved_gs = get_user_gs(info->regs32);
20048
20049 - tss = &per_cpu(init_tss, get_cpu());
20050 + tss = init_tss + get_cpu();
20051 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20052 if (cpu_has_sep)
20053 tsk->thread.sysenter_cs = 0;
20054 @@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20055 goto cannot_handle;
20056 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20057 goto cannot_handle;
20058 - intr_ptr = (unsigned long __user *) (i << 2);
20059 + intr_ptr = (__force unsigned long __user *) (i << 2);
20060 if (get_user(segoffs, intr_ptr))
20061 goto cannot_handle;
20062 if ((segoffs >> 16) == BIOSSEG)
20063 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20064 index 0f703f1..9e15f64 100644
20065 --- a/arch/x86/kernel/vmlinux.lds.S
20066 +++ b/arch/x86/kernel/vmlinux.lds.S
20067 @@ -26,6 +26,13 @@
20068 #include <asm/page_types.h>
20069 #include <asm/cache.h>
20070 #include <asm/boot.h>
20071 +#include <asm/segment.h>
20072 +
20073 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20074 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20075 +#else
20076 +#define __KERNEL_TEXT_OFFSET 0
20077 +#endif
20078
20079 #undef i386 /* in case the preprocessor is a 32bit one */
20080
20081 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
20082
20083 PHDRS {
20084 text PT_LOAD FLAGS(5); /* R_E */
20085 +#ifdef CONFIG_X86_32
20086 + module PT_LOAD FLAGS(5); /* R_E */
20087 +#endif
20088 +#ifdef CONFIG_XEN
20089 + rodata PT_LOAD FLAGS(5); /* R_E */
20090 +#else
20091 + rodata PT_LOAD FLAGS(4); /* R__ */
20092 +#endif
20093 data PT_LOAD FLAGS(6); /* RW_ */
20094 -#ifdef CONFIG_X86_64
20095 + init.begin PT_LOAD FLAGS(6); /* RW_ */
20096 #ifdef CONFIG_SMP
20097 percpu PT_LOAD FLAGS(6); /* RW_ */
20098 #endif
20099 + text.init PT_LOAD FLAGS(5); /* R_E */
20100 + text.exit PT_LOAD FLAGS(5); /* R_E */
20101 init PT_LOAD FLAGS(7); /* RWE */
20102 -#endif
20103 note PT_NOTE FLAGS(0); /* ___ */
20104 }
20105
20106 SECTIONS
20107 {
20108 #ifdef CONFIG_X86_32
20109 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20110 - phys_startup_32 = startup_32 - LOAD_OFFSET;
20111 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20112 #else
20113 - . = __START_KERNEL;
20114 - phys_startup_64 = startup_64 - LOAD_OFFSET;
20115 + . = __START_KERNEL;
20116 #endif
20117
20118 /* Text and read-only data */
20119 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
20120 - _text = .;
20121 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20122 /* bootstrapping code */
20123 +#ifdef CONFIG_X86_32
20124 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20125 +#else
20126 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20127 +#endif
20128 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20129 + _text = .;
20130 HEAD_TEXT
20131 #ifdef CONFIG_X86_32
20132 . = ALIGN(PAGE_SIZE);
20133 @@ -108,13 +128,47 @@ SECTIONS
20134 IRQENTRY_TEXT
20135 *(.fixup)
20136 *(.gnu.warning)
20137 - /* End of text section */
20138 - _etext = .;
20139 } :text = 0x9090
20140
20141 - NOTES :text :note
20142 + . += __KERNEL_TEXT_OFFSET;
20143
20144 - EXCEPTION_TABLE(16) :text = 0x9090
20145 +#ifdef CONFIG_X86_32
20146 + . = ALIGN(PAGE_SIZE);
20147 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20148 +
20149 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20150 + MODULES_EXEC_VADDR = .;
20151 + BYTE(0)
20152 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20153 + . = ALIGN(HPAGE_SIZE);
20154 + MODULES_EXEC_END = . - 1;
20155 +#endif
20156 +
20157 + } :module
20158 +#endif
20159 +
20160 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20161 + /* End of text section */
20162 + _etext = . - __KERNEL_TEXT_OFFSET;
20163 + }
20164 +
20165 +#ifdef CONFIG_X86_32
20166 + . = ALIGN(PAGE_SIZE);
20167 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20168 + *(.idt)
20169 + . = ALIGN(PAGE_SIZE);
20170 + *(.empty_zero_page)
20171 + *(.initial_pg_fixmap)
20172 + *(.initial_pg_pmd)
20173 + *(.initial_page_table)
20174 + *(.swapper_pg_dir)
20175 + } :rodata
20176 +#endif
20177 +
20178 + . = ALIGN(PAGE_SIZE);
20179 + NOTES :rodata :note
20180 +
20181 + EXCEPTION_TABLE(16) :rodata
20182
20183 #if defined(CONFIG_DEBUG_RODATA)
20184 /* .text should occupy whole number of pages */
20185 @@ -126,16 +180,20 @@ SECTIONS
20186
20187 /* Data */
20188 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20189 +
20190 +#ifdef CONFIG_PAX_KERNEXEC
20191 + . = ALIGN(HPAGE_SIZE);
20192 +#else
20193 + . = ALIGN(PAGE_SIZE);
20194 +#endif
20195 +
20196 /* Start of data section */
20197 _sdata = .;
20198
20199 /* init_task */
20200 INIT_TASK_DATA(THREAD_SIZE)
20201
20202 -#ifdef CONFIG_X86_32
20203 - /* 32 bit has nosave before _edata */
20204 NOSAVE_DATA
20205 -#endif
20206
20207 PAGE_ALIGNED_DATA(PAGE_SIZE)
20208
20209 @@ -176,12 +234,19 @@ SECTIONS
20210 #endif /* CONFIG_X86_64 */
20211
20212 /* Init code and data - will be freed after init */
20213 - . = ALIGN(PAGE_SIZE);
20214 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20215 + BYTE(0)
20216 +
20217 +#ifdef CONFIG_PAX_KERNEXEC
20218 + . = ALIGN(HPAGE_SIZE);
20219 +#else
20220 + . = ALIGN(PAGE_SIZE);
20221 +#endif
20222 +
20223 __init_begin = .; /* paired with __init_end */
20224 - }
20225 + } :init.begin
20226
20227 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20228 +#ifdef CONFIG_SMP
20229 /*
20230 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20231 * output PHDR, so the next output section - .init.text - should
20232 @@ -190,12 +255,27 @@ SECTIONS
20233 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
20234 #endif
20235
20236 - INIT_TEXT_SECTION(PAGE_SIZE)
20237 -#ifdef CONFIG_X86_64
20238 - :init
20239 -#endif
20240 + . = ALIGN(PAGE_SIZE);
20241 + init_begin = .;
20242 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20243 + VMLINUX_SYMBOL(_sinittext) = .;
20244 + INIT_TEXT
20245 + VMLINUX_SYMBOL(_einittext) = .;
20246 + . = ALIGN(PAGE_SIZE);
20247 + } :text.init
20248
20249 - INIT_DATA_SECTION(16)
20250 + /*
20251 + * .exit.text is discard at runtime, not link time, to deal with
20252 + * references from .altinstructions and .eh_frame
20253 + */
20254 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20255 + EXIT_TEXT
20256 + . = ALIGN(16);
20257 + } :text.exit
20258 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20259 +
20260 + . = ALIGN(PAGE_SIZE);
20261 + INIT_DATA_SECTION(16) :init
20262
20263 /*
20264 * Code and data for a variety of lowlevel trampolines, to be
20265 @@ -269,19 +349,12 @@ SECTIONS
20266 }
20267
20268 . = ALIGN(8);
20269 - /*
20270 - * .exit.text is discard at runtime, not link time, to deal with
20271 - * references from .altinstructions and .eh_frame
20272 - */
20273 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20274 - EXIT_TEXT
20275 - }
20276
20277 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20278 EXIT_DATA
20279 }
20280
20281 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20282 +#ifndef CONFIG_SMP
20283 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
20284 #endif
20285
20286 @@ -300,16 +373,10 @@ SECTIONS
20287 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20288 __smp_locks = .;
20289 *(.smp_locks)
20290 - . = ALIGN(PAGE_SIZE);
20291 __smp_locks_end = .;
20292 + . = ALIGN(PAGE_SIZE);
20293 }
20294
20295 -#ifdef CONFIG_X86_64
20296 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20297 - NOSAVE_DATA
20298 - }
20299 -#endif
20300 -
20301 /* BSS */
20302 . = ALIGN(PAGE_SIZE);
20303 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20304 @@ -325,6 +392,7 @@ SECTIONS
20305 __brk_base = .;
20306 . += 64 * 1024; /* 64k alignment slop space */
20307 *(.brk_reservation) /* areas brk users have reserved */
20308 + . = ALIGN(HPAGE_SIZE);
20309 __brk_limit = .;
20310 }
20311
20312 @@ -351,13 +419,12 @@ SECTIONS
20313 * for the boot processor.
20314 */
20315 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
20316 -INIT_PER_CPU(gdt_page);
20317 INIT_PER_CPU(irq_stack_union);
20318
20319 /*
20320 * Build-time check on the image size:
20321 */
20322 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20323 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20324 "kernel image bigger than KERNEL_IMAGE_SIZE");
20325
20326 #ifdef CONFIG_SMP
20327 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20328 index 7515cf0..331a1a0 100644
20329 --- a/arch/x86/kernel/vsyscall_64.c
20330 +++ b/arch/x86/kernel/vsyscall_64.c
20331 @@ -54,15 +54,13 @@
20332 DEFINE_VVAR(int, vgetcpu_mode);
20333 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
20334
20335 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
20336 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20337
20338 static int __init vsyscall_setup(char *str)
20339 {
20340 if (str) {
20341 if (!strcmp("emulate", str))
20342 vsyscall_mode = EMULATE;
20343 - else if (!strcmp("native", str))
20344 - vsyscall_mode = NATIVE;
20345 else if (!strcmp("none", str))
20346 vsyscall_mode = NONE;
20347 else
20348 @@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20349
20350 tsk = current;
20351 if (seccomp_mode(&tsk->seccomp))
20352 - do_exit(SIGKILL);
20353 + do_group_exit(SIGKILL);
20354
20355 /*
20356 * With a real vsyscall, page faults cause SIGSEGV. We want to
20357 @@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
20358 return true;
20359
20360 sigsegv:
20361 - force_sig(SIGSEGV, current);
20362 - return true;
20363 + do_group_exit(SIGKILL);
20364 }
20365
20366 /*
20367 @@ -332,10 +329,7 @@ void __init map_vsyscall(void)
20368 extern char __vvar_page;
20369 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20370
20371 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20372 - vsyscall_mode == NATIVE
20373 - ? PAGE_KERNEL_VSYSCALL
20374 - : PAGE_KERNEL_VVAR);
20375 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20376 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20377 (unsigned long)VSYSCALL_START);
20378
20379 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20380 index 9796c2f..f686fbf 100644
20381 --- a/arch/x86/kernel/x8664_ksyms_64.c
20382 +++ b/arch/x86/kernel/x8664_ksyms_64.c
20383 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20384 EXPORT_SYMBOL(copy_user_generic_string);
20385 EXPORT_SYMBOL(copy_user_generic_unrolled);
20386 EXPORT_SYMBOL(__copy_user_nocache);
20387 -EXPORT_SYMBOL(_copy_from_user);
20388 -EXPORT_SYMBOL(_copy_to_user);
20389
20390 EXPORT_SYMBOL(copy_page);
20391 EXPORT_SYMBOL(clear_page);
20392 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
20393 index e62728e..5fc3a07 100644
20394 --- a/arch/x86/kernel/xsave.c
20395 +++ b/arch/x86/kernel/xsave.c
20396 @@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
20397 fx_sw_user->xstate_size > fx_sw_user->extended_size)
20398 return -EINVAL;
20399
20400 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20401 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20402 fx_sw_user->extended_size -
20403 FP_XSTATE_MAGIC2_SIZE));
20404 if (err)
20405 @@ -267,7 +267,7 @@ fx_only:
20406 * the other extended state.
20407 */
20408 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20409 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
20410 + return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
20411 }
20412
20413 /*
20414 @@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
20415 if (use_xsave())
20416 err = restore_user_xstate(buf);
20417 else
20418 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
20419 + err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
20420 buf);
20421 if (unlikely(err)) {
20422 /*
20423 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
20424 index 9fed5be..18fd595 100644
20425 --- a/arch/x86/kvm/cpuid.c
20426 +++ b/arch/x86/kvm/cpuid.c
20427 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20428 struct kvm_cpuid2 *cpuid,
20429 struct kvm_cpuid_entry2 __user *entries)
20430 {
20431 - int r;
20432 + int r, i;
20433
20434 r = -E2BIG;
20435 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20436 goto out;
20437 r = -EFAULT;
20438 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20439 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20440 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20441 goto out;
20442 + for (i = 0; i < cpuid->nent; ++i) {
20443 + struct kvm_cpuid_entry2 cpuid_entry;
20444 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20445 + goto out;
20446 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
20447 + }
20448 vcpu->arch.cpuid_nent = cpuid->nent;
20449 kvm_apic_set_version(vcpu);
20450 kvm_x86_ops->cpuid_update(vcpu);
20451 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20452 struct kvm_cpuid2 *cpuid,
20453 struct kvm_cpuid_entry2 __user *entries)
20454 {
20455 - int r;
20456 + int r, i;
20457
20458 r = -E2BIG;
20459 if (cpuid->nent < vcpu->arch.cpuid_nent)
20460 goto out;
20461 r = -EFAULT;
20462 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20463 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20464 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20465 goto out;
20466 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20467 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20468 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20469 + goto out;
20470 + }
20471 return 0;
20472
20473 out:
20474 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
20475 index 8375622..b7bca1a 100644
20476 --- a/arch/x86/kvm/emulate.c
20477 +++ b/arch/x86/kvm/emulate.c
20478 @@ -252,6 +252,7 @@ struct gprefix {
20479
20480 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
20481 do { \
20482 + unsigned long _tmp; \
20483 __asm__ __volatile__ ( \
20484 _PRE_EFLAGS("0", "4", "2") \
20485 _op _suffix " %"_x"3,%1; " \
20486 @@ -266,8 +267,6 @@ struct gprefix {
20487 /* Raw emulation: instruction has two explicit operands. */
20488 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
20489 do { \
20490 - unsigned long _tmp; \
20491 - \
20492 switch ((ctxt)->dst.bytes) { \
20493 case 2: \
20494 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
20495 @@ -283,7 +282,6 @@ struct gprefix {
20496
20497 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
20498 do { \
20499 - unsigned long _tmp; \
20500 switch ((ctxt)->dst.bytes) { \
20501 case 1: \
20502 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
20503 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
20504 index 8584322..17d5955 100644
20505 --- a/arch/x86/kvm/lapic.c
20506 +++ b/arch/x86/kvm/lapic.c
20507 @@ -54,7 +54,7 @@
20508 #define APIC_BUS_CYCLE_NS 1
20509
20510 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20511 -#define apic_debug(fmt, arg...)
20512 +#define apic_debug(fmt, arg...) do {} while (0)
20513
20514 #define APIC_LVT_NUM 6
20515 /* 14 is the version for Xeon and Pentium 8.4.8*/
20516 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
20517 index df5a703..63748a7 100644
20518 --- a/arch/x86/kvm/paging_tmpl.h
20519 +++ b/arch/x86/kvm/paging_tmpl.h
20520 @@ -197,7 +197,7 @@ retry_walk:
20521 if (unlikely(kvm_is_error_hva(host_addr)))
20522 goto error;
20523
20524 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20525 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20526 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20527 goto error;
20528
20529 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
20530 index e334389..6839087 100644
20531 --- a/arch/x86/kvm/svm.c
20532 +++ b/arch/x86/kvm/svm.c
20533 @@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
20534 int cpu = raw_smp_processor_id();
20535
20536 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
20537 +
20538 + pax_open_kernel();
20539 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20540 + pax_close_kernel();
20541 +
20542 load_TR_desc();
20543 }
20544
20545 @@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
20546 #endif
20547 #endif
20548
20549 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20550 + __set_fs(current_thread_info()->addr_limit);
20551 +#endif
20552 +
20553 reload_tss(vcpu);
20554
20555 local_irq_disable();
20556 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
20557 index 4ff0ab9..2ff68d3 100644
20558 --- a/arch/x86/kvm/vmx.c
20559 +++ b/arch/x86/kvm/vmx.c
20560 @@ -1303,7 +1303,11 @@ static void reload_tss(void)
20561 struct desc_struct *descs;
20562
20563 descs = (void *)gdt->address;
20564 +
20565 + pax_open_kernel();
20566 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
20567 + pax_close_kernel();
20568 +
20569 load_TR_desc();
20570 }
20571
20572 @@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
20573 if (!cpu_has_vmx_flexpriority())
20574 flexpriority_enabled = 0;
20575
20576 - if (!cpu_has_vmx_tpr_shadow())
20577 - kvm_x86_ops->update_cr8_intercept = NULL;
20578 + if (!cpu_has_vmx_tpr_shadow()) {
20579 + pax_open_kernel();
20580 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
20581 + pax_close_kernel();
20582 + }
20583
20584 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20585 kvm_disable_largepages();
20586 @@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
20587 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
20588
20589 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20590 - vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20591 + vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20592
20593 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20594 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
20595 @@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20596 "jmp .Lkvm_vmx_return \n\t"
20597 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20598 ".Lkvm_vmx_return: "
20599 +
20600 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20601 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20602 + ".Lkvm_vmx_return2: "
20603 +#endif
20604 +
20605 /* Save guest registers, load host registers, keep flags */
20606 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20607 "pop %0 \n\t"
20608 @@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20609 #endif
20610 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20611 [wordsize]"i"(sizeof(ulong))
20612 +
20613 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20614 + ,[cs]"i"(__KERNEL_CS)
20615 +#endif
20616 +
20617 : "cc", "memory"
20618 , R"ax", R"bx", R"di", R"si"
20619 #ifdef CONFIG_X86_64
20620 @@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
20621 }
20622 }
20623
20624 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
20625 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
20626 +
20627 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20628 + loadsegment(fs, __KERNEL_PERCPU);
20629 +#endif
20630 +
20631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20632 + __set_fs(current_thread_info()->addr_limit);
20633 +#endif
20634 +
20635 vmx->loaded_vmcs->launched = 1;
20636
20637 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
20638 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
20639 index 185a2b8..866d2a6 100644
20640 --- a/arch/x86/kvm/x86.c
20641 +++ b/arch/x86/kvm/x86.c
20642 @@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
20643 {
20644 struct kvm *kvm = vcpu->kvm;
20645 int lm = is_long_mode(vcpu);
20646 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20647 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20648 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20649 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20650 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20651 : kvm->arch.xen_hvm_config.blob_size_32;
20652 u32 page_num = data & ~PAGE_MASK;
20653 @@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
20654 if (n < msr_list.nmsrs)
20655 goto out;
20656 r = -EFAULT;
20657 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20658 + goto out;
20659 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20660 num_msrs_to_save * sizeof(u32)))
20661 goto out;
20662 @@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
20663 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20664 struct kvm_interrupt *irq)
20665 {
20666 - if (irq->irq < 0 || irq->irq >= 256)
20667 + if (irq->irq >= 256)
20668 return -EINVAL;
20669 if (irqchip_in_kernel(vcpu->kvm))
20670 return -ENXIO;
20671 @@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
20672 kvm_mmu_set_mmio_spte_mask(mask);
20673 }
20674
20675 -int kvm_arch_init(void *opaque)
20676 +int kvm_arch_init(const void *opaque)
20677 {
20678 int r;
20679 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
20680 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
20681 index 642d880..44e0f3f 100644
20682 --- a/arch/x86/lguest/boot.c
20683 +++ b/arch/x86/lguest/boot.c
20684 @@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
20685 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20686 * Launcher to reboot us.
20687 */
20688 -static void lguest_restart(char *reason)
20689 +static __noreturn void lguest_restart(char *reason)
20690 {
20691 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20692 + BUG();
20693 }
20694
20695 /*G:050
20696 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
20697 index 00933d5..3a64af9 100644
20698 --- a/arch/x86/lib/atomic64_386_32.S
20699 +++ b/arch/x86/lib/atomic64_386_32.S
20700 @@ -48,6 +48,10 @@ BEGIN(read)
20701 movl (v), %eax
20702 movl 4(v), %edx
20703 RET_ENDP
20704 +BEGIN(read_unchecked)
20705 + movl (v), %eax
20706 + movl 4(v), %edx
20707 +RET_ENDP
20708 #undef v
20709
20710 #define v %esi
20711 @@ -55,6 +59,10 @@ BEGIN(set)
20712 movl %ebx, (v)
20713 movl %ecx, 4(v)
20714 RET_ENDP
20715 +BEGIN(set_unchecked)
20716 + movl %ebx, (v)
20717 + movl %ecx, 4(v)
20718 +RET_ENDP
20719 #undef v
20720
20721 #define v %esi
20722 @@ -70,6 +78,20 @@ RET_ENDP
20723 BEGIN(add)
20724 addl %eax, (v)
20725 adcl %edx, 4(v)
20726 +
20727 +#ifdef CONFIG_PAX_REFCOUNT
20728 + jno 0f
20729 + subl %eax, (v)
20730 + sbbl %edx, 4(v)
20731 + int $4
20732 +0:
20733 + _ASM_EXTABLE(0b, 0b)
20734 +#endif
20735 +
20736 +RET_ENDP
20737 +BEGIN(add_unchecked)
20738 + addl %eax, (v)
20739 + adcl %edx, 4(v)
20740 RET_ENDP
20741 #undef v
20742
20743 @@ -77,6 +99,24 @@ RET_ENDP
20744 BEGIN(add_return)
20745 addl (v), %eax
20746 adcl 4(v), %edx
20747 +
20748 +#ifdef CONFIG_PAX_REFCOUNT
20749 + into
20750 +1234:
20751 + _ASM_EXTABLE(1234b, 2f)
20752 +#endif
20753 +
20754 + movl %eax, (v)
20755 + movl %edx, 4(v)
20756 +
20757 +#ifdef CONFIG_PAX_REFCOUNT
20758 +2:
20759 +#endif
20760 +
20761 +RET_ENDP
20762 +BEGIN(add_return_unchecked)
20763 + addl (v), %eax
20764 + adcl 4(v), %edx
20765 movl %eax, (v)
20766 movl %edx, 4(v)
20767 RET_ENDP
20768 @@ -86,6 +126,20 @@ RET_ENDP
20769 BEGIN(sub)
20770 subl %eax, (v)
20771 sbbl %edx, 4(v)
20772 +
20773 +#ifdef CONFIG_PAX_REFCOUNT
20774 + jno 0f
20775 + addl %eax, (v)
20776 + adcl %edx, 4(v)
20777 + int $4
20778 +0:
20779 + _ASM_EXTABLE(0b, 0b)
20780 +#endif
20781 +
20782 +RET_ENDP
20783 +BEGIN(sub_unchecked)
20784 + subl %eax, (v)
20785 + sbbl %edx, 4(v)
20786 RET_ENDP
20787 #undef v
20788
20789 @@ -96,6 +150,27 @@ BEGIN(sub_return)
20790 sbbl $0, %edx
20791 addl (v), %eax
20792 adcl 4(v), %edx
20793 +
20794 +#ifdef CONFIG_PAX_REFCOUNT
20795 + into
20796 +1234:
20797 + _ASM_EXTABLE(1234b, 2f)
20798 +#endif
20799 +
20800 + movl %eax, (v)
20801 + movl %edx, 4(v)
20802 +
20803 +#ifdef CONFIG_PAX_REFCOUNT
20804 +2:
20805 +#endif
20806 +
20807 +RET_ENDP
20808 +BEGIN(sub_return_unchecked)
20809 + negl %edx
20810 + negl %eax
20811 + sbbl $0, %edx
20812 + addl (v), %eax
20813 + adcl 4(v), %edx
20814 movl %eax, (v)
20815 movl %edx, 4(v)
20816 RET_ENDP
20817 @@ -105,6 +180,20 @@ RET_ENDP
20818 BEGIN(inc)
20819 addl $1, (v)
20820 adcl $0, 4(v)
20821 +
20822 +#ifdef CONFIG_PAX_REFCOUNT
20823 + jno 0f
20824 + subl $1, (v)
20825 + sbbl $0, 4(v)
20826 + int $4
20827 +0:
20828 + _ASM_EXTABLE(0b, 0b)
20829 +#endif
20830 +
20831 +RET_ENDP
20832 +BEGIN(inc_unchecked)
20833 + addl $1, (v)
20834 + adcl $0, 4(v)
20835 RET_ENDP
20836 #undef v
20837
20838 @@ -114,6 +203,26 @@ BEGIN(inc_return)
20839 movl 4(v), %edx
20840 addl $1, %eax
20841 adcl $0, %edx
20842 +
20843 +#ifdef CONFIG_PAX_REFCOUNT
20844 + into
20845 +1234:
20846 + _ASM_EXTABLE(1234b, 2f)
20847 +#endif
20848 +
20849 + movl %eax, (v)
20850 + movl %edx, 4(v)
20851 +
20852 +#ifdef CONFIG_PAX_REFCOUNT
20853 +2:
20854 +#endif
20855 +
20856 +RET_ENDP
20857 +BEGIN(inc_return_unchecked)
20858 + movl (v), %eax
20859 + movl 4(v), %edx
20860 + addl $1, %eax
20861 + adcl $0, %edx
20862 movl %eax, (v)
20863 movl %edx, 4(v)
20864 RET_ENDP
20865 @@ -123,6 +232,20 @@ RET_ENDP
20866 BEGIN(dec)
20867 subl $1, (v)
20868 sbbl $0, 4(v)
20869 +
20870 +#ifdef CONFIG_PAX_REFCOUNT
20871 + jno 0f
20872 + addl $1, (v)
20873 + adcl $0, 4(v)
20874 + int $4
20875 +0:
20876 + _ASM_EXTABLE(0b, 0b)
20877 +#endif
20878 +
20879 +RET_ENDP
20880 +BEGIN(dec_unchecked)
20881 + subl $1, (v)
20882 + sbbl $0, 4(v)
20883 RET_ENDP
20884 #undef v
20885
20886 @@ -132,6 +255,26 @@ BEGIN(dec_return)
20887 movl 4(v), %edx
20888 subl $1, %eax
20889 sbbl $0, %edx
20890 +
20891 +#ifdef CONFIG_PAX_REFCOUNT
20892 + into
20893 +1234:
20894 + _ASM_EXTABLE(1234b, 2f)
20895 +#endif
20896 +
20897 + movl %eax, (v)
20898 + movl %edx, 4(v)
20899 +
20900 +#ifdef CONFIG_PAX_REFCOUNT
20901 +2:
20902 +#endif
20903 +
20904 +RET_ENDP
20905 +BEGIN(dec_return_unchecked)
20906 + movl (v), %eax
20907 + movl 4(v), %edx
20908 + subl $1, %eax
20909 + sbbl $0, %edx
20910 movl %eax, (v)
20911 movl %edx, 4(v)
20912 RET_ENDP
20913 @@ -143,6 +286,13 @@ BEGIN(add_unless)
20914 adcl %edx, %edi
20915 addl (v), %eax
20916 adcl 4(v), %edx
20917 +
20918 +#ifdef CONFIG_PAX_REFCOUNT
20919 + into
20920 +1234:
20921 + _ASM_EXTABLE(1234b, 2f)
20922 +#endif
20923 +
20924 cmpl %eax, %ecx
20925 je 3f
20926 1:
20927 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20928 1:
20929 addl $1, %eax
20930 adcl $0, %edx
20931 +
20932 +#ifdef CONFIG_PAX_REFCOUNT
20933 + into
20934 +1234:
20935 + _ASM_EXTABLE(1234b, 2f)
20936 +#endif
20937 +
20938 movl %eax, (v)
20939 movl %edx, 4(v)
20940 movl $1, %eax
20941 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20942 movl 4(v), %edx
20943 subl $1, %eax
20944 sbbl $0, %edx
20945 +
20946 +#ifdef CONFIG_PAX_REFCOUNT
20947 + into
20948 +1234:
20949 + _ASM_EXTABLE(1234b, 1f)
20950 +#endif
20951 +
20952 js 1f
20953 movl %eax, (v)
20954 movl %edx, 4(v)
20955 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
20956 index f5cc9eb..51fa319 100644
20957 --- a/arch/x86/lib/atomic64_cx8_32.S
20958 +++ b/arch/x86/lib/atomic64_cx8_32.S
20959 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
20960 CFI_STARTPROC
20961
20962 read64 %ecx
20963 + pax_force_retaddr
20964 ret
20965 CFI_ENDPROC
20966 ENDPROC(atomic64_read_cx8)
20967
20968 +ENTRY(atomic64_read_unchecked_cx8)
20969 + CFI_STARTPROC
20970 +
20971 + read64 %ecx
20972 + pax_force_retaddr
20973 + ret
20974 + CFI_ENDPROC
20975 +ENDPROC(atomic64_read_unchecked_cx8)
20976 +
20977 ENTRY(atomic64_set_cx8)
20978 CFI_STARTPROC
20979
20980 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
20981 cmpxchg8b (%esi)
20982 jne 1b
20983
20984 + pax_force_retaddr
20985 ret
20986 CFI_ENDPROC
20987 ENDPROC(atomic64_set_cx8)
20988
20989 +ENTRY(atomic64_set_unchecked_cx8)
20990 + CFI_STARTPROC
20991 +
20992 +1:
20993 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
20994 + * are atomic on 586 and newer */
20995 + cmpxchg8b (%esi)
20996 + jne 1b
20997 +
20998 + pax_force_retaddr
20999 + ret
21000 + CFI_ENDPROC
21001 +ENDPROC(atomic64_set_unchecked_cx8)
21002 +
21003 ENTRY(atomic64_xchg_cx8)
21004 CFI_STARTPROC
21005
21006 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
21007 cmpxchg8b (%esi)
21008 jne 1b
21009
21010 + pax_force_retaddr
21011 ret
21012 CFI_ENDPROC
21013 ENDPROC(atomic64_xchg_cx8)
21014
21015 -.macro addsub_return func ins insc
21016 -ENTRY(atomic64_\func\()_return_cx8)
21017 +.macro addsub_return func ins insc unchecked=""
21018 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21019 CFI_STARTPROC
21020 SAVE ebp
21021 SAVE ebx
21022 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
21023 movl %edx, %ecx
21024 \ins\()l %esi, %ebx
21025 \insc\()l %edi, %ecx
21026 +
21027 +.ifb \unchecked
21028 +#ifdef CONFIG_PAX_REFCOUNT
21029 + into
21030 +2:
21031 + _ASM_EXTABLE(2b, 3f)
21032 +#endif
21033 +.endif
21034 +
21035 LOCK_PREFIX
21036 cmpxchg8b (%ebp)
21037 jne 1b
21038 -
21039 -10:
21040 movl %ebx, %eax
21041 movl %ecx, %edx
21042 +
21043 +.ifb \unchecked
21044 +#ifdef CONFIG_PAX_REFCOUNT
21045 +3:
21046 +#endif
21047 +.endif
21048 +
21049 RESTORE edi
21050 RESTORE esi
21051 RESTORE ebx
21052 RESTORE ebp
21053 + pax_force_retaddr
21054 ret
21055 CFI_ENDPROC
21056 -ENDPROC(atomic64_\func\()_return_cx8)
21057 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21058 .endm
21059
21060 addsub_return add add adc
21061 addsub_return sub sub sbb
21062 +addsub_return add add adc _unchecked
21063 +addsub_return sub sub sbb _unchecked
21064
21065 -.macro incdec_return func ins insc
21066 -ENTRY(atomic64_\func\()_return_cx8)
21067 +.macro incdec_return func ins insc unchecked=""
21068 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21069 CFI_STARTPROC
21070 SAVE ebx
21071
21072 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
21073 movl %edx, %ecx
21074 \ins\()l $1, %ebx
21075 \insc\()l $0, %ecx
21076 +
21077 +.ifb \unchecked
21078 +#ifdef CONFIG_PAX_REFCOUNT
21079 + into
21080 +2:
21081 + _ASM_EXTABLE(2b, 3f)
21082 +#endif
21083 +.endif
21084 +
21085 LOCK_PREFIX
21086 cmpxchg8b (%esi)
21087 jne 1b
21088
21089 -10:
21090 movl %ebx, %eax
21091 movl %ecx, %edx
21092 +
21093 +.ifb \unchecked
21094 +#ifdef CONFIG_PAX_REFCOUNT
21095 +3:
21096 +#endif
21097 +.endif
21098 +
21099 RESTORE ebx
21100 + pax_force_retaddr
21101 ret
21102 CFI_ENDPROC
21103 -ENDPROC(atomic64_\func\()_return_cx8)
21104 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21105 .endm
21106
21107 incdec_return inc add adc
21108 incdec_return dec sub sbb
21109 +incdec_return inc add adc _unchecked
21110 +incdec_return dec sub sbb _unchecked
21111
21112 ENTRY(atomic64_dec_if_positive_cx8)
21113 CFI_STARTPROC
21114 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
21115 movl %edx, %ecx
21116 subl $1, %ebx
21117 sbb $0, %ecx
21118 +
21119 +#ifdef CONFIG_PAX_REFCOUNT
21120 + into
21121 +1234:
21122 + _ASM_EXTABLE(1234b, 2f)
21123 +#endif
21124 +
21125 js 2f
21126 LOCK_PREFIX
21127 cmpxchg8b (%esi)
21128 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
21129 movl %ebx, %eax
21130 movl %ecx, %edx
21131 RESTORE ebx
21132 + pax_force_retaddr
21133 ret
21134 CFI_ENDPROC
21135 ENDPROC(atomic64_dec_if_positive_cx8)
21136 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
21137 movl %edx, %ecx
21138 addl %ebp, %ebx
21139 adcl %edi, %ecx
21140 +
21141 +#ifdef CONFIG_PAX_REFCOUNT
21142 + into
21143 +1234:
21144 + _ASM_EXTABLE(1234b, 3f)
21145 +#endif
21146 +
21147 LOCK_PREFIX
21148 cmpxchg8b (%esi)
21149 jne 1b
21150 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
21151 CFI_ADJUST_CFA_OFFSET -8
21152 RESTORE ebx
21153 RESTORE ebp
21154 + pax_force_retaddr
21155 ret
21156 4:
21157 cmpl %edx, 4(%esp)
21158 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21159 xorl %ecx, %ecx
21160 addl $1, %ebx
21161 adcl %edx, %ecx
21162 +
21163 +#ifdef CONFIG_PAX_REFCOUNT
21164 + into
21165 +1234:
21166 + _ASM_EXTABLE(1234b, 3f)
21167 +#endif
21168 +
21169 LOCK_PREFIX
21170 cmpxchg8b (%esi)
21171 jne 1b
21172 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
21173 movl $1, %eax
21174 3:
21175 RESTORE ebx
21176 + pax_force_retaddr
21177 ret
21178 CFI_ENDPROC
21179 ENDPROC(atomic64_inc_not_zero_cx8)
21180 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21181 index 78d16a5..fbcf666 100644
21182 --- a/arch/x86/lib/checksum_32.S
21183 +++ b/arch/x86/lib/checksum_32.S
21184 @@ -28,7 +28,8 @@
21185 #include <linux/linkage.h>
21186 #include <asm/dwarf2.h>
21187 #include <asm/errno.h>
21188 -
21189 +#include <asm/segment.h>
21190 +
21191 /*
21192 * computes a partial checksum, e.g. for TCP/UDP fragments
21193 */
21194 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21195
21196 #define ARGBASE 16
21197 #define FP 12
21198 -
21199 -ENTRY(csum_partial_copy_generic)
21200 +
21201 +ENTRY(csum_partial_copy_generic_to_user)
21202 CFI_STARTPROC
21203 +
21204 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21205 + pushl_cfi %gs
21206 + popl_cfi %es
21207 + jmp csum_partial_copy_generic
21208 +#endif
21209 +
21210 +ENTRY(csum_partial_copy_generic_from_user)
21211 +
21212 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21213 + pushl_cfi %gs
21214 + popl_cfi %ds
21215 +#endif
21216 +
21217 +ENTRY(csum_partial_copy_generic)
21218 subl $4,%esp
21219 CFI_ADJUST_CFA_OFFSET 4
21220 pushl_cfi %edi
21221 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
21222 jmp 4f
21223 SRC(1: movw (%esi), %bx )
21224 addl $2, %esi
21225 -DST( movw %bx, (%edi) )
21226 +DST( movw %bx, %es:(%edi) )
21227 addl $2, %edi
21228 addw %bx, %ax
21229 adcl $0, %eax
21230 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
21231 SRC(1: movl (%esi), %ebx )
21232 SRC( movl 4(%esi), %edx )
21233 adcl %ebx, %eax
21234 -DST( movl %ebx, (%edi) )
21235 +DST( movl %ebx, %es:(%edi) )
21236 adcl %edx, %eax
21237 -DST( movl %edx, 4(%edi) )
21238 +DST( movl %edx, %es:4(%edi) )
21239
21240 SRC( movl 8(%esi), %ebx )
21241 SRC( movl 12(%esi), %edx )
21242 adcl %ebx, %eax
21243 -DST( movl %ebx, 8(%edi) )
21244 +DST( movl %ebx, %es:8(%edi) )
21245 adcl %edx, %eax
21246 -DST( movl %edx, 12(%edi) )
21247 +DST( movl %edx, %es:12(%edi) )
21248
21249 SRC( movl 16(%esi), %ebx )
21250 SRC( movl 20(%esi), %edx )
21251 adcl %ebx, %eax
21252 -DST( movl %ebx, 16(%edi) )
21253 +DST( movl %ebx, %es:16(%edi) )
21254 adcl %edx, %eax
21255 -DST( movl %edx, 20(%edi) )
21256 +DST( movl %edx, %es:20(%edi) )
21257
21258 SRC( movl 24(%esi), %ebx )
21259 SRC( movl 28(%esi), %edx )
21260 adcl %ebx, %eax
21261 -DST( movl %ebx, 24(%edi) )
21262 +DST( movl %ebx, %es:24(%edi) )
21263 adcl %edx, %eax
21264 -DST( movl %edx, 28(%edi) )
21265 +DST( movl %edx, %es:28(%edi) )
21266
21267 lea 32(%esi), %esi
21268 lea 32(%edi), %edi
21269 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
21270 shrl $2, %edx # This clears CF
21271 SRC(3: movl (%esi), %ebx )
21272 adcl %ebx, %eax
21273 -DST( movl %ebx, (%edi) )
21274 +DST( movl %ebx, %es:(%edi) )
21275 lea 4(%esi), %esi
21276 lea 4(%edi), %edi
21277 dec %edx
21278 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
21279 jb 5f
21280 SRC( movw (%esi), %cx )
21281 leal 2(%esi), %esi
21282 -DST( movw %cx, (%edi) )
21283 +DST( movw %cx, %es:(%edi) )
21284 leal 2(%edi), %edi
21285 je 6f
21286 shll $16,%ecx
21287 SRC(5: movb (%esi), %cl )
21288 -DST( movb %cl, (%edi) )
21289 +DST( movb %cl, %es:(%edi) )
21290 6: addl %ecx, %eax
21291 adcl $0, %eax
21292 7:
21293 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
21294
21295 6001:
21296 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21297 - movl $-EFAULT, (%ebx)
21298 + movl $-EFAULT, %ss:(%ebx)
21299
21300 # zero the complete destination - computing the rest
21301 # is too much work
21302 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
21303
21304 6002:
21305 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21306 - movl $-EFAULT,(%ebx)
21307 + movl $-EFAULT,%ss:(%ebx)
21308 jmp 5000b
21309
21310 .previous
21311
21312 + pushl_cfi %ss
21313 + popl_cfi %ds
21314 + pushl_cfi %ss
21315 + popl_cfi %es
21316 popl_cfi %ebx
21317 CFI_RESTORE ebx
21318 popl_cfi %esi
21319 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21320 popl_cfi %ecx # equivalent to addl $4,%esp
21321 ret
21322 CFI_ENDPROC
21323 -ENDPROC(csum_partial_copy_generic)
21324 +ENDPROC(csum_partial_copy_generic_to_user)
21325
21326 #else
21327
21328 /* Version for PentiumII/PPro */
21329
21330 #define ROUND1(x) \
21331 + nop; nop; nop; \
21332 SRC(movl x(%esi), %ebx ) ; \
21333 addl %ebx, %eax ; \
21334 - DST(movl %ebx, x(%edi) ) ;
21335 + DST(movl %ebx, %es:x(%edi)) ;
21336
21337 #define ROUND(x) \
21338 + nop; nop; nop; \
21339 SRC(movl x(%esi), %ebx ) ; \
21340 adcl %ebx, %eax ; \
21341 - DST(movl %ebx, x(%edi) ) ;
21342 + DST(movl %ebx, %es:x(%edi)) ;
21343
21344 #define ARGBASE 12
21345 -
21346 -ENTRY(csum_partial_copy_generic)
21347 +
21348 +ENTRY(csum_partial_copy_generic_to_user)
21349 CFI_STARTPROC
21350 +
21351 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21352 + pushl_cfi %gs
21353 + popl_cfi %es
21354 + jmp csum_partial_copy_generic
21355 +#endif
21356 +
21357 +ENTRY(csum_partial_copy_generic_from_user)
21358 +
21359 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21360 + pushl_cfi %gs
21361 + popl_cfi %ds
21362 +#endif
21363 +
21364 +ENTRY(csum_partial_copy_generic)
21365 pushl_cfi %ebx
21366 CFI_REL_OFFSET ebx, 0
21367 pushl_cfi %edi
21368 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
21369 subl %ebx, %edi
21370 lea -1(%esi),%edx
21371 andl $-32,%edx
21372 - lea 3f(%ebx,%ebx), %ebx
21373 + lea 3f(%ebx,%ebx,2), %ebx
21374 testl %esi, %esi
21375 jmp *%ebx
21376 1: addl $64,%esi
21377 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
21378 jb 5f
21379 SRC( movw (%esi), %dx )
21380 leal 2(%esi), %esi
21381 -DST( movw %dx, (%edi) )
21382 +DST( movw %dx, %es:(%edi) )
21383 leal 2(%edi), %edi
21384 je 6f
21385 shll $16,%edx
21386 5:
21387 SRC( movb (%esi), %dl )
21388 -DST( movb %dl, (%edi) )
21389 +DST( movb %dl, %es:(%edi) )
21390 6: addl %edx, %eax
21391 adcl $0, %eax
21392 7:
21393 .section .fixup, "ax"
21394 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21395 - movl $-EFAULT, (%ebx)
21396 + movl $-EFAULT, %ss:(%ebx)
21397 # zero the complete destination (computing the rest is too much work)
21398 movl ARGBASE+8(%esp),%edi # dst
21399 movl ARGBASE+12(%esp),%ecx # len
21400 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
21401 rep; stosb
21402 jmp 7b
21403 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21404 - movl $-EFAULT, (%ebx)
21405 + movl $-EFAULT, %ss:(%ebx)
21406 jmp 7b
21407 .previous
21408
21409 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21410 + pushl_cfi %ss
21411 + popl_cfi %ds
21412 + pushl_cfi %ss
21413 + popl_cfi %es
21414 +#endif
21415 +
21416 popl_cfi %esi
21417 CFI_RESTORE esi
21418 popl_cfi %edi
21419 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
21420 CFI_RESTORE ebx
21421 ret
21422 CFI_ENDPROC
21423 -ENDPROC(csum_partial_copy_generic)
21424 +ENDPROC(csum_partial_copy_generic_to_user)
21425
21426 #undef ROUND
21427 #undef ROUND1
21428 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21429 index f2145cf..cea889d 100644
21430 --- a/arch/x86/lib/clear_page_64.S
21431 +++ b/arch/x86/lib/clear_page_64.S
21432 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
21433 movl $4096/8,%ecx
21434 xorl %eax,%eax
21435 rep stosq
21436 + pax_force_retaddr
21437 ret
21438 CFI_ENDPROC
21439 ENDPROC(clear_page_c)
21440 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
21441 movl $4096,%ecx
21442 xorl %eax,%eax
21443 rep stosb
21444 + pax_force_retaddr
21445 ret
21446 CFI_ENDPROC
21447 ENDPROC(clear_page_c_e)
21448 @@ -43,6 +45,7 @@ ENTRY(clear_page)
21449 leaq 64(%rdi),%rdi
21450 jnz .Lloop
21451 nop
21452 + pax_force_retaddr
21453 ret
21454 CFI_ENDPROC
21455 .Lclear_page_end:
21456 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
21457
21458 #include <asm/cpufeature.h>
21459
21460 - .section .altinstr_replacement,"ax"
21461 + .section .altinstr_replacement,"a"
21462 1: .byte 0xeb /* jmp <disp8> */
21463 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21464 2: .byte 0xeb /* jmp <disp8> */
21465 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21466 index 1e572c5..2a162cd 100644
21467 --- a/arch/x86/lib/cmpxchg16b_emu.S
21468 +++ b/arch/x86/lib/cmpxchg16b_emu.S
21469 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21470
21471 popf
21472 mov $1, %al
21473 + pax_force_retaddr
21474 ret
21475
21476 not_same:
21477 popf
21478 xor %al,%al
21479 + pax_force_retaddr
21480 ret
21481
21482 CFI_ENDPROC
21483 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21484 index 6b34d04..dccb07f 100644
21485 --- a/arch/x86/lib/copy_page_64.S
21486 +++ b/arch/x86/lib/copy_page_64.S
21487 @@ -9,6 +9,7 @@ copy_page_c:
21488 CFI_STARTPROC
21489 movl $4096/8,%ecx
21490 rep movsq
21491 + pax_force_retaddr
21492 ret
21493 CFI_ENDPROC
21494 ENDPROC(copy_page_c)
21495 @@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21496
21497 ENTRY(copy_page)
21498 CFI_STARTPROC
21499 - subq $2*8,%rsp
21500 - CFI_ADJUST_CFA_OFFSET 2*8
21501 + subq $3*8,%rsp
21502 + CFI_ADJUST_CFA_OFFSET 3*8
21503 movq %rbx,(%rsp)
21504 CFI_REL_OFFSET rbx, 0
21505 movq %r12,1*8(%rsp)
21506 CFI_REL_OFFSET r12, 1*8
21507 + movq %r13,2*8(%rsp)
21508 + CFI_REL_OFFSET r13, 2*8
21509
21510 movl $(4096/64)-5,%ecx
21511 .p2align 4
21512 @@ -37,7 +40,7 @@ ENTRY(copy_page)
21513 movq 16 (%rsi), %rdx
21514 movq 24 (%rsi), %r8
21515 movq 32 (%rsi), %r9
21516 - movq 40 (%rsi), %r10
21517 + movq 40 (%rsi), %r13
21518 movq 48 (%rsi), %r11
21519 movq 56 (%rsi), %r12
21520
21521 @@ -48,7 +51,7 @@ ENTRY(copy_page)
21522 movq %rdx, 16 (%rdi)
21523 movq %r8, 24 (%rdi)
21524 movq %r9, 32 (%rdi)
21525 - movq %r10, 40 (%rdi)
21526 + movq %r13, 40 (%rdi)
21527 movq %r11, 48 (%rdi)
21528 movq %r12, 56 (%rdi)
21529
21530 @@ -67,7 +70,7 @@ ENTRY(copy_page)
21531 movq 16 (%rsi), %rdx
21532 movq 24 (%rsi), %r8
21533 movq 32 (%rsi), %r9
21534 - movq 40 (%rsi), %r10
21535 + movq 40 (%rsi), %r13
21536 movq 48 (%rsi), %r11
21537 movq 56 (%rsi), %r12
21538
21539 @@ -76,7 +79,7 @@ ENTRY(copy_page)
21540 movq %rdx, 16 (%rdi)
21541 movq %r8, 24 (%rdi)
21542 movq %r9, 32 (%rdi)
21543 - movq %r10, 40 (%rdi)
21544 + movq %r13, 40 (%rdi)
21545 movq %r11, 48 (%rdi)
21546 movq %r12, 56 (%rdi)
21547
21548 @@ -89,8 +92,11 @@ ENTRY(copy_page)
21549 CFI_RESTORE rbx
21550 movq 1*8(%rsp),%r12
21551 CFI_RESTORE r12
21552 - addq $2*8,%rsp
21553 - CFI_ADJUST_CFA_OFFSET -2*8
21554 + movq 2*8(%rsp),%r13
21555 + CFI_RESTORE r13
21556 + addq $3*8,%rsp
21557 + CFI_ADJUST_CFA_OFFSET -3*8
21558 + pax_force_retaddr
21559 ret
21560 .Lcopy_page_end:
21561 CFI_ENDPROC
21562 @@ -101,7 +107,7 @@ ENDPROC(copy_page)
21563
21564 #include <asm/cpufeature.h>
21565
21566 - .section .altinstr_replacement,"ax"
21567 + .section .altinstr_replacement,"a"
21568 1: .byte 0xeb /* jmp <disp8> */
21569 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21570 2:
21571 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21572 index 0248402..821c786 100644
21573 --- a/arch/x86/lib/copy_user_64.S
21574 +++ b/arch/x86/lib/copy_user_64.S
21575 @@ -16,6 +16,7 @@
21576 #include <asm/thread_info.h>
21577 #include <asm/cpufeature.h>
21578 #include <asm/alternative-asm.h>
21579 +#include <asm/pgtable.h>
21580
21581 /*
21582 * By placing feature2 after feature1 in altinstructions section, we logically
21583 @@ -29,7 +30,7 @@
21584 .byte 0xe9 /* 32bit jump */
21585 .long \orig-1f /* by default jump to orig */
21586 1:
21587 - .section .altinstr_replacement,"ax"
21588 + .section .altinstr_replacement,"a"
21589 2: .byte 0xe9 /* near jump with 32bit immediate */
21590 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21591 3: .byte 0xe9 /* near jump with 32bit immediate */
21592 @@ -71,47 +72,20 @@
21593 #endif
21594 .endm
21595
21596 -/* Standard copy_to_user with segment limit checking */
21597 -ENTRY(_copy_to_user)
21598 - CFI_STARTPROC
21599 - GET_THREAD_INFO(%rax)
21600 - movq %rdi,%rcx
21601 - addq %rdx,%rcx
21602 - jc bad_to_user
21603 - cmpq TI_addr_limit(%rax),%rcx
21604 - ja bad_to_user
21605 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21606 - copy_user_generic_unrolled,copy_user_generic_string, \
21607 - copy_user_enhanced_fast_string
21608 - CFI_ENDPROC
21609 -ENDPROC(_copy_to_user)
21610 -
21611 -/* Standard copy_from_user with segment limit checking */
21612 -ENTRY(_copy_from_user)
21613 - CFI_STARTPROC
21614 - GET_THREAD_INFO(%rax)
21615 - movq %rsi,%rcx
21616 - addq %rdx,%rcx
21617 - jc bad_from_user
21618 - cmpq TI_addr_limit(%rax),%rcx
21619 - ja bad_from_user
21620 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21621 - copy_user_generic_unrolled,copy_user_generic_string, \
21622 - copy_user_enhanced_fast_string
21623 - CFI_ENDPROC
21624 -ENDPROC(_copy_from_user)
21625 -
21626 .section .fixup,"ax"
21627 /* must zero dest */
21628 ENTRY(bad_from_user)
21629 bad_from_user:
21630 CFI_STARTPROC
21631 + testl %edx,%edx
21632 + js bad_to_user
21633 movl %edx,%ecx
21634 xorl %eax,%eax
21635 rep
21636 stosb
21637 bad_to_user:
21638 movl %edx,%eax
21639 + pax_force_retaddr
21640 ret
21641 CFI_ENDPROC
21642 ENDPROC(bad_from_user)
21643 @@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21644 jz 17f
21645 1: movq (%rsi),%r8
21646 2: movq 1*8(%rsi),%r9
21647 -3: movq 2*8(%rsi),%r10
21648 +3: movq 2*8(%rsi),%rax
21649 4: movq 3*8(%rsi),%r11
21650 5: movq %r8,(%rdi)
21651 6: movq %r9,1*8(%rdi)
21652 -7: movq %r10,2*8(%rdi)
21653 +7: movq %rax,2*8(%rdi)
21654 8: movq %r11,3*8(%rdi)
21655 9: movq 4*8(%rsi),%r8
21656 10: movq 5*8(%rsi),%r9
21657 -11: movq 6*8(%rsi),%r10
21658 +11: movq 6*8(%rsi),%rax
21659 12: movq 7*8(%rsi),%r11
21660 13: movq %r8,4*8(%rdi)
21661 14: movq %r9,5*8(%rdi)
21662 -15: movq %r10,6*8(%rdi)
21663 +15: movq %rax,6*8(%rdi)
21664 16: movq %r11,7*8(%rdi)
21665 leaq 64(%rsi),%rsi
21666 leaq 64(%rdi),%rdi
21667 @@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
21668 decl %ecx
21669 jnz 21b
21670 23: xor %eax,%eax
21671 + pax_force_retaddr
21672 ret
21673
21674 .section .fixup,"ax"
21675 @@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
21676 3: rep
21677 movsb
21678 4: xorl %eax,%eax
21679 + pax_force_retaddr
21680 ret
21681
21682 .section .fixup,"ax"
21683 @@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
21684 1: rep
21685 movsb
21686 2: xorl %eax,%eax
21687 + pax_force_retaddr
21688 ret
21689
21690 .section .fixup,"ax"
21691 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21692 index cb0c112..e3a6895 100644
21693 --- a/arch/x86/lib/copy_user_nocache_64.S
21694 +++ b/arch/x86/lib/copy_user_nocache_64.S
21695 @@ -8,12 +8,14 @@
21696
21697 #include <linux/linkage.h>
21698 #include <asm/dwarf2.h>
21699 +#include <asm/alternative-asm.h>
21700
21701 #define FIX_ALIGNMENT 1
21702
21703 #include <asm/current.h>
21704 #include <asm/asm-offsets.h>
21705 #include <asm/thread_info.h>
21706 +#include <asm/pgtable.h>
21707
21708 .macro ALIGN_DESTINATION
21709 #ifdef FIX_ALIGNMENT
21710 @@ -50,6 +52,15 @@
21711 */
21712 ENTRY(__copy_user_nocache)
21713 CFI_STARTPROC
21714 +
21715 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21716 + mov $PAX_USER_SHADOW_BASE,%rcx
21717 + cmp %rcx,%rsi
21718 + jae 1f
21719 + add %rcx,%rsi
21720 +1:
21721 +#endif
21722 +
21723 cmpl $8,%edx
21724 jb 20f /* less then 8 bytes, go to byte copy loop */
21725 ALIGN_DESTINATION
21726 @@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21727 jz 17f
21728 1: movq (%rsi),%r8
21729 2: movq 1*8(%rsi),%r9
21730 -3: movq 2*8(%rsi),%r10
21731 +3: movq 2*8(%rsi),%rax
21732 4: movq 3*8(%rsi),%r11
21733 5: movnti %r8,(%rdi)
21734 6: movnti %r9,1*8(%rdi)
21735 -7: movnti %r10,2*8(%rdi)
21736 +7: movnti %rax,2*8(%rdi)
21737 8: movnti %r11,3*8(%rdi)
21738 9: movq 4*8(%rsi),%r8
21739 10: movq 5*8(%rsi),%r9
21740 -11: movq 6*8(%rsi),%r10
21741 +11: movq 6*8(%rsi),%rax
21742 12: movq 7*8(%rsi),%r11
21743 13: movnti %r8,4*8(%rdi)
21744 14: movnti %r9,5*8(%rdi)
21745 -15: movnti %r10,6*8(%rdi)
21746 +15: movnti %rax,6*8(%rdi)
21747 16: movnti %r11,7*8(%rdi)
21748 leaq 64(%rsi),%rsi
21749 leaq 64(%rdi),%rdi
21750 @@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
21751 jnz 21b
21752 23: xorl %eax,%eax
21753 sfence
21754 + pax_force_retaddr
21755 ret
21756
21757 .section .fixup,"ax"
21758 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21759 index fb903b7..c92b7f7 100644
21760 --- a/arch/x86/lib/csum-copy_64.S
21761 +++ b/arch/x86/lib/csum-copy_64.S
21762 @@ -8,6 +8,7 @@
21763 #include <linux/linkage.h>
21764 #include <asm/dwarf2.h>
21765 #include <asm/errno.h>
21766 +#include <asm/alternative-asm.h>
21767
21768 /*
21769 * Checksum copy with exception handling.
21770 @@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
21771 CFI_RESTORE rbp
21772 addq $7*8, %rsp
21773 CFI_ADJUST_CFA_OFFSET -7*8
21774 + pax_force_retaddr 0, 1
21775 ret
21776 CFI_RESTORE_STATE
21777
21778 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21779 index 459b58a..9570bc7 100644
21780 --- a/arch/x86/lib/csum-wrappers_64.c
21781 +++ b/arch/x86/lib/csum-wrappers_64.c
21782 @@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
21783 len -= 2;
21784 }
21785 }
21786 - isum = csum_partial_copy_generic((__force const void *)src,
21787 +
21788 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21789 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21790 + src += PAX_USER_SHADOW_BASE;
21791 +#endif
21792 +
21793 + isum = csum_partial_copy_generic((const void __force_kernel *)src,
21794 dst, len, isum, errp, NULL);
21795 if (unlikely(*errp))
21796 goto out_err;
21797 @@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
21798 }
21799
21800 *errp = 0;
21801 - return csum_partial_copy_generic(src, (void __force *)dst,
21802 +
21803 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21804 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21805 + dst += PAX_USER_SHADOW_BASE;
21806 +#endif
21807 +
21808 + return csum_partial_copy_generic(src, (void __force_kernel *)dst,
21809 len, isum, NULL, errp);
21810 }
21811 EXPORT_SYMBOL(csum_partial_copy_to_user);
21812 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21813 index 51f1504..ddac4c1 100644
21814 --- a/arch/x86/lib/getuser.S
21815 +++ b/arch/x86/lib/getuser.S
21816 @@ -33,15 +33,38 @@
21817 #include <asm/asm-offsets.h>
21818 #include <asm/thread_info.h>
21819 #include <asm/asm.h>
21820 +#include <asm/segment.h>
21821 +#include <asm/pgtable.h>
21822 +#include <asm/alternative-asm.h>
21823 +
21824 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21825 +#define __copyuser_seg gs;
21826 +#else
21827 +#define __copyuser_seg
21828 +#endif
21829
21830 .text
21831 ENTRY(__get_user_1)
21832 CFI_STARTPROC
21833 +
21834 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21835 GET_THREAD_INFO(%_ASM_DX)
21836 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21837 jae bad_get_user
21838 -1: movzb (%_ASM_AX),%edx
21839 +
21840 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21841 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21842 + cmp %_ASM_DX,%_ASM_AX
21843 + jae 1234f
21844 + add %_ASM_DX,%_ASM_AX
21845 +1234:
21846 +#endif
21847 +
21848 +#endif
21849 +
21850 +1: __copyuser_seg movzb (%_ASM_AX),%edx
21851 xor %eax,%eax
21852 + pax_force_retaddr
21853 ret
21854 CFI_ENDPROC
21855 ENDPROC(__get_user_1)
21856 @@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
21857 ENTRY(__get_user_2)
21858 CFI_STARTPROC
21859 add $1,%_ASM_AX
21860 +
21861 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21862 jc bad_get_user
21863 GET_THREAD_INFO(%_ASM_DX)
21864 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21865 jae bad_get_user
21866 -2: movzwl -1(%_ASM_AX),%edx
21867 +
21868 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21869 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21870 + cmp %_ASM_DX,%_ASM_AX
21871 + jae 1234f
21872 + add %_ASM_DX,%_ASM_AX
21873 +1234:
21874 +#endif
21875 +
21876 +#endif
21877 +
21878 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
21879 xor %eax,%eax
21880 + pax_force_retaddr
21881 ret
21882 CFI_ENDPROC
21883 ENDPROC(__get_user_2)
21884 @@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
21885 ENTRY(__get_user_4)
21886 CFI_STARTPROC
21887 add $3,%_ASM_AX
21888 +
21889 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
21890 jc bad_get_user
21891 GET_THREAD_INFO(%_ASM_DX)
21892 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21893 jae bad_get_user
21894 -3: mov -3(%_ASM_AX),%edx
21895 +
21896 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21897 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21898 + cmp %_ASM_DX,%_ASM_AX
21899 + jae 1234f
21900 + add %_ASM_DX,%_ASM_AX
21901 +1234:
21902 +#endif
21903 +
21904 +#endif
21905 +
21906 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
21907 xor %eax,%eax
21908 + pax_force_retaddr
21909 ret
21910 CFI_ENDPROC
21911 ENDPROC(__get_user_4)
21912 @@ -80,8 +131,18 @@ ENTRY(__get_user_8)
21913 GET_THREAD_INFO(%_ASM_DX)
21914 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21915 jae bad_get_user
21916 +
21917 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21918 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21919 + cmp %_ASM_DX,%_ASM_AX
21920 + jae 1234f
21921 + add %_ASM_DX,%_ASM_AX
21922 +1234:
21923 +#endif
21924 +
21925 4: movq -7(%_ASM_AX),%_ASM_DX
21926 xor %eax,%eax
21927 + pax_force_retaddr
21928 ret
21929 CFI_ENDPROC
21930 ENDPROC(__get_user_8)
21931 @@ -91,6 +152,7 @@ bad_get_user:
21932 CFI_STARTPROC
21933 xor %edx,%edx
21934 mov $(-EFAULT),%_ASM_AX
21935 + pax_force_retaddr
21936 ret
21937 CFI_ENDPROC
21938 END(bad_get_user)
21939 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
21940 index b1e6c4b..21ae8fc 100644
21941 --- a/arch/x86/lib/insn.c
21942 +++ b/arch/x86/lib/insn.c
21943 @@ -21,6 +21,11 @@
21944 #include <linux/string.h>
21945 #include <asm/inat.h>
21946 #include <asm/insn.h>
21947 +#ifdef __KERNEL__
21948 +#include <asm/pgtable_types.h>
21949 +#else
21950 +#define ktla_ktva(addr) addr
21951 +#endif
21952
21953 /* Verify next sizeof(t) bytes can be on the same instruction */
21954 #define validate_next(t, insn, n) \
21955 @@ -49,8 +54,8 @@
21956 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21957 {
21958 memset(insn, 0, sizeof(*insn));
21959 - insn->kaddr = kaddr;
21960 - insn->next_byte = kaddr;
21961 + insn->kaddr = ktla_ktva(kaddr);
21962 + insn->next_byte = ktla_ktva(kaddr);
21963 insn->x86_64 = x86_64 ? 1 : 0;
21964 insn->opnd_bytes = 4;
21965 if (x86_64)
21966 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21967 index 05a95e7..326f2fa 100644
21968 --- a/arch/x86/lib/iomap_copy_64.S
21969 +++ b/arch/x86/lib/iomap_copy_64.S
21970 @@ -17,6 +17,7 @@
21971
21972 #include <linux/linkage.h>
21973 #include <asm/dwarf2.h>
21974 +#include <asm/alternative-asm.h>
21975
21976 /*
21977 * override generic version in lib/iomap_copy.c
21978 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
21979 CFI_STARTPROC
21980 movl %edx,%ecx
21981 rep movsd
21982 + pax_force_retaddr
21983 ret
21984 CFI_ENDPROC
21985 ENDPROC(__iowrite32_copy)
21986 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
21987 index 1c273be..da9cc0e 100644
21988 --- a/arch/x86/lib/memcpy_64.S
21989 +++ b/arch/x86/lib/memcpy_64.S
21990 @@ -33,6 +33,7 @@
21991 rep movsq
21992 movl %edx, %ecx
21993 rep movsb
21994 + pax_force_retaddr
21995 ret
21996 .Lmemcpy_e:
21997 .previous
21998 @@ -49,6 +50,7 @@
21999 movq %rdi, %rax
22000 movq %rdx, %rcx
22001 rep movsb
22002 + pax_force_retaddr
22003 ret
22004 .Lmemcpy_e_e:
22005 .previous
22006 @@ -76,13 +78,13 @@ ENTRY(memcpy)
22007 */
22008 movq 0*8(%rsi), %r8
22009 movq 1*8(%rsi), %r9
22010 - movq 2*8(%rsi), %r10
22011 + movq 2*8(%rsi), %rcx
22012 movq 3*8(%rsi), %r11
22013 leaq 4*8(%rsi), %rsi
22014
22015 movq %r8, 0*8(%rdi)
22016 movq %r9, 1*8(%rdi)
22017 - movq %r10, 2*8(%rdi)
22018 + movq %rcx, 2*8(%rdi)
22019 movq %r11, 3*8(%rdi)
22020 leaq 4*8(%rdi), %rdi
22021 jae .Lcopy_forward_loop
22022 @@ -105,12 +107,12 @@ ENTRY(memcpy)
22023 subq $0x20, %rdx
22024 movq -1*8(%rsi), %r8
22025 movq -2*8(%rsi), %r9
22026 - movq -3*8(%rsi), %r10
22027 + movq -3*8(%rsi), %rcx
22028 movq -4*8(%rsi), %r11
22029 leaq -4*8(%rsi), %rsi
22030 movq %r8, -1*8(%rdi)
22031 movq %r9, -2*8(%rdi)
22032 - movq %r10, -3*8(%rdi)
22033 + movq %rcx, -3*8(%rdi)
22034 movq %r11, -4*8(%rdi)
22035 leaq -4*8(%rdi), %rdi
22036 jae .Lcopy_backward_loop
22037 @@ -130,12 +132,13 @@ ENTRY(memcpy)
22038 */
22039 movq 0*8(%rsi), %r8
22040 movq 1*8(%rsi), %r9
22041 - movq -2*8(%rsi, %rdx), %r10
22042 + movq -2*8(%rsi, %rdx), %rcx
22043 movq -1*8(%rsi, %rdx), %r11
22044 movq %r8, 0*8(%rdi)
22045 movq %r9, 1*8(%rdi)
22046 - movq %r10, -2*8(%rdi, %rdx)
22047 + movq %rcx, -2*8(%rdi, %rdx)
22048 movq %r11, -1*8(%rdi, %rdx)
22049 + pax_force_retaddr
22050 retq
22051 .p2align 4
22052 .Lless_16bytes:
22053 @@ -148,6 +151,7 @@ ENTRY(memcpy)
22054 movq -1*8(%rsi, %rdx), %r9
22055 movq %r8, 0*8(%rdi)
22056 movq %r9, -1*8(%rdi, %rdx)
22057 + pax_force_retaddr
22058 retq
22059 .p2align 4
22060 .Lless_8bytes:
22061 @@ -161,6 +165,7 @@ ENTRY(memcpy)
22062 movl -4(%rsi, %rdx), %r8d
22063 movl %ecx, (%rdi)
22064 movl %r8d, -4(%rdi, %rdx)
22065 + pax_force_retaddr
22066 retq
22067 .p2align 4
22068 .Lless_3bytes:
22069 @@ -179,6 +184,7 @@ ENTRY(memcpy)
22070 movb %cl, (%rdi)
22071
22072 .Lend:
22073 + pax_force_retaddr
22074 retq
22075 CFI_ENDPROC
22076 ENDPROC(memcpy)
22077 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22078 index ee16461..c39c199 100644
22079 --- a/arch/x86/lib/memmove_64.S
22080 +++ b/arch/x86/lib/memmove_64.S
22081 @@ -61,13 +61,13 @@ ENTRY(memmove)
22082 5:
22083 sub $0x20, %rdx
22084 movq 0*8(%rsi), %r11
22085 - movq 1*8(%rsi), %r10
22086 + movq 1*8(%rsi), %rcx
22087 movq 2*8(%rsi), %r9
22088 movq 3*8(%rsi), %r8
22089 leaq 4*8(%rsi), %rsi
22090
22091 movq %r11, 0*8(%rdi)
22092 - movq %r10, 1*8(%rdi)
22093 + movq %rcx, 1*8(%rdi)
22094 movq %r9, 2*8(%rdi)
22095 movq %r8, 3*8(%rdi)
22096 leaq 4*8(%rdi), %rdi
22097 @@ -81,10 +81,10 @@ ENTRY(memmove)
22098 4:
22099 movq %rdx, %rcx
22100 movq -8(%rsi, %rdx), %r11
22101 - lea -8(%rdi, %rdx), %r10
22102 + lea -8(%rdi, %rdx), %r9
22103 shrq $3, %rcx
22104 rep movsq
22105 - movq %r11, (%r10)
22106 + movq %r11, (%r9)
22107 jmp 13f
22108 .Lmemmove_end_forward:
22109
22110 @@ -95,14 +95,14 @@ ENTRY(memmove)
22111 7:
22112 movq %rdx, %rcx
22113 movq (%rsi), %r11
22114 - movq %rdi, %r10
22115 + movq %rdi, %r9
22116 leaq -8(%rsi, %rdx), %rsi
22117 leaq -8(%rdi, %rdx), %rdi
22118 shrq $3, %rcx
22119 std
22120 rep movsq
22121 cld
22122 - movq %r11, (%r10)
22123 + movq %r11, (%r9)
22124 jmp 13f
22125
22126 /*
22127 @@ -127,13 +127,13 @@ ENTRY(memmove)
22128 8:
22129 subq $0x20, %rdx
22130 movq -1*8(%rsi), %r11
22131 - movq -2*8(%rsi), %r10
22132 + movq -2*8(%rsi), %rcx
22133 movq -3*8(%rsi), %r9
22134 movq -4*8(%rsi), %r8
22135 leaq -4*8(%rsi), %rsi
22136
22137 movq %r11, -1*8(%rdi)
22138 - movq %r10, -2*8(%rdi)
22139 + movq %rcx, -2*8(%rdi)
22140 movq %r9, -3*8(%rdi)
22141 movq %r8, -4*8(%rdi)
22142 leaq -4*8(%rdi), %rdi
22143 @@ -151,11 +151,11 @@ ENTRY(memmove)
22144 * Move data from 16 bytes to 31 bytes.
22145 */
22146 movq 0*8(%rsi), %r11
22147 - movq 1*8(%rsi), %r10
22148 + movq 1*8(%rsi), %rcx
22149 movq -2*8(%rsi, %rdx), %r9
22150 movq -1*8(%rsi, %rdx), %r8
22151 movq %r11, 0*8(%rdi)
22152 - movq %r10, 1*8(%rdi)
22153 + movq %rcx, 1*8(%rdi)
22154 movq %r9, -2*8(%rdi, %rdx)
22155 movq %r8, -1*8(%rdi, %rdx)
22156 jmp 13f
22157 @@ -167,9 +167,9 @@ ENTRY(memmove)
22158 * Move data from 8 bytes to 15 bytes.
22159 */
22160 movq 0*8(%rsi), %r11
22161 - movq -1*8(%rsi, %rdx), %r10
22162 + movq -1*8(%rsi, %rdx), %r9
22163 movq %r11, 0*8(%rdi)
22164 - movq %r10, -1*8(%rdi, %rdx)
22165 + movq %r9, -1*8(%rdi, %rdx)
22166 jmp 13f
22167 10:
22168 cmpq $4, %rdx
22169 @@ -178,9 +178,9 @@ ENTRY(memmove)
22170 * Move data from 4 bytes to 7 bytes.
22171 */
22172 movl (%rsi), %r11d
22173 - movl -4(%rsi, %rdx), %r10d
22174 + movl -4(%rsi, %rdx), %r9d
22175 movl %r11d, (%rdi)
22176 - movl %r10d, -4(%rdi, %rdx)
22177 + movl %r9d, -4(%rdi, %rdx)
22178 jmp 13f
22179 11:
22180 cmp $2, %rdx
22181 @@ -189,9 +189,9 @@ ENTRY(memmove)
22182 * Move data from 2 bytes to 3 bytes.
22183 */
22184 movw (%rsi), %r11w
22185 - movw -2(%rsi, %rdx), %r10w
22186 + movw -2(%rsi, %rdx), %r9w
22187 movw %r11w, (%rdi)
22188 - movw %r10w, -2(%rdi, %rdx)
22189 + movw %r9w, -2(%rdi, %rdx)
22190 jmp 13f
22191 12:
22192 cmp $1, %rdx
22193 @@ -202,6 +202,7 @@ ENTRY(memmove)
22194 movb (%rsi), %r11b
22195 movb %r11b, (%rdi)
22196 13:
22197 + pax_force_retaddr
22198 retq
22199 CFI_ENDPROC
22200
22201 @@ -210,6 +211,7 @@ ENTRY(memmove)
22202 /* Forward moving data. */
22203 movq %rdx, %rcx
22204 rep movsb
22205 + pax_force_retaddr
22206 retq
22207 .Lmemmove_end_forward_efs:
22208 .previous
22209 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22210 index 2dcb380..963660a 100644
22211 --- a/arch/x86/lib/memset_64.S
22212 +++ b/arch/x86/lib/memset_64.S
22213 @@ -30,6 +30,7 @@
22214 movl %edx,%ecx
22215 rep stosb
22216 movq %r9,%rax
22217 + pax_force_retaddr
22218 ret
22219 .Lmemset_e:
22220 .previous
22221 @@ -52,6 +53,7 @@
22222 movq %rdx,%rcx
22223 rep stosb
22224 movq %r9,%rax
22225 + pax_force_retaddr
22226 ret
22227 .Lmemset_e_e:
22228 .previous
22229 @@ -59,7 +61,7 @@
22230 ENTRY(memset)
22231 ENTRY(__memset)
22232 CFI_STARTPROC
22233 - movq %rdi,%r10
22234 + movq %rdi,%r11
22235
22236 /* expand byte value */
22237 movzbl %sil,%ecx
22238 @@ -117,7 +119,8 @@ ENTRY(__memset)
22239 jnz .Lloop_1
22240
22241 .Lende:
22242 - movq %r10,%rax
22243 + movq %r11,%rax
22244 + pax_force_retaddr
22245 ret
22246
22247 CFI_RESTORE_STATE
22248 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22249 index c9f2d9b..e7fd2c0 100644
22250 --- a/arch/x86/lib/mmx_32.c
22251 +++ b/arch/x86/lib/mmx_32.c
22252 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22253 {
22254 void *p;
22255 int i;
22256 + unsigned long cr0;
22257
22258 if (unlikely(in_interrupt()))
22259 return __memcpy(to, from, len);
22260 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22261 kernel_fpu_begin();
22262
22263 __asm__ __volatile__ (
22264 - "1: prefetch (%0)\n" /* This set is 28 bytes */
22265 - " prefetch 64(%0)\n"
22266 - " prefetch 128(%0)\n"
22267 - " prefetch 192(%0)\n"
22268 - " prefetch 256(%0)\n"
22269 + "1: prefetch (%1)\n" /* This set is 28 bytes */
22270 + " prefetch 64(%1)\n"
22271 + " prefetch 128(%1)\n"
22272 + " prefetch 192(%1)\n"
22273 + " prefetch 256(%1)\n"
22274 "2: \n"
22275 ".section .fixup, \"ax\"\n"
22276 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22277 + "3: \n"
22278 +
22279 +#ifdef CONFIG_PAX_KERNEXEC
22280 + " movl %%cr0, %0\n"
22281 + " movl %0, %%eax\n"
22282 + " andl $0xFFFEFFFF, %%eax\n"
22283 + " movl %%eax, %%cr0\n"
22284 +#endif
22285 +
22286 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22287 +
22288 +#ifdef CONFIG_PAX_KERNEXEC
22289 + " movl %0, %%cr0\n"
22290 +#endif
22291 +
22292 " jmp 2b\n"
22293 ".previous\n"
22294 _ASM_EXTABLE(1b, 3b)
22295 - : : "r" (from));
22296 + : "=&r" (cr0) : "r" (from) : "ax");
22297
22298 for ( ; i > 5; i--) {
22299 __asm__ __volatile__ (
22300 - "1: prefetch 320(%0)\n"
22301 - "2: movq (%0), %%mm0\n"
22302 - " movq 8(%0), %%mm1\n"
22303 - " movq 16(%0), %%mm2\n"
22304 - " movq 24(%0), %%mm3\n"
22305 - " movq %%mm0, (%1)\n"
22306 - " movq %%mm1, 8(%1)\n"
22307 - " movq %%mm2, 16(%1)\n"
22308 - " movq %%mm3, 24(%1)\n"
22309 - " movq 32(%0), %%mm0\n"
22310 - " movq 40(%0), %%mm1\n"
22311 - " movq 48(%0), %%mm2\n"
22312 - " movq 56(%0), %%mm3\n"
22313 - " movq %%mm0, 32(%1)\n"
22314 - " movq %%mm1, 40(%1)\n"
22315 - " movq %%mm2, 48(%1)\n"
22316 - " movq %%mm3, 56(%1)\n"
22317 + "1: prefetch 320(%1)\n"
22318 + "2: movq (%1), %%mm0\n"
22319 + " movq 8(%1), %%mm1\n"
22320 + " movq 16(%1), %%mm2\n"
22321 + " movq 24(%1), %%mm3\n"
22322 + " movq %%mm0, (%2)\n"
22323 + " movq %%mm1, 8(%2)\n"
22324 + " movq %%mm2, 16(%2)\n"
22325 + " movq %%mm3, 24(%2)\n"
22326 + " movq 32(%1), %%mm0\n"
22327 + " movq 40(%1), %%mm1\n"
22328 + " movq 48(%1), %%mm2\n"
22329 + " movq 56(%1), %%mm3\n"
22330 + " movq %%mm0, 32(%2)\n"
22331 + " movq %%mm1, 40(%2)\n"
22332 + " movq %%mm2, 48(%2)\n"
22333 + " movq %%mm3, 56(%2)\n"
22334 ".section .fixup, \"ax\"\n"
22335 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22336 + "3:\n"
22337 +
22338 +#ifdef CONFIG_PAX_KERNEXEC
22339 + " movl %%cr0, %0\n"
22340 + " movl %0, %%eax\n"
22341 + " andl $0xFFFEFFFF, %%eax\n"
22342 + " movl %%eax, %%cr0\n"
22343 +#endif
22344 +
22345 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22346 +
22347 +#ifdef CONFIG_PAX_KERNEXEC
22348 + " movl %0, %%cr0\n"
22349 +#endif
22350 +
22351 " jmp 2b\n"
22352 ".previous\n"
22353 _ASM_EXTABLE(1b, 3b)
22354 - : : "r" (from), "r" (to) : "memory");
22355 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22356
22357 from += 64;
22358 to += 64;
22359 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22360 static void fast_copy_page(void *to, void *from)
22361 {
22362 int i;
22363 + unsigned long cr0;
22364
22365 kernel_fpu_begin();
22366
22367 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22368 * but that is for later. -AV
22369 */
22370 __asm__ __volatile__(
22371 - "1: prefetch (%0)\n"
22372 - " prefetch 64(%0)\n"
22373 - " prefetch 128(%0)\n"
22374 - " prefetch 192(%0)\n"
22375 - " prefetch 256(%0)\n"
22376 + "1: prefetch (%1)\n"
22377 + " prefetch 64(%1)\n"
22378 + " prefetch 128(%1)\n"
22379 + " prefetch 192(%1)\n"
22380 + " prefetch 256(%1)\n"
22381 "2: \n"
22382 ".section .fixup, \"ax\"\n"
22383 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22384 + "3: \n"
22385 +
22386 +#ifdef CONFIG_PAX_KERNEXEC
22387 + " movl %%cr0, %0\n"
22388 + " movl %0, %%eax\n"
22389 + " andl $0xFFFEFFFF, %%eax\n"
22390 + " movl %%eax, %%cr0\n"
22391 +#endif
22392 +
22393 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22394 +
22395 +#ifdef CONFIG_PAX_KERNEXEC
22396 + " movl %0, %%cr0\n"
22397 +#endif
22398 +
22399 " jmp 2b\n"
22400 ".previous\n"
22401 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22402 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22403
22404 for (i = 0; i < (4096-320)/64; i++) {
22405 __asm__ __volatile__ (
22406 - "1: prefetch 320(%0)\n"
22407 - "2: movq (%0), %%mm0\n"
22408 - " movntq %%mm0, (%1)\n"
22409 - " movq 8(%0), %%mm1\n"
22410 - " movntq %%mm1, 8(%1)\n"
22411 - " movq 16(%0), %%mm2\n"
22412 - " movntq %%mm2, 16(%1)\n"
22413 - " movq 24(%0), %%mm3\n"
22414 - " movntq %%mm3, 24(%1)\n"
22415 - " movq 32(%0), %%mm4\n"
22416 - " movntq %%mm4, 32(%1)\n"
22417 - " movq 40(%0), %%mm5\n"
22418 - " movntq %%mm5, 40(%1)\n"
22419 - " movq 48(%0), %%mm6\n"
22420 - " movntq %%mm6, 48(%1)\n"
22421 - " movq 56(%0), %%mm7\n"
22422 - " movntq %%mm7, 56(%1)\n"
22423 + "1: prefetch 320(%1)\n"
22424 + "2: movq (%1), %%mm0\n"
22425 + " movntq %%mm0, (%2)\n"
22426 + " movq 8(%1), %%mm1\n"
22427 + " movntq %%mm1, 8(%2)\n"
22428 + " movq 16(%1), %%mm2\n"
22429 + " movntq %%mm2, 16(%2)\n"
22430 + " movq 24(%1), %%mm3\n"
22431 + " movntq %%mm3, 24(%2)\n"
22432 + " movq 32(%1), %%mm4\n"
22433 + " movntq %%mm4, 32(%2)\n"
22434 + " movq 40(%1), %%mm5\n"
22435 + " movntq %%mm5, 40(%2)\n"
22436 + " movq 48(%1), %%mm6\n"
22437 + " movntq %%mm6, 48(%2)\n"
22438 + " movq 56(%1), %%mm7\n"
22439 + " movntq %%mm7, 56(%2)\n"
22440 ".section .fixup, \"ax\"\n"
22441 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22442 + "3:\n"
22443 +
22444 +#ifdef CONFIG_PAX_KERNEXEC
22445 + " movl %%cr0, %0\n"
22446 + " movl %0, %%eax\n"
22447 + " andl $0xFFFEFFFF, %%eax\n"
22448 + " movl %%eax, %%cr0\n"
22449 +#endif
22450 +
22451 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22452 +
22453 +#ifdef CONFIG_PAX_KERNEXEC
22454 + " movl %0, %%cr0\n"
22455 +#endif
22456 +
22457 " jmp 2b\n"
22458 ".previous\n"
22459 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22460 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22461
22462 from += 64;
22463 to += 64;
22464 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22465 static void fast_copy_page(void *to, void *from)
22466 {
22467 int i;
22468 + unsigned long cr0;
22469
22470 kernel_fpu_begin();
22471
22472 __asm__ __volatile__ (
22473 - "1: prefetch (%0)\n"
22474 - " prefetch 64(%0)\n"
22475 - " prefetch 128(%0)\n"
22476 - " prefetch 192(%0)\n"
22477 - " prefetch 256(%0)\n"
22478 + "1: prefetch (%1)\n"
22479 + " prefetch 64(%1)\n"
22480 + " prefetch 128(%1)\n"
22481 + " prefetch 192(%1)\n"
22482 + " prefetch 256(%1)\n"
22483 "2: \n"
22484 ".section .fixup, \"ax\"\n"
22485 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22486 + "3: \n"
22487 +
22488 +#ifdef CONFIG_PAX_KERNEXEC
22489 + " movl %%cr0, %0\n"
22490 + " movl %0, %%eax\n"
22491 + " andl $0xFFFEFFFF, %%eax\n"
22492 + " movl %%eax, %%cr0\n"
22493 +#endif
22494 +
22495 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22496 +
22497 +#ifdef CONFIG_PAX_KERNEXEC
22498 + " movl %0, %%cr0\n"
22499 +#endif
22500 +
22501 " jmp 2b\n"
22502 ".previous\n"
22503 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
22504 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22505
22506 for (i = 0; i < 4096/64; i++) {
22507 __asm__ __volatile__ (
22508 - "1: prefetch 320(%0)\n"
22509 - "2: movq (%0), %%mm0\n"
22510 - " movq 8(%0), %%mm1\n"
22511 - " movq 16(%0), %%mm2\n"
22512 - " movq 24(%0), %%mm3\n"
22513 - " movq %%mm0, (%1)\n"
22514 - " movq %%mm1, 8(%1)\n"
22515 - " movq %%mm2, 16(%1)\n"
22516 - " movq %%mm3, 24(%1)\n"
22517 - " movq 32(%0), %%mm0\n"
22518 - " movq 40(%0), %%mm1\n"
22519 - " movq 48(%0), %%mm2\n"
22520 - " movq 56(%0), %%mm3\n"
22521 - " movq %%mm0, 32(%1)\n"
22522 - " movq %%mm1, 40(%1)\n"
22523 - " movq %%mm2, 48(%1)\n"
22524 - " movq %%mm3, 56(%1)\n"
22525 + "1: prefetch 320(%1)\n"
22526 + "2: movq (%1), %%mm0\n"
22527 + " movq 8(%1), %%mm1\n"
22528 + " movq 16(%1), %%mm2\n"
22529 + " movq 24(%1), %%mm3\n"
22530 + " movq %%mm0, (%2)\n"
22531 + " movq %%mm1, 8(%2)\n"
22532 + " movq %%mm2, 16(%2)\n"
22533 + " movq %%mm3, 24(%2)\n"
22534 + " movq 32(%1), %%mm0\n"
22535 + " movq 40(%1), %%mm1\n"
22536 + " movq 48(%1), %%mm2\n"
22537 + " movq 56(%1), %%mm3\n"
22538 + " movq %%mm0, 32(%2)\n"
22539 + " movq %%mm1, 40(%2)\n"
22540 + " movq %%mm2, 48(%2)\n"
22541 + " movq %%mm3, 56(%2)\n"
22542 ".section .fixup, \"ax\"\n"
22543 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22544 + "3:\n"
22545 +
22546 +#ifdef CONFIG_PAX_KERNEXEC
22547 + " movl %%cr0, %0\n"
22548 + " movl %0, %%eax\n"
22549 + " andl $0xFFFEFFFF, %%eax\n"
22550 + " movl %%eax, %%cr0\n"
22551 +#endif
22552 +
22553 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22554 +
22555 +#ifdef CONFIG_PAX_KERNEXEC
22556 + " movl %0, %%cr0\n"
22557 +#endif
22558 +
22559 " jmp 2b\n"
22560 ".previous\n"
22561 _ASM_EXTABLE(1b, 3b)
22562 - : : "r" (from), "r" (to) : "memory");
22563 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22564
22565 from += 64;
22566 to += 64;
22567 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22568 index 69fa106..adda88b 100644
22569 --- a/arch/x86/lib/msr-reg.S
22570 +++ b/arch/x86/lib/msr-reg.S
22571 @@ -3,6 +3,7 @@
22572 #include <asm/dwarf2.h>
22573 #include <asm/asm.h>
22574 #include <asm/msr.h>
22575 +#include <asm/alternative-asm.h>
22576
22577 #ifdef CONFIG_X86_64
22578 /*
22579 @@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22580 CFI_STARTPROC
22581 pushq_cfi %rbx
22582 pushq_cfi %rbp
22583 - movq %rdi, %r10 /* Save pointer */
22584 + movq %rdi, %r9 /* Save pointer */
22585 xorl %r11d, %r11d /* Return value */
22586 movl (%rdi), %eax
22587 movl 4(%rdi), %ecx
22588 @@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22589 movl 28(%rdi), %edi
22590 CFI_REMEMBER_STATE
22591 1: \op
22592 -2: movl %eax, (%r10)
22593 +2: movl %eax, (%r9)
22594 movl %r11d, %eax /* Return value */
22595 - movl %ecx, 4(%r10)
22596 - movl %edx, 8(%r10)
22597 - movl %ebx, 12(%r10)
22598 - movl %ebp, 20(%r10)
22599 - movl %esi, 24(%r10)
22600 - movl %edi, 28(%r10)
22601 + movl %ecx, 4(%r9)
22602 + movl %edx, 8(%r9)
22603 + movl %ebx, 12(%r9)
22604 + movl %ebp, 20(%r9)
22605 + movl %esi, 24(%r9)
22606 + movl %edi, 28(%r9)
22607 popq_cfi %rbp
22608 popq_cfi %rbx
22609 + pax_force_retaddr
22610 ret
22611 3:
22612 CFI_RESTORE_STATE
22613 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22614 index 36b0d15..d381858 100644
22615 --- a/arch/x86/lib/putuser.S
22616 +++ b/arch/x86/lib/putuser.S
22617 @@ -15,7 +15,9 @@
22618 #include <asm/thread_info.h>
22619 #include <asm/errno.h>
22620 #include <asm/asm.h>
22621 -
22622 +#include <asm/segment.h>
22623 +#include <asm/pgtable.h>
22624 +#include <asm/alternative-asm.h>
22625
22626 /*
22627 * __put_user_X
22628 @@ -29,52 +31,119 @@
22629 * as they get called from within inline assembly.
22630 */
22631
22632 -#define ENTER CFI_STARTPROC ; \
22633 - GET_THREAD_INFO(%_ASM_BX)
22634 -#define EXIT ret ; \
22635 +#define ENTER CFI_STARTPROC
22636 +#define EXIT pax_force_retaddr; ret ; \
22637 CFI_ENDPROC
22638
22639 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22640 +#define _DEST %_ASM_CX,%_ASM_BX
22641 +#else
22642 +#define _DEST %_ASM_CX
22643 +#endif
22644 +
22645 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22646 +#define __copyuser_seg gs;
22647 +#else
22648 +#define __copyuser_seg
22649 +#endif
22650 +
22651 .text
22652 ENTRY(__put_user_1)
22653 ENTER
22654 +
22655 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22656 + GET_THREAD_INFO(%_ASM_BX)
22657 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22658 jae bad_put_user
22659 -1: movb %al,(%_ASM_CX)
22660 +
22661 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22662 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22663 + cmp %_ASM_BX,%_ASM_CX
22664 + jb 1234f
22665 + xor %ebx,%ebx
22666 +1234:
22667 +#endif
22668 +
22669 +#endif
22670 +
22671 +1: __copyuser_seg movb %al,(_DEST)
22672 xor %eax,%eax
22673 EXIT
22674 ENDPROC(__put_user_1)
22675
22676 ENTRY(__put_user_2)
22677 ENTER
22678 +
22679 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22680 + GET_THREAD_INFO(%_ASM_BX)
22681 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22682 sub $1,%_ASM_BX
22683 cmp %_ASM_BX,%_ASM_CX
22684 jae bad_put_user
22685 -2: movw %ax,(%_ASM_CX)
22686 +
22687 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22688 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22689 + cmp %_ASM_BX,%_ASM_CX
22690 + jb 1234f
22691 + xor %ebx,%ebx
22692 +1234:
22693 +#endif
22694 +
22695 +#endif
22696 +
22697 +2: __copyuser_seg movw %ax,(_DEST)
22698 xor %eax,%eax
22699 EXIT
22700 ENDPROC(__put_user_2)
22701
22702 ENTRY(__put_user_4)
22703 ENTER
22704 +
22705 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22706 + GET_THREAD_INFO(%_ASM_BX)
22707 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22708 sub $3,%_ASM_BX
22709 cmp %_ASM_BX,%_ASM_CX
22710 jae bad_put_user
22711 -3: movl %eax,(%_ASM_CX)
22712 +
22713 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22714 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22715 + cmp %_ASM_BX,%_ASM_CX
22716 + jb 1234f
22717 + xor %ebx,%ebx
22718 +1234:
22719 +#endif
22720 +
22721 +#endif
22722 +
22723 +3: __copyuser_seg movl %eax,(_DEST)
22724 xor %eax,%eax
22725 EXIT
22726 ENDPROC(__put_user_4)
22727
22728 ENTRY(__put_user_8)
22729 ENTER
22730 +
22731 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22732 + GET_THREAD_INFO(%_ASM_BX)
22733 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22734 sub $7,%_ASM_BX
22735 cmp %_ASM_BX,%_ASM_CX
22736 jae bad_put_user
22737 -4: mov %_ASM_AX,(%_ASM_CX)
22738 +
22739 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22740 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22741 + cmp %_ASM_BX,%_ASM_CX
22742 + jb 1234f
22743 + xor %ebx,%ebx
22744 +1234:
22745 +#endif
22746 +
22747 +#endif
22748 +
22749 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
22750 #ifdef CONFIG_X86_32
22751 -5: movl %edx,4(%_ASM_CX)
22752 +5: __copyuser_seg movl %edx,4(_DEST)
22753 #endif
22754 xor %eax,%eax
22755 EXIT
22756 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22757 index 1cad221..de671ee 100644
22758 --- a/arch/x86/lib/rwlock.S
22759 +++ b/arch/x86/lib/rwlock.S
22760 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22761 FRAME
22762 0: LOCK_PREFIX
22763 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22764 +
22765 +#ifdef CONFIG_PAX_REFCOUNT
22766 + jno 1234f
22767 + LOCK_PREFIX
22768 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22769 + int $4
22770 +1234:
22771 + _ASM_EXTABLE(1234b, 1234b)
22772 +#endif
22773 +
22774 1: rep; nop
22775 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22776 jne 1b
22777 LOCK_PREFIX
22778 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22779 +
22780 +#ifdef CONFIG_PAX_REFCOUNT
22781 + jno 1234f
22782 + LOCK_PREFIX
22783 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22784 + int $4
22785 +1234:
22786 + _ASM_EXTABLE(1234b, 1234b)
22787 +#endif
22788 +
22789 jnz 0b
22790 ENDFRAME
22791 + pax_force_retaddr
22792 ret
22793 CFI_ENDPROC
22794 END(__write_lock_failed)
22795 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22796 FRAME
22797 0: LOCK_PREFIX
22798 READ_LOCK_SIZE(inc) (%__lock_ptr)
22799 +
22800 +#ifdef CONFIG_PAX_REFCOUNT
22801 + jno 1234f
22802 + LOCK_PREFIX
22803 + READ_LOCK_SIZE(dec) (%__lock_ptr)
22804 + int $4
22805 +1234:
22806 + _ASM_EXTABLE(1234b, 1234b)
22807 +#endif
22808 +
22809 1: rep; nop
22810 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22811 js 1b
22812 LOCK_PREFIX
22813 READ_LOCK_SIZE(dec) (%__lock_ptr)
22814 +
22815 +#ifdef CONFIG_PAX_REFCOUNT
22816 + jno 1234f
22817 + LOCK_PREFIX
22818 + READ_LOCK_SIZE(inc) (%__lock_ptr)
22819 + int $4
22820 +1234:
22821 + _ASM_EXTABLE(1234b, 1234b)
22822 +#endif
22823 +
22824 js 0b
22825 ENDFRAME
22826 + pax_force_retaddr
22827 ret
22828 CFI_ENDPROC
22829 END(__read_lock_failed)
22830 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22831 index 5dff5f0..cadebf4 100644
22832 --- a/arch/x86/lib/rwsem.S
22833 +++ b/arch/x86/lib/rwsem.S
22834 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22835 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22836 CFI_RESTORE __ASM_REG(dx)
22837 restore_common_regs
22838 + pax_force_retaddr
22839 ret
22840 CFI_ENDPROC
22841 ENDPROC(call_rwsem_down_read_failed)
22842 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
22843 movq %rax,%rdi
22844 call rwsem_down_write_failed
22845 restore_common_regs
22846 + pax_force_retaddr
22847 ret
22848 CFI_ENDPROC
22849 ENDPROC(call_rwsem_down_write_failed)
22850 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
22851 movq %rax,%rdi
22852 call rwsem_wake
22853 restore_common_regs
22854 -1: ret
22855 +1: pax_force_retaddr
22856 + ret
22857 CFI_ENDPROC
22858 ENDPROC(call_rwsem_wake)
22859
22860 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22861 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22862 CFI_RESTORE __ASM_REG(dx)
22863 restore_common_regs
22864 + pax_force_retaddr
22865 ret
22866 CFI_ENDPROC
22867 ENDPROC(call_rwsem_downgrade_wake)
22868 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22869 index a63efd6..ccecad8 100644
22870 --- a/arch/x86/lib/thunk_64.S
22871 +++ b/arch/x86/lib/thunk_64.S
22872 @@ -8,6 +8,7 @@
22873 #include <linux/linkage.h>
22874 #include <asm/dwarf2.h>
22875 #include <asm/calling.h>
22876 +#include <asm/alternative-asm.h>
22877
22878 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22879 .macro THUNK name, func, put_ret_addr_in_rdi=0
22880 @@ -41,5 +42,6 @@
22881 SAVE_ARGS
22882 restore:
22883 RESTORE_ARGS
22884 + pax_force_retaddr
22885 ret
22886 CFI_ENDPROC
22887 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
22888 index ef2a6a5..3b28862 100644
22889 --- a/arch/x86/lib/usercopy_32.c
22890 +++ b/arch/x86/lib/usercopy_32.c
22891 @@ -41,10 +41,12 @@ do { \
22892 int __d0; \
22893 might_fault(); \
22894 __asm__ __volatile__( \
22895 + __COPYUSER_SET_ES \
22896 "0: rep; stosl\n" \
22897 " movl %2,%0\n" \
22898 "1: rep; stosb\n" \
22899 "2:\n" \
22900 + __COPYUSER_RESTORE_ES \
22901 ".section .fixup,\"ax\"\n" \
22902 "3: lea 0(%2,%0,4),%0\n" \
22903 " jmp 2b\n" \
22904 @@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
22905 might_fault();
22906
22907 __asm__ __volatile__(
22908 + __COPYUSER_SET_ES
22909 " testl %0, %0\n"
22910 " jz 3f\n"
22911 " andl %0,%%ecx\n"
22912 @@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
22913 " subl %%ecx,%0\n"
22914 " addl %0,%%eax\n"
22915 "1:\n"
22916 + __COPYUSER_RESTORE_ES
22917 ".section .fixup,\"ax\"\n"
22918 "2: xorl %%eax,%%eax\n"
22919 " jmp 1b\n"
22920 @@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
22921
22922 #ifdef CONFIG_X86_INTEL_USERCOPY
22923 static unsigned long
22924 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
22925 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
22926 {
22927 int d0, d1;
22928 __asm__ __volatile__(
22929 @@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22930 " .align 2,0x90\n"
22931 "3: movl 0(%4), %%eax\n"
22932 "4: movl 4(%4), %%edx\n"
22933 - "5: movl %%eax, 0(%3)\n"
22934 - "6: movl %%edx, 4(%3)\n"
22935 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22936 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
22937 "7: movl 8(%4), %%eax\n"
22938 "8: movl 12(%4),%%edx\n"
22939 - "9: movl %%eax, 8(%3)\n"
22940 - "10: movl %%edx, 12(%3)\n"
22941 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22942 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
22943 "11: movl 16(%4), %%eax\n"
22944 "12: movl 20(%4), %%edx\n"
22945 - "13: movl %%eax, 16(%3)\n"
22946 - "14: movl %%edx, 20(%3)\n"
22947 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22948 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
22949 "15: movl 24(%4), %%eax\n"
22950 "16: movl 28(%4), %%edx\n"
22951 - "17: movl %%eax, 24(%3)\n"
22952 - "18: movl %%edx, 28(%3)\n"
22953 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22954 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
22955 "19: movl 32(%4), %%eax\n"
22956 "20: movl 36(%4), %%edx\n"
22957 - "21: movl %%eax, 32(%3)\n"
22958 - "22: movl %%edx, 36(%3)\n"
22959 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22960 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
22961 "23: movl 40(%4), %%eax\n"
22962 "24: movl 44(%4), %%edx\n"
22963 - "25: movl %%eax, 40(%3)\n"
22964 - "26: movl %%edx, 44(%3)\n"
22965 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22966 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
22967 "27: movl 48(%4), %%eax\n"
22968 "28: movl 52(%4), %%edx\n"
22969 - "29: movl %%eax, 48(%3)\n"
22970 - "30: movl %%edx, 52(%3)\n"
22971 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22972 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
22973 "31: movl 56(%4), %%eax\n"
22974 "32: movl 60(%4), %%edx\n"
22975 - "33: movl %%eax, 56(%3)\n"
22976 - "34: movl %%edx, 60(%3)\n"
22977 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22978 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
22979 " addl $-64, %0\n"
22980 " addl $64, %4\n"
22981 " addl $64, %3\n"
22982 @@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22983 " shrl $2, %0\n"
22984 " andl $3, %%eax\n"
22985 " cld\n"
22986 + __COPYUSER_SET_ES
22987 "99: rep; movsl\n"
22988 "36: movl %%eax, %0\n"
22989 "37: rep; movsb\n"
22990 "100:\n"
22991 + __COPYUSER_RESTORE_ES
22992 ".section .fixup,\"ax\"\n"
22993 "101: lea 0(%%eax,%0,4),%0\n"
22994 " jmp 100b\n"
22995 @@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
22996 }
22997
22998 static unsigned long
22999 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23000 +{
23001 + int d0, d1;
23002 + __asm__ __volatile__(
23003 + " .align 2,0x90\n"
23004 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23005 + " cmpl $67, %0\n"
23006 + " jbe 3f\n"
23007 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23008 + " .align 2,0x90\n"
23009 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23010 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23011 + "5: movl %%eax, 0(%3)\n"
23012 + "6: movl %%edx, 4(%3)\n"
23013 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23014 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23015 + "9: movl %%eax, 8(%3)\n"
23016 + "10: movl %%edx, 12(%3)\n"
23017 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23018 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23019 + "13: movl %%eax, 16(%3)\n"
23020 + "14: movl %%edx, 20(%3)\n"
23021 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23022 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23023 + "17: movl %%eax, 24(%3)\n"
23024 + "18: movl %%edx, 28(%3)\n"
23025 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23026 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23027 + "21: movl %%eax, 32(%3)\n"
23028 + "22: movl %%edx, 36(%3)\n"
23029 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23030 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23031 + "25: movl %%eax, 40(%3)\n"
23032 + "26: movl %%edx, 44(%3)\n"
23033 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23034 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23035 + "29: movl %%eax, 48(%3)\n"
23036 + "30: movl %%edx, 52(%3)\n"
23037 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23038 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23039 + "33: movl %%eax, 56(%3)\n"
23040 + "34: movl %%edx, 60(%3)\n"
23041 + " addl $-64, %0\n"
23042 + " addl $64, %4\n"
23043 + " addl $64, %3\n"
23044 + " cmpl $63, %0\n"
23045 + " ja 1b\n"
23046 + "35: movl %0, %%eax\n"
23047 + " shrl $2, %0\n"
23048 + " andl $3, %%eax\n"
23049 + " cld\n"
23050 + "99: rep; "__copyuser_seg" movsl\n"
23051 + "36: movl %%eax, %0\n"
23052 + "37: rep; "__copyuser_seg" movsb\n"
23053 + "100:\n"
23054 + ".section .fixup,\"ax\"\n"
23055 + "101: lea 0(%%eax,%0,4),%0\n"
23056 + " jmp 100b\n"
23057 + ".previous\n"
23058 + ".section __ex_table,\"a\"\n"
23059 + " .align 4\n"
23060 + " .long 1b,100b\n"
23061 + " .long 2b,100b\n"
23062 + " .long 3b,100b\n"
23063 + " .long 4b,100b\n"
23064 + " .long 5b,100b\n"
23065 + " .long 6b,100b\n"
23066 + " .long 7b,100b\n"
23067 + " .long 8b,100b\n"
23068 + " .long 9b,100b\n"
23069 + " .long 10b,100b\n"
23070 + " .long 11b,100b\n"
23071 + " .long 12b,100b\n"
23072 + " .long 13b,100b\n"
23073 + " .long 14b,100b\n"
23074 + " .long 15b,100b\n"
23075 + " .long 16b,100b\n"
23076 + " .long 17b,100b\n"
23077 + " .long 18b,100b\n"
23078 + " .long 19b,100b\n"
23079 + " .long 20b,100b\n"
23080 + " .long 21b,100b\n"
23081 + " .long 22b,100b\n"
23082 + " .long 23b,100b\n"
23083 + " .long 24b,100b\n"
23084 + " .long 25b,100b\n"
23085 + " .long 26b,100b\n"
23086 + " .long 27b,100b\n"
23087 + " .long 28b,100b\n"
23088 + " .long 29b,100b\n"
23089 + " .long 30b,100b\n"
23090 + " .long 31b,100b\n"
23091 + " .long 32b,100b\n"
23092 + " .long 33b,100b\n"
23093 + " .long 34b,100b\n"
23094 + " .long 35b,100b\n"
23095 + " .long 36b,100b\n"
23096 + " .long 37b,100b\n"
23097 + " .long 99b,101b\n"
23098 + ".previous"
23099 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
23100 + : "1"(to), "2"(from), "0"(size)
23101 + : "eax", "edx", "memory");
23102 + return size;
23103 +}
23104 +
23105 +static unsigned long
23106 +__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23107 +static unsigned long
23108 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23109 {
23110 int d0, d1;
23111 __asm__ __volatile__(
23112 " .align 2,0x90\n"
23113 - "0: movl 32(%4), %%eax\n"
23114 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23115 " cmpl $67, %0\n"
23116 " jbe 2f\n"
23117 - "1: movl 64(%4), %%eax\n"
23118 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23119 " .align 2,0x90\n"
23120 - "2: movl 0(%4), %%eax\n"
23121 - "21: movl 4(%4), %%edx\n"
23122 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23123 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23124 " movl %%eax, 0(%3)\n"
23125 " movl %%edx, 4(%3)\n"
23126 - "3: movl 8(%4), %%eax\n"
23127 - "31: movl 12(%4),%%edx\n"
23128 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23129 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23130 " movl %%eax, 8(%3)\n"
23131 " movl %%edx, 12(%3)\n"
23132 - "4: movl 16(%4), %%eax\n"
23133 - "41: movl 20(%4), %%edx\n"
23134 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23135 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23136 " movl %%eax, 16(%3)\n"
23137 " movl %%edx, 20(%3)\n"
23138 - "10: movl 24(%4), %%eax\n"
23139 - "51: movl 28(%4), %%edx\n"
23140 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23141 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23142 " movl %%eax, 24(%3)\n"
23143 " movl %%edx, 28(%3)\n"
23144 - "11: movl 32(%4), %%eax\n"
23145 - "61: movl 36(%4), %%edx\n"
23146 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23147 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23148 " movl %%eax, 32(%3)\n"
23149 " movl %%edx, 36(%3)\n"
23150 - "12: movl 40(%4), %%eax\n"
23151 - "71: movl 44(%4), %%edx\n"
23152 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23153 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23154 " movl %%eax, 40(%3)\n"
23155 " movl %%edx, 44(%3)\n"
23156 - "13: movl 48(%4), %%eax\n"
23157 - "81: movl 52(%4), %%edx\n"
23158 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23159 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23160 " movl %%eax, 48(%3)\n"
23161 " movl %%edx, 52(%3)\n"
23162 - "14: movl 56(%4), %%eax\n"
23163 - "91: movl 60(%4), %%edx\n"
23164 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23165 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23166 " movl %%eax, 56(%3)\n"
23167 " movl %%edx, 60(%3)\n"
23168 " addl $-64, %0\n"
23169 @@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23170 " shrl $2, %0\n"
23171 " andl $3, %%eax\n"
23172 " cld\n"
23173 - "6: rep; movsl\n"
23174 + "6: rep; "__copyuser_seg" movsl\n"
23175 " movl %%eax,%0\n"
23176 - "7: rep; movsb\n"
23177 + "7: rep; "__copyuser_seg" movsb\n"
23178 "8:\n"
23179 ".section .fixup,\"ax\"\n"
23180 "9: lea 0(%%eax,%0,4),%0\n"
23181 @@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23182 */
23183
23184 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23185 + const void __user *from, unsigned long size) __size_overflow(3);
23186 +static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23187 const void __user *from, unsigned long size)
23188 {
23189 int d0, d1;
23190
23191 __asm__ __volatile__(
23192 " .align 2,0x90\n"
23193 - "0: movl 32(%4), %%eax\n"
23194 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23195 " cmpl $67, %0\n"
23196 " jbe 2f\n"
23197 - "1: movl 64(%4), %%eax\n"
23198 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23199 " .align 2,0x90\n"
23200 - "2: movl 0(%4), %%eax\n"
23201 - "21: movl 4(%4), %%edx\n"
23202 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23203 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23204 " movnti %%eax, 0(%3)\n"
23205 " movnti %%edx, 4(%3)\n"
23206 - "3: movl 8(%4), %%eax\n"
23207 - "31: movl 12(%4),%%edx\n"
23208 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23209 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23210 " movnti %%eax, 8(%3)\n"
23211 " movnti %%edx, 12(%3)\n"
23212 - "4: movl 16(%4), %%eax\n"
23213 - "41: movl 20(%4), %%edx\n"
23214 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23215 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23216 " movnti %%eax, 16(%3)\n"
23217 " movnti %%edx, 20(%3)\n"
23218 - "10: movl 24(%4), %%eax\n"
23219 - "51: movl 28(%4), %%edx\n"
23220 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23221 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23222 " movnti %%eax, 24(%3)\n"
23223 " movnti %%edx, 28(%3)\n"
23224 - "11: movl 32(%4), %%eax\n"
23225 - "61: movl 36(%4), %%edx\n"
23226 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23227 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23228 " movnti %%eax, 32(%3)\n"
23229 " movnti %%edx, 36(%3)\n"
23230 - "12: movl 40(%4), %%eax\n"
23231 - "71: movl 44(%4), %%edx\n"
23232 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23233 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23234 " movnti %%eax, 40(%3)\n"
23235 " movnti %%edx, 44(%3)\n"
23236 - "13: movl 48(%4), %%eax\n"
23237 - "81: movl 52(%4), %%edx\n"
23238 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23239 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23240 " movnti %%eax, 48(%3)\n"
23241 " movnti %%edx, 52(%3)\n"
23242 - "14: movl 56(%4), %%eax\n"
23243 - "91: movl 60(%4), %%edx\n"
23244 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23245 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23246 " movnti %%eax, 56(%3)\n"
23247 " movnti %%edx, 60(%3)\n"
23248 " addl $-64, %0\n"
23249 @@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23250 " shrl $2, %0\n"
23251 " andl $3, %%eax\n"
23252 " cld\n"
23253 - "6: rep; movsl\n"
23254 + "6: rep; "__copyuser_seg" movsl\n"
23255 " movl %%eax,%0\n"
23256 - "7: rep; movsb\n"
23257 + "7: rep; "__copyuser_seg" movsb\n"
23258 "8:\n"
23259 ".section .fixup,\"ax\"\n"
23260 "9: lea 0(%%eax,%0,4),%0\n"
23261 @@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23262 }
23263
23264 static unsigned long __copy_user_intel_nocache(void *to,
23265 + const void __user *from, unsigned long size) __size_overflow(3);
23266 +static unsigned long __copy_user_intel_nocache(void *to,
23267 const void __user *from, unsigned long size)
23268 {
23269 int d0, d1;
23270
23271 __asm__ __volatile__(
23272 " .align 2,0x90\n"
23273 - "0: movl 32(%4), %%eax\n"
23274 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23275 " cmpl $67, %0\n"
23276 " jbe 2f\n"
23277 - "1: movl 64(%4), %%eax\n"
23278 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23279 " .align 2,0x90\n"
23280 - "2: movl 0(%4), %%eax\n"
23281 - "21: movl 4(%4), %%edx\n"
23282 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23283 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23284 " movnti %%eax, 0(%3)\n"
23285 " movnti %%edx, 4(%3)\n"
23286 - "3: movl 8(%4), %%eax\n"
23287 - "31: movl 12(%4),%%edx\n"
23288 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23289 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23290 " movnti %%eax, 8(%3)\n"
23291 " movnti %%edx, 12(%3)\n"
23292 - "4: movl 16(%4), %%eax\n"
23293 - "41: movl 20(%4), %%edx\n"
23294 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23295 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23296 " movnti %%eax, 16(%3)\n"
23297 " movnti %%edx, 20(%3)\n"
23298 - "10: movl 24(%4), %%eax\n"
23299 - "51: movl 28(%4), %%edx\n"
23300 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23301 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23302 " movnti %%eax, 24(%3)\n"
23303 " movnti %%edx, 28(%3)\n"
23304 - "11: movl 32(%4), %%eax\n"
23305 - "61: movl 36(%4), %%edx\n"
23306 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23307 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23308 " movnti %%eax, 32(%3)\n"
23309 " movnti %%edx, 36(%3)\n"
23310 - "12: movl 40(%4), %%eax\n"
23311 - "71: movl 44(%4), %%edx\n"
23312 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23313 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23314 " movnti %%eax, 40(%3)\n"
23315 " movnti %%edx, 44(%3)\n"
23316 - "13: movl 48(%4), %%eax\n"
23317 - "81: movl 52(%4), %%edx\n"
23318 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23319 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23320 " movnti %%eax, 48(%3)\n"
23321 " movnti %%edx, 52(%3)\n"
23322 - "14: movl 56(%4), %%eax\n"
23323 - "91: movl 60(%4), %%edx\n"
23324 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23325 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23326 " movnti %%eax, 56(%3)\n"
23327 " movnti %%edx, 60(%3)\n"
23328 " addl $-64, %0\n"
23329 @@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23330 " shrl $2, %0\n"
23331 " andl $3, %%eax\n"
23332 " cld\n"
23333 - "6: rep; movsl\n"
23334 + "6: rep; "__copyuser_seg" movsl\n"
23335 " movl %%eax,%0\n"
23336 - "7: rep; movsb\n"
23337 + "7: rep; "__copyuser_seg" movsb\n"
23338 "8:\n"
23339 ".section .fixup,\"ax\"\n"
23340 "9: lea 0(%%eax,%0,4),%0\n"
23341 @@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23342 */
23343 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23344 unsigned long size);
23345 -unsigned long __copy_user_intel(void __user *to, const void *from,
23346 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23347 + unsigned long size);
23348 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23349 unsigned long size);
23350 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23351 const void __user *from, unsigned long size);
23352 #endif /* CONFIG_X86_INTEL_USERCOPY */
23353
23354 /* Generic arbitrary sized copy. */
23355 -#define __copy_user(to, from, size) \
23356 +#define __copy_user(to, from, size, prefix, set, restore) \
23357 do { \
23358 int __d0, __d1, __d2; \
23359 __asm__ __volatile__( \
23360 + set \
23361 " cmp $7,%0\n" \
23362 " jbe 1f\n" \
23363 " movl %1,%0\n" \
23364 " negl %0\n" \
23365 " andl $7,%0\n" \
23366 " subl %0,%3\n" \
23367 - "4: rep; movsb\n" \
23368 + "4: rep; "prefix"movsb\n" \
23369 " movl %3,%0\n" \
23370 " shrl $2,%0\n" \
23371 " andl $3,%3\n" \
23372 " .align 2,0x90\n" \
23373 - "0: rep; movsl\n" \
23374 + "0: rep; "prefix"movsl\n" \
23375 " movl %3,%0\n" \
23376 - "1: rep; movsb\n" \
23377 + "1: rep; "prefix"movsb\n" \
23378 "2:\n" \
23379 + restore \
23380 ".section .fixup,\"ax\"\n" \
23381 "5: addl %3,%0\n" \
23382 " jmp 2b\n" \
23383 @@ -595,14 +718,14 @@ do { \
23384 " negl %0\n" \
23385 " andl $7,%0\n" \
23386 " subl %0,%3\n" \
23387 - "4: rep; movsb\n" \
23388 + "4: rep; "__copyuser_seg"movsb\n" \
23389 " movl %3,%0\n" \
23390 " shrl $2,%0\n" \
23391 " andl $3,%3\n" \
23392 " .align 2,0x90\n" \
23393 - "0: rep; movsl\n" \
23394 + "0: rep; "__copyuser_seg"movsl\n" \
23395 " movl %3,%0\n" \
23396 - "1: rep; movsb\n" \
23397 + "1: rep; "__copyuser_seg"movsb\n" \
23398 "2:\n" \
23399 ".section .fixup,\"ax\"\n" \
23400 "5: addl %3,%0\n" \
23401 @@ -688,9 +811,9 @@ survive:
23402 }
23403 #endif
23404 if (movsl_is_ok(to, from, n))
23405 - __copy_user(to, from, n);
23406 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23407 else
23408 - n = __copy_user_intel(to, from, n);
23409 + n = __generic_copy_to_user_intel(to, from, n);
23410 return n;
23411 }
23412 EXPORT_SYMBOL(__copy_to_user_ll);
23413 @@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23414 unsigned long n)
23415 {
23416 if (movsl_is_ok(to, from, n))
23417 - __copy_user(to, from, n);
23418 + __copy_user(to, from, n, __copyuser_seg, "", "");
23419 else
23420 - n = __copy_user_intel((void __user *)to,
23421 - (const void *)from, n);
23422 + n = __generic_copy_from_user_intel(to, from, n);
23423 return n;
23424 }
23425 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23426 @@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23427 if (n > 64 && cpu_has_xmm2)
23428 n = __copy_user_intel_nocache(to, from, n);
23429 else
23430 - __copy_user(to, from, n);
23431 + __copy_user(to, from, n, __copyuser_seg, "", "");
23432 #else
23433 - __copy_user(to, from, n);
23434 + __copy_user(to, from, n, __copyuser_seg, "", "");
23435 #endif
23436 return n;
23437 }
23438 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23439
23440 -/**
23441 - * copy_to_user: - Copy a block of data into user space.
23442 - * @to: Destination address, in user space.
23443 - * @from: Source address, in kernel space.
23444 - * @n: Number of bytes to copy.
23445 - *
23446 - * Context: User context only. This function may sleep.
23447 - *
23448 - * Copy data from kernel space to user space.
23449 - *
23450 - * Returns number of bytes that could not be copied.
23451 - * On success, this will be zero.
23452 - */
23453 -unsigned long
23454 -copy_to_user(void __user *to, const void *from, unsigned long n)
23455 -{
23456 - if (access_ok(VERIFY_WRITE, to, n))
23457 - n = __copy_to_user(to, from, n);
23458 - return n;
23459 -}
23460 -EXPORT_SYMBOL(copy_to_user);
23461 -
23462 -/**
23463 - * copy_from_user: - Copy a block of data from user space.
23464 - * @to: Destination address, in kernel space.
23465 - * @from: Source address, in user space.
23466 - * @n: Number of bytes to copy.
23467 - *
23468 - * Context: User context only. This function may sleep.
23469 - *
23470 - * Copy data from user space to kernel space.
23471 - *
23472 - * Returns number of bytes that could not be copied.
23473 - * On success, this will be zero.
23474 - *
23475 - * If some data could not be copied, this function will pad the copied
23476 - * data to the requested size using zero bytes.
23477 - */
23478 -unsigned long
23479 -_copy_from_user(void *to, const void __user *from, unsigned long n)
23480 -{
23481 - if (access_ok(VERIFY_READ, from, n))
23482 - n = __copy_from_user(to, from, n);
23483 - else
23484 - memset(to, 0, n);
23485 - return n;
23486 -}
23487 -EXPORT_SYMBOL(_copy_from_user);
23488 -
23489 void copy_from_user_overflow(void)
23490 {
23491 WARN(1, "Buffer overflow detected!\n");
23492 }
23493 EXPORT_SYMBOL(copy_from_user_overflow);
23494 +
23495 +void copy_to_user_overflow(void)
23496 +{
23497 + WARN(1, "Buffer overflow detected!\n");
23498 +}
23499 +EXPORT_SYMBOL(copy_to_user_overflow);
23500 +
23501 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23502 +void __set_fs(mm_segment_t x)
23503 +{
23504 + switch (x.seg) {
23505 + case 0:
23506 + loadsegment(gs, 0);
23507 + break;
23508 + case TASK_SIZE_MAX:
23509 + loadsegment(gs, __USER_DS);
23510 + break;
23511 + case -1UL:
23512 + loadsegment(gs, __KERNEL_DS);
23513 + break;
23514 + default:
23515 + BUG();
23516 + }
23517 + return;
23518 +}
23519 +EXPORT_SYMBOL(__set_fs);
23520 +
23521 +void set_fs(mm_segment_t x)
23522 +{
23523 + current_thread_info()->addr_limit = x;
23524 + __set_fs(x);
23525 +}
23526 +EXPORT_SYMBOL(set_fs);
23527 +#endif
23528 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23529 index 0d0326f..6a6155b 100644
23530 --- a/arch/x86/lib/usercopy_64.c
23531 +++ b/arch/x86/lib/usercopy_64.c
23532 @@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23533 {
23534 long __d0;
23535 might_fault();
23536 +
23537 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23538 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23539 + addr += PAX_USER_SHADOW_BASE;
23540 +#endif
23541 +
23542 /* no memory constraint because it doesn't change any memory gcc knows
23543 about */
23544 asm volatile(
23545 @@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
23546 }
23547 EXPORT_SYMBOL(strlen_user);
23548
23549 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23550 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23551 {
23552 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23553 - return copy_user_generic((__force void *)to, (__force void *)from, len);
23554 - }
23555 - return len;
23556 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23557 +
23558 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23559 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23560 + to += PAX_USER_SHADOW_BASE;
23561 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23562 + from += PAX_USER_SHADOW_BASE;
23563 +#endif
23564 +
23565 + return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23566 + }
23567 + return len;
23568 }
23569 EXPORT_SYMBOL(copy_in_user);
23570
23571 @@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
23572 * it is not necessary to optimize tail handling.
23573 */
23574 unsigned long
23575 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23576 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23577 {
23578 char c;
23579 unsigned zero_len;
23580 @@ -132,3 +146,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23581 break;
23582 return len;
23583 }
23584 +
23585 +void copy_from_user_overflow(void)
23586 +{
23587 + WARN(1, "Buffer overflow detected!\n");
23588 +}
23589 +EXPORT_SYMBOL(copy_from_user_overflow);
23590 +
23591 +void copy_to_user_overflow(void)
23592 +{
23593 + WARN(1, "Buffer overflow detected!\n");
23594 +}
23595 +EXPORT_SYMBOL(copy_to_user_overflow);
23596 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23597 index 1fb85db..8b3540b 100644
23598 --- a/arch/x86/mm/extable.c
23599 +++ b/arch/x86/mm/extable.c
23600 @@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
23601 const struct exception_table_entry *fixup;
23602
23603 #ifdef CONFIG_PNPBIOS
23604 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23605 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23606 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23607 extern u32 pnp_bios_is_utter_crap;
23608 pnp_bios_is_utter_crap = 1;
23609 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23610 index 3ecfd1a..304d554 100644
23611 --- a/arch/x86/mm/fault.c
23612 +++ b/arch/x86/mm/fault.c
23613 @@ -13,11 +13,18 @@
23614 #include <linux/perf_event.h> /* perf_sw_event */
23615 #include <linux/hugetlb.h> /* hstate_index_to_shift */
23616 #include <linux/prefetch.h> /* prefetchw */
23617 +#include <linux/unistd.h>
23618 +#include <linux/compiler.h>
23619
23620 #include <asm/traps.h> /* dotraplinkage, ... */
23621 #include <asm/pgalloc.h> /* pgd_*(), ... */
23622 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23623 #include <asm/fixmap.h> /* VSYSCALL_START */
23624 +#include <asm/tlbflush.h>
23625 +
23626 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23627 +#include <asm/stacktrace.h>
23628 +#endif
23629
23630 /*
23631 * Page fault error code bits:
23632 @@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
23633 int ret = 0;
23634
23635 /* kprobe_running() needs smp_processor_id() */
23636 - if (kprobes_built_in() && !user_mode_vm(regs)) {
23637 + if (kprobes_built_in() && !user_mode(regs)) {
23638 preempt_disable();
23639 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23640 ret = 1;
23641 @@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23642 return !instr_lo || (instr_lo>>1) == 1;
23643 case 0x00:
23644 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23645 - if (probe_kernel_address(instr, opcode))
23646 + if (user_mode(regs)) {
23647 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23648 + return 0;
23649 + } else if (probe_kernel_address(instr, opcode))
23650 return 0;
23651
23652 *prefetch = (instr_lo == 0xF) &&
23653 @@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23654 while (instr < max_instr) {
23655 unsigned char opcode;
23656
23657 - if (probe_kernel_address(instr, opcode))
23658 + if (user_mode(regs)) {
23659 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23660 + break;
23661 + } else if (probe_kernel_address(instr, opcode))
23662 break;
23663
23664 instr++;
23665 @@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23666 force_sig_info(si_signo, &info, tsk);
23667 }
23668
23669 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23670 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23671 +#endif
23672 +
23673 +#ifdef CONFIG_PAX_EMUTRAMP
23674 +static int pax_handle_fetch_fault(struct pt_regs *regs);
23675 +#endif
23676 +
23677 +#ifdef CONFIG_PAX_PAGEEXEC
23678 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23679 +{
23680 + pgd_t *pgd;
23681 + pud_t *pud;
23682 + pmd_t *pmd;
23683 +
23684 + pgd = pgd_offset(mm, address);
23685 + if (!pgd_present(*pgd))
23686 + return NULL;
23687 + pud = pud_offset(pgd, address);
23688 + if (!pud_present(*pud))
23689 + return NULL;
23690 + pmd = pmd_offset(pud, address);
23691 + if (!pmd_present(*pmd))
23692 + return NULL;
23693 + return pmd;
23694 +}
23695 +#endif
23696 +
23697 DEFINE_SPINLOCK(pgd_lock);
23698 LIST_HEAD(pgd_list);
23699
23700 @@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
23701 for (address = VMALLOC_START & PMD_MASK;
23702 address >= TASK_SIZE && address < FIXADDR_TOP;
23703 address += PMD_SIZE) {
23704 +
23705 +#ifdef CONFIG_PAX_PER_CPU_PGD
23706 + unsigned long cpu;
23707 +#else
23708 struct page *page;
23709 +#endif
23710
23711 spin_lock(&pgd_lock);
23712 +
23713 +#ifdef CONFIG_PAX_PER_CPU_PGD
23714 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23715 + pgd_t *pgd = get_cpu_pgd(cpu);
23716 + pmd_t *ret;
23717 +#else
23718 list_for_each_entry(page, &pgd_list, lru) {
23719 + pgd_t *pgd = page_address(page);
23720 spinlock_t *pgt_lock;
23721 pmd_t *ret;
23722
23723 @@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
23724 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23725
23726 spin_lock(pgt_lock);
23727 - ret = vmalloc_sync_one(page_address(page), address);
23728 +#endif
23729 +
23730 + ret = vmalloc_sync_one(pgd, address);
23731 +
23732 +#ifndef CONFIG_PAX_PER_CPU_PGD
23733 spin_unlock(pgt_lock);
23734 +#endif
23735
23736 if (!ret)
23737 break;
23738 @@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23739 * an interrupt in the middle of a task switch..
23740 */
23741 pgd_paddr = read_cr3();
23742 +
23743 +#ifdef CONFIG_PAX_PER_CPU_PGD
23744 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23745 +#endif
23746 +
23747 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23748 if (!pmd_k)
23749 return -1;
23750 @@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
23751 * happen within a race in page table update. In the later
23752 * case just flush:
23753 */
23754 +
23755 +#ifdef CONFIG_PAX_PER_CPU_PGD
23756 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23757 + pgd = pgd_offset_cpu(smp_processor_id(), address);
23758 +#else
23759 pgd = pgd_offset(current->active_mm, address);
23760 +#endif
23761 +
23762 pgd_ref = pgd_offset_k(address);
23763 if (pgd_none(*pgd_ref))
23764 return -1;
23765 @@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23766 static int is_errata100(struct pt_regs *regs, unsigned long address)
23767 {
23768 #ifdef CONFIG_X86_64
23769 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23770 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23771 return 1;
23772 #endif
23773 return 0;
23774 @@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
23775 }
23776
23777 static const char nx_warning[] = KERN_CRIT
23778 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23779 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23780
23781 static void
23782 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23783 @@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
23784 if (!oops_may_print())
23785 return;
23786
23787 - if (error_code & PF_INSTR) {
23788 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
23789 unsigned int level;
23790
23791 pte_t *pte = lookup_address(address, &level);
23792
23793 if (pte && pte_present(*pte) && !pte_exec(*pte))
23794 - printk(nx_warning, current_uid());
23795 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
23796 }
23797
23798 +#ifdef CONFIG_PAX_KERNEXEC
23799 + if (init_mm.start_code <= address && address < init_mm.end_code) {
23800 + if (current->signal->curr_ip)
23801 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23802 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
23803 + else
23804 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23805 + current->comm, task_pid_nr(current), current_uid(), current_euid());
23806 + }
23807 +#endif
23808 +
23809 printk(KERN_ALERT "BUG: unable to handle kernel ");
23810 if (address < PAGE_SIZE)
23811 printk(KERN_CONT "NULL pointer dereference");
23812 @@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
23813 }
23814 #endif
23815
23816 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23817 + if (pax_is_fetch_fault(regs, error_code, address)) {
23818 +
23819 +#ifdef CONFIG_PAX_EMUTRAMP
23820 + switch (pax_handle_fetch_fault(regs)) {
23821 + case 2:
23822 + return;
23823 + }
23824 +#endif
23825 +
23826 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23827 + do_group_exit(SIGKILL);
23828 + }
23829 +#endif
23830 +
23831 if (unlikely(show_unhandled_signals))
23832 show_signal_msg(regs, error_code, address, tsk);
23833
23834 @@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
23835 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23836 printk(KERN_ERR
23837 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23838 - tsk->comm, tsk->pid, address);
23839 + tsk->comm, task_pid_nr(tsk), address);
23840 code = BUS_MCEERR_AR;
23841 }
23842 #endif
23843 @@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
23844 return 1;
23845 }
23846
23847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23848 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23849 +{
23850 + pte_t *pte;
23851 + pmd_t *pmd;
23852 + spinlock_t *ptl;
23853 + unsigned char pte_mask;
23854 +
23855 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
23856 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
23857 + return 0;
23858 +
23859 + /* PaX: it's our fault, let's handle it if we can */
23860 +
23861 + /* PaX: take a look at read faults before acquiring any locks */
23862 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23863 + /* instruction fetch attempt from a protected page in user mode */
23864 + up_read(&mm->mmap_sem);
23865 +
23866 +#ifdef CONFIG_PAX_EMUTRAMP
23867 + switch (pax_handle_fetch_fault(regs)) {
23868 + case 2:
23869 + return 1;
23870 + }
23871 +#endif
23872 +
23873 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23874 + do_group_exit(SIGKILL);
23875 + }
23876 +
23877 + pmd = pax_get_pmd(mm, address);
23878 + if (unlikely(!pmd))
23879 + return 0;
23880 +
23881 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23882 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23883 + pte_unmap_unlock(pte, ptl);
23884 + return 0;
23885 + }
23886 +
23887 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23888 + /* write attempt to a protected page in user mode */
23889 + pte_unmap_unlock(pte, ptl);
23890 + return 0;
23891 + }
23892 +
23893 +#ifdef CONFIG_SMP
23894 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23895 +#else
23896 + if (likely(address > get_limit(regs->cs)))
23897 +#endif
23898 + {
23899 + set_pte(pte, pte_mkread(*pte));
23900 + __flush_tlb_one(address);
23901 + pte_unmap_unlock(pte, ptl);
23902 + up_read(&mm->mmap_sem);
23903 + return 1;
23904 + }
23905 +
23906 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23907 +
23908 + /*
23909 + * PaX: fill DTLB with user rights and retry
23910 + */
23911 + __asm__ __volatile__ (
23912 + "orb %2,(%1)\n"
23913 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23914 +/*
23915 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23916 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23917 + * page fault when examined during a TLB load attempt. this is true not only
23918 + * for PTEs holding a non-present entry but also present entries that will
23919 + * raise a page fault (such as those set up by PaX, or the copy-on-write
23920 + * mechanism). in effect it means that we do *not* need to flush the TLBs
23921 + * for our target pages since their PTEs are simply not in the TLBs at all.
23922 +
23923 + * the best thing in omitting it is that we gain around 15-20% speed in the
23924 + * fast path of the page fault handler and can get rid of tracing since we
23925 + * can no longer flush unintended entries.
23926 + */
23927 + "invlpg (%0)\n"
23928 +#endif
23929 + __copyuser_seg"testb $0,(%0)\n"
23930 + "xorb %3,(%1)\n"
23931 + :
23932 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
23933 + : "memory", "cc");
23934 + pte_unmap_unlock(pte, ptl);
23935 + up_read(&mm->mmap_sem);
23936 + return 1;
23937 +}
23938 +#endif
23939 +
23940 /*
23941 * Handle a spurious fault caused by a stale TLB entry.
23942 *
23943 @@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
23944 static inline int
23945 access_error(unsigned long error_code, struct vm_area_struct *vma)
23946 {
23947 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
23948 + return 1;
23949 +
23950 if (error_code & PF_WRITE) {
23951 /* write, present and write, not present: */
23952 if (unlikely(!(vma->vm_flags & VM_WRITE)))
23953 @@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23954 {
23955 struct vm_area_struct *vma;
23956 struct task_struct *tsk;
23957 - unsigned long address;
23958 struct mm_struct *mm;
23959 int fault;
23960 int write = error_code & PF_WRITE;
23961 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
23962 (write ? FAULT_FLAG_WRITE : 0);
23963
23964 - tsk = current;
23965 - mm = tsk->mm;
23966 -
23967 /* Get the faulting address: */
23968 - address = read_cr2();
23969 + unsigned long address = read_cr2();
23970 +
23971 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23972 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23973 + if (!search_exception_tables(regs->ip)) {
23974 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23975 + bad_area_nosemaphore(regs, error_code, address);
23976 + return;
23977 + }
23978 + if (address < PAX_USER_SHADOW_BASE) {
23979 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
23980 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
23981 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
23982 + } else
23983 + address -= PAX_USER_SHADOW_BASE;
23984 + }
23985 +#endif
23986 +
23987 + tsk = current;
23988 + mm = tsk->mm;
23989
23990 /*
23991 * Detect and handle instructions that would cause a page fault for
23992 @@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
23993 * User-mode registers count as a user access even for any
23994 * potential system fault or CPU buglet:
23995 */
23996 - if (user_mode_vm(regs)) {
23997 + if (user_mode(regs)) {
23998 local_irq_enable();
23999 error_code |= PF_USER;
24000 } else {
24001 @@ -1132,6 +1339,11 @@ retry:
24002 might_sleep();
24003 }
24004
24005 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24006 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24007 + return;
24008 +#endif
24009 +
24010 vma = find_vma(mm, address);
24011 if (unlikely(!vma)) {
24012 bad_area(regs, error_code, address);
24013 @@ -1143,18 +1355,24 @@ retry:
24014 bad_area(regs, error_code, address);
24015 return;
24016 }
24017 - if (error_code & PF_USER) {
24018 - /*
24019 - * Accessing the stack below %sp is always a bug.
24020 - * The large cushion allows instructions like enter
24021 - * and pusha to work. ("enter $65535, $31" pushes
24022 - * 32 pointers and then decrements %sp by 65535.)
24023 - */
24024 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24025 - bad_area(regs, error_code, address);
24026 - return;
24027 - }
24028 + /*
24029 + * Accessing the stack below %sp is always a bug.
24030 + * The large cushion allows instructions like enter
24031 + * and pusha to work. ("enter $65535, $31" pushes
24032 + * 32 pointers and then decrements %sp by 65535.)
24033 + */
24034 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24035 + bad_area(regs, error_code, address);
24036 + return;
24037 }
24038 +
24039 +#ifdef CONFIG_PAX_SEGMEXEC
24040 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24041 + bad_area(regs, error_code, address);
24042 + return;
24043 + }
24044 +#endif
24045 +
24046 if (unlikely(expand_stack(vma, address))) {
24047 bad_area(regs, error_code, address);
24048 return;
24049 @@ -1209,3 +1427,292 @@ good_area:
24050
24051 up_read(&mm->mmap_sem);
24052 }
24053 +
24054 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24055 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24056 +{
24057 + struct mm_struct *mm = current->mm;
24058 + unsigned long ip = regs->ip;
24059 +
24060 + if (v8086_mode(regs))
24061 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24062 +
24063 +#ifdef CONFIG_PAX_PAGEEXEC
24064 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24065 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24066 + return true;
24067 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24068 + return true;
24069 + return false;
24070 + }
24071 +#endif
24072 +
24073 +#ifdef CONFIG_PAX_SEGMEXEC
24074 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24075 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24076 + return true;
24077 + return false;
24078 + }
24079 +#endif
24080 +
24081 + return false;
24082 +}
24083 +#endif
24084 +
24085 +#ifdef CONFIG_PAX_EMUTRAMP
24086 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24087 +{
24088 + int err;
24089 +
24090 + do { /* PaX: libffi trampoline emulation */
24091 + unsigned char mov, jmp;
24092 + unsigned int addr1, addr2;
24093 +
24094 +#ifdef CONFIG_X86_64
24095 + if ((regs->ip + 9) >> 32)
24096 + break;
24097 +#endif
24098 +
24099 + err = get_user(mov, (unsigned char __user *)regs->ip);
24100 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24101 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24102 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24103 +
24104 + if (err)
24105 + break;
24106 +
24107 + if (mov == 0xB8 && jmp == 0xE9) {
24108 + regs->ax = addr1;
24109 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24110 + return 2;
24111 + }
24112 + } while (0);
24113 +
24114 + do { /* PaX: gcc trampoline emulation #1 */
24115 + unsigned char mov1, mov2;
24116 + unsigned short jmp;
24117 + unsigned int addr1, addr2;
24118 +
24119 +#ifdef CONFIG_X86_64
24120 + if ((regs->ip + 11) >> 32)
24121 + break;
24122 +#endif
24123 +
24124 + err = get_user(mov1, (unsigned char __user *)regs->ip);
24125 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24126 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24127 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24128 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24129 +
24130 + if (err)
24131 + break;
24132 +
24133 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24134 + regs->cx = addr1;
24135 + regs->ax = addr2;
24136 + regs->ip = addr2;
24137 + return 2;
24138 + }
24139 + } while (0);
24140 +
24141 + do { /* PaX: gcc trampoline emulation #2 */
24142 + unsigned char mov, jmp;
24143 + unsigned int addr1, addr2;
24144 +
24145 +#ifdef CONFIG_X86_64
24146 + if ((regs->ip + 9) >> 32)
24147 + break;
24148 +#endif
24149 +
24150 + err = get_user(mov, (unsigned char __user *)regs->ip);
24151 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24152 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24153 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24154 +
24155 + if (err)
24156 + break;
24157 +
24158 + if (mov == 0xB9 && jmp == 0xE9) {
24159 + regs->cx = addr1;
24160 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24161 + return 2;
24162 + }
24163 + } while (0);
24164 +
24165 + return 1; /* PaX in action */
24166 +}
24167 +
24168 +#ifdef CONFIG_X86_64
24169 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24170 +{
24171 + int err;
24172 +
24173 + do { /* PaX: libffi trampoline emulation */
24174 + unsigned short mov1, mov2, jmp1;
24175 + unsigned char stcclc, jmp2;
24176 + unsigned long addr1, addr2;
24177 +
24178 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24179 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24180 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24181 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24182 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24183 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24184 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24185 +
24186 + if (err)
24187 + break;
24188 +
24189 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24190 + regs->r11 = addr1;
24191 + regs->r10 = addr2;
24192 + if (stcclc == 0xF8)
24193 + regs->flags &= ~X86_EFLAGS_CF;
24194 + else
24195 + regs->flags |= X86_EFLAGS_CF;
24196 + regs->ip = addr1;
24197 + return 2;
24198 + }
24199 + } while (0);
24200 +
24201 + do { /* PaX: gcc trampoline emulation #1 */
24202 + unsigned short mov1, mov2, jmp1;
24203 + unsigned char jmp2;
24204 + unsigned int addr1;
24205 + unsigned long addr2;
24206 +
24207 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24208 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24209 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24210 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24211 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24212 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24213 +
24214 + if (err)
24215 + break;
24216 +
24217 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24218 + regs->r11 = addr1;
24219 + regs->r10 = addr2;
24220 + regs->ip = addr1;
24221 + return 2;
24222 + }
24223 + } while (0);
24224 +
24225 + do { /* PaX: gcc trampoline emulation #2 */
24226 + unsigned short mov1, mov2, jmp1;
24227 + unsigned char jmp2;
24228 + unsigned long addr1, addr2;
24229 +
24230 + err = get_user(mov1, (unsigned short __user *)regs->ip);
24231 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24232 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24233 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24234 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24235 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24236 +
24237 + if (err)
24238 + break;
24239 +
24240 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24241 + regs->r11 = addr1;
24242 + regs->r10 = addr2;
24243 + regs->ip = addr1;
24244 + return 2;
24245 + }
24246 + } while (0);
24247 +
24248 + return 1; /* PaX in action */
24249 +}
24250 +#endif
24251 +
24252 +/*
24253 + * PaX: decide what to do with offenders (regs->ip = fault address)
24254 + *
24255 + * returns 1 when task should be killed
24256 + * 2 when gcc trampoline was detected
24257 + */
24258 +static int pax_handle_fetch_fault(struct pt_regs *regs)
24259 +{
24260 + if (v8086_mode(regs))
24261 + return 1;
24262 +
24263 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24264 + return 1;
24265 +
24266 +#ifdef CONFIG_X86_32
24267 + return pax_handle_fetch_fault_32(regs);
24268 +#else
24269 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24270 + return pax_handle_fetch_fault_32(regs);
24271 + else
24272 + return pax_handle_fetch_fault_64(regs);
24273 +#endif
24274 +}
24275 +#endif
24276 +
24277 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24278 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24279 +{
24280 + long i;
24281 +
24282 + printk(KERN_ERR "PAX: bytes at PC: ");
24283 + for (i = 0; i < 20; i++) {
24284 + unsigned char c;
24285 + if (get_user(c, (unsigned char __force_user *)pc+i))
24286 + printk(KERN_CONT "?? ");
24287 + else
24288 + printk(KERN_CONT "%02x ", c);
24289 + }
24290 + printk("\n");
24291 +
24292 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24293 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
24294 + unsigned long c;
24295 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
24296 +#ifdef CONFIG_X86_32
24297 + printk(KERN_CONT "???????? ");
24298 +#else
24299 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24300 + printk(KERN_CONT "???????? ???????? ");
24301 + else
24302 + printk(KERN_CONT "???????????????? ");
24303 +#endif
24304 + } else {
24305 +#ifdef CONFIG_X86_64
24306 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24307 + printk(KERN_CONT "%08x ", (unsigned int)c);
24308 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24309 + } else
24310 +#endif
24311 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24312 + }
24313 + }
24314 + printk("\n");
24315 +}
24316 +#endif
24317 +
24318 +/**
24319 + * probe_kernel_write(): safely attempt to write to a location
24320 + * @dst: address to write to
24321 + * @src: pointer to the data that shall be written
24322 + * @size: size of the data chunk
24323 + *
24324 + * Safely write to address @dst from the buffer at @src. If a kernel fault
24325 + * happens, handle that and return -EFAULT.
24326 + */
24327 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24328 +{
24329 + long ret;
24330 + mm_segment_t old_fs = get_fs();
24331 +
24332 + set_fs(KERNEL_DS);
24333 + pagefault_disable();
24334 + pax_open_kernel();
24335 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24336 + pax_close_kernel();
24337 + pagefault_enable();
24338 + set_fs(old_fs);
24339 +
24340 + return ret ? -EFAULT : 0;
24341 +}
24342 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24343 index dd74e46..7d26398 100644
24344 --- a/arch/x86/mm/gup.c
24345 +++ b/arch/x86/mm/gup.c
24346 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24347 addr = start;
24348 len = (unsigned long) nr_pages << PAGE_SHIFT;
24349 end = start + len;
24350 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24351 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24352 (void __user *)start, len)))
24353 return 0;
24354
24355 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24356 index 6f31ee5..8ee4164 100644
24357 --- a/arch/x86/mm/highmem_32.c
24358 +++ b/arch/x86/mm/highmem_32.c
24359 @@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
24360 idx = type + KM_TYPE_NR*smp_processor_id();
24361 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24362 BUG_ON(!pte_none(*(kmap_pte-idx)));
24363 +
24364 + pax_open_kernel();
24365 set_pte(kmap_pte-idx, mk_pte(page, prot));
24366 + pax_close_kernel();
24367 +
24368 arch_flush_lazy_mmu_mode();
24369
24370 return (void *)vaddr;
24371 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24372 index f6679a7..8f795a3 100644
24373 --- a/arch/x86/mm/hugetlbpage.c
24374 +++ b/arch/x86/mm/hugetlbpage.c
24375 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24376 struct hstate *h = hstate_file(file);
24377 struct mm_struct *mm = current->mm;
24378 struct vm_area_struct *vma;
24379 - unsigned long start_addr;
24380 + unsigned long start_addr, pax_task_size = TASK_SIZE;
24381 +
24382 +#ifdef CONFIG_PAX_SEGMEXEC
24383 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24384 + pax_task_size = SEGMEXEC_TASK_SIZE;
24385 +#endif
24386 +
24387 + pax_task_size -= PAGE_SIZE;
24388
24389 if (len > mm->cached_hole_size) {
24390 - start_addr = mm->free_area_cache;
24391 + start_addr = mm->free_area_cache;
24392 } else {
24393 - start_addr = TASK_UNMAPPED_BASE;
24394 - mm->cached_hole_size = 0;
24395 + start_addr = mm->mmap_base;
24396 + mm->cached_hole_size = 0;
24397 }
24398
24399 full_search:
24400 @@ -280,26 +287,27 @@ full_search:
24401
24402 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24403 /* At this point: (!vma || addr < vma->vm_end). */
24404 - if (TASK_SIZE - len < addr) {
24405 + if (pax_task_size - len < addr) {
24406 /*
24407 * Start a new search - just in case we missed
24408 * some holes.
24409 */
24410 - if (start_addr != TASK_UNMAPPED_BASE) {
24411 - start_addr = TASK_UNMAPPED_BASE;
24412 + if (start_addr != mm->mmap_base) {
24413 + start_addr = mm->mmap_base;
24414 mm->cached_hole_size = 0;
24415 goto full_search;
24416 }
24417 return -ENOMEM;
24418 }
24419 - if (!vma || addr + len <= vma->vm_start) {
24420 - mm->free_area_cache = addr + len;
24421 - return addr;
24422 - }
24423 + if (check_heap_stack_gap(vma, addr, len))
24424 + break;
24425 if (addr + mm->cached_hole_size < vma->vm_start)
24426 mm->cached_hole_size = vma->vm_start - addr;
24427 addr = ALIGN(vma->vm_end, huge_page_size(h));
24428 }
24429 +
24430 + mm->free_area_cache = addr + len;
24431 + return addr;
24432 }
24433
24434 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24435 @@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24436 struct mm_struct *mm = current->mm;
24437 struct vm_area_struct *vma;
24438 unsigned long base = mm->mmap_base;
24439 - unsigned long addr = addr0;
24440 + unsigned long addr;
24441 unsigned long largest_hole = mm->cached_hole_size;
24442 - unsigned long start_addr;
24443
24444 /* don't allow allocations above current base */
24445 if (mm->free_area_cache > base)
24446 @@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24447 largest_hole = 0;
24448 mm->free_area_cache = base;
24449 }
24450 -try_again:
24451 - start_addr = mm->free_area_cache;
24452
24453 /* make sure it can fit in the remaining address space */
24454 if (mm->free_area_cache < len)
24455 goto fail;
24456
24457 /* either no address requested or can't fit in requested address hole */
24458 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
24459 + addr = mm->free_area_cache - len;
24460 do {
24461 + addr &= huge_page_mask(h);
24462 /*
24463 * Lookup failure means no vma is above this address,
24464 * i.e. return with success:
24465 @@ -340,10 +346,10 @@ try_again:
24466 if (!vma)
24467 return addr;
24468
24469 - if (addr + len <= vma->vm_start) {
24470 + if (check_heap_stack_gap(vma, addr, len)) {
24471 /* remember the address as a hint for next time */
24472 - mm->cached_hole_size = largest_hole;
24473 - return (mm->free_area_cache = addr);
24474 + mm->cached_hole_size = largest_hole;
24475 + return (mm->free_area_cache = addr);
24476 } else if (mm->free_area_cache == vma->vm_end) {
24477 /* pull free_area_cache down to the first hole */
24478 mm->free_area_cache = vma->vm_start;
24479 @@ -352,29 +358,34 @@ try_again:
24480
24481 /* remember the largest hole we saw so far */
24482 if (addr + largest_hole < vma->vm_start)
24483 - largest_hole = vma->vm_start - addr;
24484 + largest_hole = vma->vm_start - addr;
24485
24486 /* try just below the current vma->vm_start */
24487 - addr = (vma->vm_start - len) & huge_page_mask(h);
24488 - } while (len <= vma->vm_start);
24489 + addr = skip_heap_stack_gap(vma, len);
24490 + } while (!IS_ERR_VALUE(addr));
24491
24492 fail:
24493 /*
24494 - * if hint left us with no space for the requested
24495 - * mapping then try again:
24496 - */
24497 - if (start_addr != base) {
24498 - mm->free_area_cache = base;
24499 - largest_hole = 0;
24500 - goto try_again;
24501 - }
24502 - /*
24503 * A failed mmap() very likely causes application failure,
24504 * so fall back to the bottom-up function here. This scenario
24505 * can happen with large stack limits and large mmap()
24506 * allocations.
24507 */
24508 - mm->free_area_cache = TASK_UNMAPPED_BASE;
24509 +
24510 +#ifdef CONFIG_PAX_SEGMEXEC
24511 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24512 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24513 + else
24514 +#endif
24515 +
24516 + mm->mmap_base = TASK_UNMAPPED_BASE;
24517 +
24518 +#ifdef CONFIG_PAX_RANDMMAP
24519 + if (mm->pax_flags & MF_PAX_RANDMMAP)
24520 + mm->mmap_base += mm->delta_mmap;
24521 +#endif
24522 +
24523 + mm->free_area_cache = mm->mmap_base;
24524 mm->cached_hole_size = ~0UL;
24525 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24526 len, pgoff, flags);
24527 @@ -382,6 +393,7 @@ fail:
24528 /*
24529 * Restore the topdown base:
24530 */
24531 + mm->mmap_base = base;
24532 mm->free_area_cache = base;
24533 mm->cached_hole_size = ~0UL;
24534
24535 @@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24536 struct hstate *h = hstate_file(file);
24537 struct mm_struct *mm = current->mm;
24538 struct vm_area_struct *vma;
24539 + unsigned long pax_task_size = TASK_SIZE;
24540
24541 if (len & ~huge_page_mask(h))
24542 return -EINVAL;
24543 - if (len > TASK_SIZE)
24544 +
24545 +#ifdef CONFIG_PAX_SEGMEXEC
24546 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
24547 + pax_task_size = SEGMEXEC_TASK_SIZE;
24548 +#endif
24549 +
24550 + pax_task_size -= PAGE_SIZE;
24551 +
24552 + if (len > pax_task_size)
24553 return -ENOMEM;
24554
24555 if (flags & MAP_FIXED) {
24556 @@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24557 if (addr) {
24558 addr = ALIGN(addr, huge_page_size(h));
24559 vma = find_vma(mm, addr);
24560 - if (TASK_SIZE - len >= addr &&
24561 - (!vma || addr + len <= vma->vm_start))
24562 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24563 return addr;
24564 }
24565 if (mm->get_unmapped_area == arch_get_unmapped_area)
24566 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24567 index 4f0cec7..00976ce 100644
24568 --- a/arch/x86/mm/init.c
24569 +++ b/arch/x86/mm/init.c
24570 @@ -16,6 +16,8 @@
24571 #include <asm/tlb.h>
24572 #include <asm/proto.h>
24573 #include <asm/dma.h> /* for MAX_DMA_PFN */
24574 +#include <asm/desc.h>
24575 +#include <asm/bios_ebda.h>
24576
24577 unsigned long __initdata pgt_buf_start;
24578 unsigned long __meminitdata pgt_buf_end;
24579 @@ -32,7 +34,7 @@ int direct_gbpages
24580 static void __init find_early_table_space(unsigned long end, int use_pse,
24581 int use_gbpages)
24582 {
24583 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24584 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24585 phys_addr_t base;
24586
24587 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
24588 @@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24589 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24590 * mmio resources as well as potential bios/acpi data regions.
24591 */
24592 +
24593 +#ifdef CONFIG_GRKERNSEC_KMEM
24594 +static unsigned int ebda_start __read_only;
24595 +static unsigned int ebda_end __read_only;
24596 +#endif
24597 +
24598 int devmem_is_allowed(unsigned long pagenr)
24599 {
24600 +#ifdef CONFIG_GRKERNSEC_KMEM
24601 + /* allow BDA */
24602 + if (!pagenr)
24603 + return 1;
24604 + /* allow EBDA */
24605 + if (pagenr >= ebda_start && pagenr < ebda_end)
24606 + return 1;
24607 +#else
24608 + if (!pagenr)
24609 + return 1;
24610 +#ifdef CONFIG_VM86
24611 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24612 + return 1;
24613 +#endif
24614 +#endif
24615 +
24616 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24617 + return 1;
24618 +#ifdef CONFIG_GRKERNSEC_KMEM
24619 + /* throw out everything else below 1MB */
24620 if (pagenr <= 256)
24621 - return 1;
24622 + return 0;
24623 +#endif
24624 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24625 return 0;
24626 if (!page_is_ram(pagenr))
24627 @@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24628 #endif
24629 }
24630
24631 +#ifdef CONFIG_GRKERNSEC_KMEM
24632 +static inline void gr_init_ebda(void)
24633 +{
24634 + unsigned int ebda_addr;
24635 + unsigned int ebda_size = 0;
24636 +
24637 + ebda_addr = get_bios_ebda();
24638 + if (ebda_addr) {
24639 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24640 + ebda_size <<= 10;
24641 + }
24642 + if (ebda_addr && ebda_size) {
24643 + ebda_start = ebda_addr >> PAGE_SHIFT;
24644 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24645 + } else {
24646 + ebda_start = 0x9f000 >> PAGE_SHIFT;
24647 + ebda_end = 0xa0000 >> PAGE_SHIFT;
24648 + }
24649 +}
24650 +#else
24651 +static inline void gr_init_ebda(void) { }
24652 +#endif
24653 +
24654 void free_initmem(void)
24655 {
24656 +#ifdef CONFIG_PAX_KERNEXEC
24657 +#ifdef CONFIG_X86_32
24658 + /* PaX: limit KERNEL_CS to actual size */
24659 + unsigned long addr, limit;
24660 + struct desc_struct d;
24661 + int cpu;
24662 +#else
24663 + pgd_t *pgd;
24664 + pud_t *pud;
24665 + pmd_t *pmd;
24666 + unsigned long addr, end;
24667 +#endif
24668 +#endif
24669 +
24670 + gr_init_ebda();
24671 +
24672 +#ifdef CONFIG_PAX_KERNEXEC
24673 +#ifdef CONFIG_X86_32
24674 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24675 + limit = (limit - 1UL) >> PAGE_SHIFT;
24676 +
24677 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24678 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24679 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24680 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24681 + }
24682 +
24683 + /* PaX: make KERNEL_CS read-only */
24684 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24685 + if (!paravirt_enabled())
24686 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24687 +/*
24688 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24689 + pgd = pgd_offset_k(addr);
24690 + pud = pud_offset(pgd, addr);
24691 + pmd = pmd_offset(pud, addr);
24692 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24693 + }
24694 +*/
24695 +#ifdef CONFIG_X86_PAE
24696 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24697 +/*
24698 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24699 + pgd = pgd_offset_k(addr);
24700 + pud = pud_offset(pgd, addr);
24701 + pmd = pmd_offset(pud, addr);
24702 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24703 + }
24704 +*/
24705 +#endif
24706 +
24707 +#ifdef CONFIG_MODULES
24708 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24709 +#endif
24710 +
24711 +#else
24712 + /* PaX: make kernel code/rodata read-only, rest non-executable */
24713 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24714 + pgd = pgd_offset_k(addr);
24715 + pud = pud_offset(pgd, addr);
24716 + pmd = pmd_offset(pud, addr);
24717 + if (!pmd_present(*pmd))
24718 + continue;
24719 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24720 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24721 + else
24722 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24723 + }
24724 +
24725 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24726 + end = addr + KERNEL_IMAGE_SIZE;
24727 + for (; addr < end; addr += PMD_SIZE) {
24728 + pgd = pgd_offset_k(addr);
24729 + pud = pud_offset(pgd, addr);
24730 + pmd = pmd_offset(pud, addr);
24731 + if (!pmd_present(*pmd))
24732 + continue;
24733 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24734 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24735 + }
24736 +#endif
24737 +
24738 + flush_tlb_all();
24739 +#endif
24740 +
24741 free_init_pages("unused kernel memory",
24742 (unsigned long)(&__init_begin),
24743 (unsigned long)(&__init_end));
24744 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24745 index 575d86f..4987469 100644
24746 --- a/arch/x86/mm/init_32.c
24747 +++ b/arch/x86/mm/init_32.c
24748 @@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
24749 }
24750
24751 /*
24752 - * Creates a middle page table and puts a pointer to it in the
24753 - * given global directory entry. This only returns the gd entry
24754 - * in non-PAE compilation mode, since the middle layer is folded.
24755 - */
24756 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
24757 -{
24758 - pud_t *pud;
24759 - pmd_t *pmd_table;
24760 -
24761 -#ifdef CONFIG_X86_PAE
24762 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24763 - if (after_bootmem)
24764 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24765 - else
24766 - pmd_table = (pmd_t *)alloc_low_page();
24767 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24768 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24769 - pud = pud_offset(pgd, 0);
24770 - BUG_ON(pmd_table != pmd_offset(pud, 0));
24771 -
24772 - return pmd_table;
24773 - }
24774 -#endif
24775 - pud = pud_offset(pgd, 0);
24776 - pmd_table = pmd_offset(pud, 0);
24777 -
24778 - return pmd_table;
24779 -}
24780 -
24781 -/*
24782 * Create a page table and place a pointer to it in a middle page
24783 * directory entry:
24784 */
24785 @@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
24786 page_table = (pte_t *)alloc_low_page();
24787
24788 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24789 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24790 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24791 +#else
24792 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24793 +#endif
24794 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24795 }
24796
24797 return pte_offset_kernel(pmd, 0);
24798 }
24799
24800 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
24801 +{
24802 + pud_t *pud;
24803 + pmd_t *pmd_table;
24804 +
24805 + pud = pud_offset(pgd, 0);
24806 + pmd_table = pmd_offset(pud, 0);
24807 +
24808 + return pmd_table;
24809 +}
24810 +
24811 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24812 {
24813 int pgd_idx = pgd_index(vaddr);
24814 @@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24815 int pgd_idx, pmd_idx;
24816 unsigned long vaddr;
24817 pgd_t *pgd;
24818 + pud_t *pud;
24819 pmd_t *pmd;
24820 pte_t *pte = NULL;
24821
24822 @@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24823 pgd = pgd_base + pgd_idx;
24824
24825 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24826 - pmd = one_md_table_init(pgd);
24827 - pmd = pmd + pmd_index(vaddr);
24828 + pud = pud_offset(pgd, vaddr);
24829 + pmd = pmd_offset(pud, vaddr);
24830 +
24831 +#ifdef CONFIG_X86_PAE
24832 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24833 +#endif
24834 +
24835 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24836 pmd++, pmd_idx++) {
24837 pte = page_table_kmap_check(one_page_table_init(pmd),
24838 @@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
24839 }
24840 }
24841
24842 -static inline int is_kernel_text(unsigned long addr)
24843 +static inline int is_kernel_text(unsigned long start, unsigned long end)
24844 {
24845 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
24846 - return 1;
24847 - return 0;
24848 + if ((start > ktla_ktva((unsigned long)_etext) ||
24849 + end <= ktla_ktva((unsigned long)_stext)) &&
24850 + (start > ktla_ktva((unsigned long)_einittext) ||
24851 + end <= ktla_ktva((unsigned long)_sinittext)) &&
24852 +
24853 +#ifdef CONFIG_ACPI_SLEEP
24854 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24855 +#endif
24856 +
24857 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24858 + return 0;
24859 + return 1;
24860 }
24861
24862 /*
24863 @@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
24864 unsigned long last_map_addr = end;
24865 unsigned long start_pfn, end_pfn;
24866 pgd_t *pgd_base = swapper_pg_dir;
24867 - int pgd_idx, pmd_idx, pte_ofs;
24868 + unsigned int pgd_idx, pmd_idx, pte_ofs;
24869 unsigned long pfn;
24870 pgd_t *pgd;
24871 + pud_t *pud;
24872 pmd_t *pmd;
24873 pte_t *pte;
24874 unsigned pages_2m, pages_4k;
24875 @@ -280,8 +281,13 @@ repeat:
24876 pfn = start_pfn;
24877 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24878 pgd = pgd_base + pgd_idx;
24879 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24880 - pmd = one_md_table_init(pgd);
24881 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24882 + pud = pud_offset(pgd, 0);
24883 + pmd = pmd_offset(pud, 0);
24884 +
24885 +#ifdef CONFIG_X86_PAE
24886 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24887 +#endif
24888
24889 if (pfn >= end_pfn)
24890 continue;
24891 @@ -293,14 +299,13 @@ repeat:
24892 #endif
24893 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24894 pmd++, pmd_idx++) {
24895 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24896 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24897
24898 /*
24899 * Map with big pages if possible, otherwise
24900 * create normal page tables:
24901 */
24902 if (use_pse) {
24903 - unsigned int addr2;
24904 pgprot_t prot = PAGE_KERNEL_LARGE;
24905 /*
24906 * first pass will use the same initial
24907 @@ -310,11 +315,7 @@ repeat:
24908 __pgprot(PTE_IDENT_ATTR |
24909 _PAGE_PSE);
24910
24911 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24912 - PAGE_OFFSET + PAGE_SIZE-1;
24913 -
24914 - if (is_kernel_text(addr) ||
24915 - is_kernel_text(addr2))
24916 + if (is_kernel_text(address, address + PMD_SIZE))
24917 prot = PAGE_KERNEL_LARGE_EXEC;
24918
24919 pages_2m++;
24920 @@ -331,7 +332,7 @@ repeat:
24921 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24922 pte += pte_ofs;
24923 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24924 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24925 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24926 pgprot_t prot = PAGE_KERNEL;
24927 /*
24928 * first pass will use the same initial
24929 @@ -339,7 +340,7 @@ repeat:
24930 */
24931 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24932
24933 - if (is_kernel_text(addr))
24934 + if (is_kernel_text(address, address + PAGE_SIZE))
24935 prot = PAGE_KERNEL_EXEC;
24936
24937 pages_4k++;
24938 @@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
24939
24940 pud = pud_offset(pgd, va);
24941 pmd = pmd_offset(pud, va);
24942 - if (!pmd_present(*pmd))
24943 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
24944 break;
24945
24946 pte = pte_offset_kernel(pmd, va);
24947 @@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
24948
24949 static void __init pagetable_init(void)
24950 {
24951 - pgd_t *pgd_base = swapper_pg_dir;
24952 -
24953 - permanent_kmaps_init(pgd_base);
24954 + permanent_kmaps_init(swapper_pg_dir);
24955 }
24956
24957 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24958 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24959 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24960
24961 /* user-defined highmem size */
24962 @@ -734,6 +733,12 @@ void __init mem_init(void)
24963
24964 pci_iommu_alloc();
24965
24966 +#ifdef CONFIG_PAX_PER_CPU_PGD
24967 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24968 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24969 + KERNEL_PGD_PTRS);
24970 +#endif
24971 +
24972 #ifdef CONFIG_FLATMEM
24973 BUG_ON(!mem_map);
24974 #endif
24975 @@ -760,7 +765,7 @@ void __init mem_init(void)
24976 reservedpages++;
24977
24978 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24979 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24980 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24981 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
24982
24983 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
24984 @@ -801,10 +806,10 @@ void __init mem_init(void)
24985 ((unsigned long)&__init_end -
24986 (unsigned long)&__init_begin) >> 10,
24987
24988 - (unsigned long)&_etext, (unsigned long)&_edata,
24989 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
24990 + (unsigned long)&_sdata, (unsigned long)&_edata,
24991 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
24992
24993 - (unsigned long)&_text, (unsigned long)&_etext,
24994 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
24995 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
24996
24997 /*
24998 @@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
24999 if (!kernel_set_to_readonly)
25000 return;
25001
25002 + start = ktla_ktva(start);
25003 pr_debug("Set kernel text: %lx - %lx for read write\n",
25004 start, start+size);
25005
25006 @@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
25007 if (!kernel_set_to_readonly)
25008 return;
25009
25010 + start = ktla_ktva(start);
25011 pr_debug("Set kernel text: %lx - %lx for read only\n",
25012 start, start+size);
25013
25014 @@ -924,6 +931,7 @@ void mark_rodata_ro(void)
25015 unsigned long start = PFN_ALIGN(_text);
25016 unsigned long size = PFN_ALIGN(_etext) - start;
25017
25018 + start = ktla_ktva(start);
25019 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25020 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25021 size >> 10);
25022 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25023 index fc18be0..e539653 100644
25024 --- a/arch/x86/mm/init_64.c
25025 +++ b/arch/x86/mm/init_64.c
25026 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
25027 * around without checking the pgd every time.
25028 */
25029
25030 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25031 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25032 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25033
25034 int force_personality32;
25035 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25036
25037 for (address = start; address <= end; address += PGDIR_SIZE) {
25038 const pgd_t *pgd_ref = pgd_offset_k(address);
25039 +
25040 +#ifdef CONFIG_PAX_PER_CPU_PGD
25041 + unsigned long cpu;
25042 +#else
25043 struct page *page;
25044 +#endif
25045
25046 if (pgd_none(*pgd_ref))
25047 continue;
25048
25049 spin_lock(&pgd_lock);
25050 +
25051 +#ifdef CONFIG_PAX_PER_CPU_PGD
25052 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25053 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
25054 +#else
25055 list_for_each_entry(page, &pgd_list, lru) {
25056 pgd_t *pgd;
25057 spinlock_t *pgt_lock;
25058 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25059 /* the pgt_lock only for Xen */
25060 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25061 spin_lock(pgt_lock);
25062 +#endif
25063
25064 if (pgd_none(*pgd))
25065 set_pgd(pgd, *pgd_ref);
25066 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
25067 BUG_ON(pgd_page_vaddr(*pgd)
25068 != pgd_page_vaddr(*pgd_ref));
25069
25070 +#ifndef CONFIG_PAX_PER_CPU_PGD
25071 spin_unlock(pgt_lock);
25072 +#endif
25073 +
25074 }
25075 spin_unlock(&pgd_lock);
25076 }
25077 @@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
25078 {
25079 if (pgd_none(*pgd)) {
25080 pud_t *pud = (pud_t *)spp_getpage();
25081 - pgd_populate(&init_mm, pgd, pud);
25082 + pgd_populate_kernel(&init_mm, pgd, pud);
25083 if (pud != pud_offset(pgd, 0))
25084 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25085 pud, pud_offset(pgd, 0));
25086 @@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
25087 {
25088 if (pud_none(*pud)) {
25089 pmd_t *pmd = (pmd_t *) spp_getpage();
25090 - pud_populate(&init_mm, pud, pmd);
25091 + pud_populate_kernel(&init_mm, pud, pmd);
25092 if (pmd != pmd_offset(pud, 0))
25093 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25094 pmd, pmd_offset(pud, 0));
25095 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25096 pmd = fill_pmd(pud, vaddr);
25097 pte = fill_pte(pmd, vaddr);
25098
25099 + pax_open_kernel();
25100 set_pte(pte, new_pte);
25101 + pax_close_kernel();
25102
25103 /*
25104 * It's enough to flush this one mapping.
25105 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25106 pgd = pgd_offset_k((unsigned long)__va(phys));
25107 if (pgd_none(*pgd)) {
25108 pud = (pud_t *) spp_getpage();
25109 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25110 - _PAGE_USER));
25111 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25112 }
25113 pud = pud_offset(pgd, (unsigned long)__va(phys));
25114 if (pud_none(*pud)) {
25115 pmd = (pmd_t *) spp_getpage();
25116 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25117 - _PAGE_USER));
25118 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25119 }
25120 pmd = pmd_offset(pud, phys);
25121 BUG_ON(!pmd_none(*pmd));
25122 @@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
25123 if (pfn >= pgt_buf_top)
25124 panic("alloc_low_page: ran out of memory");
25125
25126 - adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25127 + adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25128 clear_page(adr);
25129 *phys = pfn * PAGE_SIZE;
25130 return adr;
25131 @@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
25132
25133 phys = __pa(virt);
25134 left = phys & (PAGE_SIZE - 1);
25135 - adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25136 + adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25137 adr = (void *)(((unsigned long)adr) | left);
25138
25139 return adr;
25140 @@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
25141 unmap_low_page(pmd);
25142
25143 spin_lock(&init_mm.page_table_lock);
25144 - pud_populate(&init_mm, pud, __va(pmd_phys));
25145 + pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25146 spin_unlock(&init_mm.page_table_lock);
25147 }
25148 __flush_tlb_all();
25149 @@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
25150 unmap_low_page(pud);
25151
25152 spin_lock(&init_mm.page_table_lock);
25153 - pgd_populate(&init_mm, pgd, __va(pud_phys));
25154 + pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25155 spin_unlock(&init_mm.page_table_lock);
25156 pgd_changed = true;
25157 }
25158 @@ -683,6 +697,12 @@ void __init mem_init(void)
25159
25160 pci_iommu_alloc();
25161
25162 +#ifdef CONFIG_PAX_PER_CPU_PGD
25163 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25164 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25165 + KERNEL_PGD_PTRS);
25166 +#endif
25167 +
25168 /* clear_bss() already clear the empty_zero_page */
25169
25170 reservedpages = 0;
25171 @@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
25172 static struct vm_area_struct gate_vma = {
25173 .vm_start = VSYSCALL_START,
25174 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25175 - .vm_page_prot = PAGE_READONLY_EXEC,
25176 - .vm_flags = VM_READ | VM_EXEC
25177 + .vm_page_prot = PAGE_READONLY,
25178 + .vm_flags = VM_READ
25179 };
25180
25181 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
25182 @@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
25183
25184 const char *arch_vma_name(struct vm_area_struct *vma)
25185 {
25186 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25187 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25188 return "[vdso]";
25189 if (vma == &gate_vma)
25190 return "[vsyscall]";
25191 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25192 index 7b179b4..6bd1777 100644
25193 --- a/arch/x86/mm/iomap_32.c
25194 +++ b/arch/x86/mm/iomap_32.c
25195 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
25196 type = kmap_atomic_idx_push();
25197 idx = type + KM_TYPE_NR * smp_processor_id();
25198 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25199 +
25200 + pax_open_kernel();
25201 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25202 + pax_close_kernel();
25203 +
25204 arch_flush_lazy_mmu_mode();
25205
25206 return (void *)vaddr;
25207 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25208 index be1ef57..55f0160 100644
25209 --- a/arch/x86/mm/ioremap.c
25210 +++ b/arch/x86/mm/ioremap.c
25211 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25212 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
25213 int is_ram = page_is_ram(pfn);
25214
25215 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25216 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25217 return NULL;
25218 WARN_ON_ONCE(is_ram);
25219 }
25220 @@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25221
25222 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25223 if (page_is_ram(start >> PAGE_SHIFT))
25224 +#ifdef CONFIG_HIGHMEM
25225 + if ((start >> PAGE_SHIFT) < max_low_pfn)
25226 +#endif
25227 return __va(phys);
25228
25229 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25230 @@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
25231 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25232
25233 static __initdata int after_paging_init;
25234 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25235 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25236
25237 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25238 {
25239 @@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
25240 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25241
25242 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25243 - memset(bm_pte, 0, sizeof(bm_pte));
25244 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
25245 + pmd_populate_user(&init_mm, pmd, bm_pte);
25246
25247 /*
25248 * The boot-ioremap range spans multiple pmds, for which
25249 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25250 index d87dd6d..bf3fa66 100644
25251 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
25252 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25253 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25254 * memory (e.g. tracked pages)? For now, we need this to avoid
25255 * invoking kmemcheck for PnP BIOS calls.
25256 */
25257 - if (regs->flags & X86_VM_MASK)
25258 + if (v8086_mode(regs))
25259 return false;
25260 - if (regs->cs != __KERNEL_CS)
25261 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25262 return false;
25263
25264 pte = kmemcheck_pte_lookup(address);
25265 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25266 index 845df68..1d8d29f 100644
25267 --- a/arch/x86/mm/mmap.c
25268 +++ b/arch/x86/mm/mmap.c
25269 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
25270 * Leave an at least ~128 MB hole with possible stack randomization.
25271 */
25272 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25273 -#define MAX_GAP (TASK_SIZE/6*5)
25274 +#define MAX_GAP (pax_task_size/6*5)
25275
25276 static int mmap_is_legacy(void)
25277 {
25278 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
25279 return rnd << PAGE_SHIFT;
25280 }
25281
25282 -static unsigned long mmap_base(void)
25283 +static unsigned long mmap_base(struct mm_struct *mm)
25284 {
25285 unsigned long gap = rlimit(RLIMIT_STACK);
25286 + unsigned long pax_task_size = TASK_SIZE;
25287 +
25288 +#ifdef CONFIG_PAX_SEGMEXEC
25289 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25290 + pax_task_size = SEGMEXEC_TASK_SIZE;
25291 +#endif
25292
25293 if (gap < MIN_GAP)
25294 gap = MIN_GAP;
25295 else if (gap > MAX_GAP)
25296 gap = MAX_GAP;
25297
25298 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25299 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25300 }
25301
25302 /*
25303 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25304 * does, but not when emulating X86_32
25305 */
25306 -static unsigned long mmap_legacy_base(void)
25307 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
25308 {
25309 - if (mmap_is_ia32())
25310 + if (mmap_is_ia32()) {
25311 +
25312 +#ifdef CONFIG_PAX_SEGMEXEC
25313 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
25314 + return SEGMEXEC_TASK_UNMAPPED_BASE;
25315 + else
25316 +#endif
25317 +
25318 return TASK_UNMAPPED_BASE;
25319 - else
25320 + } else
25321 return TASK_UNMAPPED_BASE + mmap_rnd();
25322 }
25323
25324 @@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
25325 void arch_pick_mmap_layout(struct mm_struct *mm)
25326 {
25327 if (mmap_is_legacy()) {
25328 - mm->mmap_base = mmap_legacy_base();
25329 + mm->mmap_base = mmap_legacy_base(mm);
25330 +
25331 +#ifdef CONFIG_PAX_RANDMMAP
25332 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25333 + mm->mmap_base += mm->delta_mmap;
25334 +#endif
25335 +
25336 mm->get_unmapped_area = arch_get_unmapped_area;
25337 mm->unmap_area = arch_unmap_area;
25338 } else {
25339 - mm->mmap_base = mmap_base();
25340 + mm->mmap_base = mmap_base(mm);
25341 +
25342 +#ifdef CONFIG_PAX_RANDMMAP
25343 + if (mm->pax_flags & MF_PAX_RANDMMAP)
25344 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25345 +#endif
25346 +
25347 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25348 mm->unmap_area = arch_unmap_area_topdown;
25349 }
25350 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25351 index dc0b727..dc9d71a 100644
25352 --- a/arch/x86/mm/mmio-mod.c
25353 +++ b/arch/x86/mm/mmio-mod.c
25354 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25355 break;
25356 default:
25357 {
25358 - unsigned char *ip = (unsigned char *)instptr;
25359 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25360 my_trace->opcode = MMIO_UNKNOWN_OP;
25361 my_trace->width = 0;
25362 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25363 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25364 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25365 void __iomem *addr)
25366 {
25367 - static atomic_t next_id;
25368 + static atomic_unchecked_t next_id;
25369 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25370 /* These are page-unaligned. */
25371 struct mmiotrace_map map = {
25372 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25373 .private = trace
25374 },
25375 .phys = offset,
25376 - .id = atomic_inc_return(&next_id)
25377 + .id = atomic_inc_return_unchecked(&next_id)
25378 };
25379 map.map_id = trace->id;
25380
25381 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25382 index b008656..773eac2 100644
25383 --- a/arch/x86/mm/pageattr-test.c
25384 +++ b/arch/x86/mm/pageattr-test.c
25385 @@ -36,7 +36,7 @@ enum {
25386
25387 static int pte_testbit(pte_t pte)
25388 {
25389 - return pte_flags(pte) & _PAGE_UNUSED1;
25390 + return pte_flags(pte) & _PAGE_CPA_TEST;
25391 }
25392
25393 struct split_state {
25394 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25395 index e1ebde3..b1e1db38 100644
25396 --- a/arch/x86/mm/pageattr.c
25397 +++ b/arch/x86/mm/pageattr.c
25398 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25399 */
25400 #ifdef CONFIG_PCI_BIOS
25401 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25402 - pgprot_val(forbidden) |= _PAGE_NX;
25403 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25404 #endif
25405
25406 /*
25407 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25408 * Does not cover __inittext since that is gone later on. On
25409 * 64bit we do not enforce !NX on the low mapping
25410 */
25411 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
25412 - pgprot_val(forbidden) |= _PAGE_NX;
25413 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25414 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25415
25416 +#ifdef CONFIG_DEBUG_RODATA
25417 /*
25418 * The .rodata section needs to be read-only. Using the pfn
25419 * catches all aliases.
25420 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25421 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25422 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25423 pgprot_val(forbidden) |= _PAGE_RW;
25424 +#endif
25425
25426 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25427 /*
25428 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25429 }
25430 #endif
25431
25432 +#ifdef CONFIG_PAX_KERNEXEC
25433 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25434 + pgprot_val(forbidden) |= _PAGE_RW;
25435 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25436 + }
25437 +#endif
25438 +
25439 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25440
25441 return prot;
25442 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25443 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25444 {
25445 /* change init_mm */
25446 + pax_open_kernel();
25447 set_pte_atomic(kpte, pte);
25448 +
25449 #ifdef CONFIG_X86_32
25450 if (!SHARED_KERNEL_PMD) {
25451 +
25452 +#ifdef CONFIG_PAX_PER_CPU_PGD
25453 + unsigned long cpu;
25454 +#else
25455 struct page *page;
25456 +#endif
25457
25458 +#ifdef CONFIG_PAX_PER_CPU_PGD
25459 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25460 + pgd_t *pgd = get_cpu_pgd(cpu);
25461 +#else
25462 list_for_each_entry(page, &pgd_list, lru) {
25463 - pgd_t *pgd;
25464 + pgd_t *pgd = (pgd_t *)page_address(page);
25465 +#endif
25466 +
25467 pud_t *pud;
25468 pmd_t *pmd;
25469
25470 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
25471 + pgd += pgd_index(address);
25472 pud = pud_offset(pgd, address);
25473 pmd = pmd_offset(pud, address);
25474 set_pte_atomic((pte_t *)pmd, pte);
25475 }
25476 }
25477 #endif
25478 + pax_close_kernel();
25479 }
25480
25481 static int
25482 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25483 index f6ff57b..481690f 100644
25484 --- a/arch/x86/mm/pat.c
25485 +++ b/arch/x86/mm/pat.c
25486 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25487
25488 if (!entry) {
25489 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25490 - current->comm, current->pid, start, end);
25491 + current->comm, task_pid_nr(current), start, end);
25492 return -EINVAL;
25493 }
25494
25495 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25496 while (cursor < to) {
25497 if (!devmem_is_allowed(pfn)) {
25498 printk(KERN_INFO
25499 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25500 - current->comm, from, to);
25501 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25502 + current->comm, from, to, cursor);
25503 return 0;
25504 }
25505 cursor += PAGE_SIZE;
25506 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25507 printk(KERN_INFO
25508 "%s:%d ioremap_change_attr failed %s "
25509 "for %Lx-%Lx\n",
25510 - current->comm, current->pid,
25511 + current->comm, task_pid_nr(current),
25512 cattr_name(flags),
25513 base, (unsigned long long)(base + size));
25514 return -EINVAL;
25515 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25516 if (want_flags != flags) {
25517 printk(KERN_WARNING
25518 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25519 - current->comm, current->pid,
25520 + current->comm, task_pid_nr(current),
25521 cattr_name(want_flags),
25522 (unsigned long long)paddr,
25523 (unsigned long long)(paddr + size),
25524 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25525 free_memtype(paddr, paddr + size);
25526 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25527 " for %Lx-%Lx, got %s\n",
25528 - current->comm, current->pid,
25529 + current->comm, task_pid_nr(current),
25530 cattr_name(want_flags),
25531 (unsigned long long)paddr,
25532 (unsigned long long)(paddr + size),
25533 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25534 index 9f0614d..92ae64a 100644
25535 --- a/arch/x86/mm/pf_in.c
25536 +++ b/arch/x86/mm/pf_in.c
25537 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25538 int i;
25539 enum reason_type rv = OTHERS;
25540
25541 - p = (unsigned char *)ins_addr;
25542 + p = (unsigned char *)ktla_ktva(ins_addr);
25543 p += skip_prefix(p, &prf);
25544 p += get_opcode(p, &opcode);
25545
25546 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25547 struct prefix_bits prf;
25548 int i;
25549
25550 - p = (unsigned char *)ins_addr;
25551 + p = (unsigned char *)ktla_ktva(ins_addr);
25552 p += skip_prefix(p, &prf);
25553 p += get_opcode(p, &opcode);
25554
25555 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25556 struct prefix_bits prf;
25557 int i;
25558
25559 - p = (unsigned char *)ins_addr;
25560 + p = (unsigned char *)ktla_ktva(ins_addr);
25561 p += skip_prefix(p, &prf);
25562 p += get_opcode(p, &opcode);
25563
25564 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25565 struct prefix_bits prf;
25566 int i;
25567
25568 - p = (unsigned char *)ins_addr;
25569 + p = (unsigned char *)ktla_ktva(ins_addr);
25570 p += skip_prefix(p, &prf);
25571 p += get_opcode(p, &opcode);
25572 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25573 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25574 struct prefix_bits prf;
25575 int i;
25576
25577 - p = (unsigned char *)ins_addr;
25578 + p = (unsigned char *)ktla_ktva(ins_addr);
25579 p += skip_prefix(p, &prf);
25580 p += get_opcode(p, &opcode);
25581 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25582 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25583 index 8573b83..4f3ed7e 100644
25584 --- a/arch/x86/mm/pgtable.c
25585 +++ b/arch/x86/mm/pgtable.c
25586 @@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
25587 list_del(&page->lru);
25588 }
25589
25590 -#define UNSHARED_PTRS_PER_PGD \
25591 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25592 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25593 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25594
25595 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
25596 +{
25597 + unsigned int count = USER_PGD_PTRS;
25598
25599 + while (count--)
25600 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25601 +}
25602 +#endif
25603 +
25604 +#ifdef CONFIG_PAX_PER_CPU_PGD
25605 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
25606 +{
25607 + unsigned int count = USER_PGD_PTRS;
25608 +
25609 + while (count--) {
25610 + pgd_t pgd;
25611 +
25612 +#ifdef CONFIG_X86_64
25613 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
25614 +#else
25615 + pgd = *src++;
25616 +#endif
25617 +
25618 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25619 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
25620 +#endif
25621 +
25622 + *dst++ = pgd;
25623 + }
25624 +
25625 +}
25626 +#endif
25627 +
25628 +#ifdef CONFIG_X86_64
25629 +#define pxd_t pud_t
25630 +#define pyd_t pgd_t
25631 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25632 +#define pxd_free(mm, pud) pud_free((mm), (pud))
25633 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25634 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
25635 +#define PYD_SIZE PGDIR_SIZE
25636 +#else
25637 +#define pxd_t pmd_t
25638 +#define pyd_t pud_t
25639 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25640 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
25641 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25642 +#define pyd_offset(mm, address) pud_offset((mm), (address))
25643 +#define PYD_SIZE PUD_SIZE
25644 +#endif
25645 +
25646 +#ifdef CONFIG_PAX_PER_CPU_PGD
25647 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25648 +static inline void pgd_dtor(pgd_t *pgd) {}
25649 +#else
25650 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
25651 {
25652 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
25653 @@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
25654 pgd_list_del(pgd);
25655 spin_unlock(&pgd_lock);
25656 }
25657 +#endif
25658
25659 /*
25660 * List of all pgd's needed for non-PAE so it can invalidate entries
25661 @@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
25662 * -- wli
25663 */
25664
25665 -#ifdef CONFIG_X86_PAE
25666 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25667 /*
25668 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25669 * updating the top-level pagetable entries to guarantee the
25670 @@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
25671 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25672 * and initialize the kernel pmds here.
25673 */
25674 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25675 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25676
25677 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25678 {
25679 @@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25680 */
25681 flush_tlb_mm(mm);
25682 }
25683 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25684 +#define PREALLOCATED_PXDS USER_PGD_PTRS
25685 #else /* !CONFIG_X86_PAE */
25686
25687 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25688 -#define PREALLOCATED_PMDS 0
25689 +#define PREALLOCATED_PXDS 0
25690
25691 #endif /* CONFIG_X86_PAE */
25692
25693 -static void free_pmds(pmd_t *pmds[])
25694 +static void free_pxds(pxd_t *pxds[])
25695 {
25696 int i;
25697
25698 - for(i = 0; i < PREALLOCATED_PMDS; i++)
25699 - if (pmds[i])
25700 - free_page((unsigned long)pmds[i]);
25701 + for(i = 0; i < PREALLOCATED_PXDS; i++)
25702 + if (pxds[i])
25703 + free_page((unsigned long)pxds[i]);
25704 }
25705
25706 -static int preallocate_pmds(pmd_t *pmds[])
25707 +static int preallocate_pxds(pxd_t *pxds[])
25708 {
25709 int i;
25710 bool failed = false;
25711
25712 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25713 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25714 - if (pmd == NULL)
25715 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25716 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25717 + if (pxd == NULL)
25718 failed = true;
25719 - pmds[i] = pmd;
25720 + pxds[i] = pxd;
25721 }
25722
25723 if (failed) {
25724 - free_pmds(pmds);
25725 + free_pxds(pxds);
25726 return -ENOMEM;
25727 }
25728
25729 @@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
25730 * preallocate which never got a corresponding vma will need to be
25731 * freed manually.
25732 */
25733 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25734 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25735 {
25736 int i;
25737
25738 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
25739 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
25740 pgd_t pgd = pgdp[i];
25741
25742 if (pgd_val(pgd) != 0) {
25743 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25744 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25745
25746 - pgdp[i] = native_make_pgd(0);
25747 + set_pgd(pgdp + i, native_make_pgd(0));
25748
25749 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25750 - pmd_free(mm, pmd);
25751 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25752 + pxd_free(mm, pxd);
25753 }
25754 }
25755 }
25756
25757 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25758 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25759 {
25760 - pud_t *pud;
25761 + pyd_t *pyd;
25762 unsigned long addr;
25763 int i;
25764
25765 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25766 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25767 return;
25768
25769 - pud = pud_offset(pgd, 0);
25770 +#ifdef CONFIG_X86_64
25771 + pyd = pyd_offset(mm, 0L);
25772 +#else
25773 + pyd = pyd_offset(pgd, 0L);
25774 +#endif
25775
25776 - for (addr = i = 0; i < PREALLOCATED_PMDS;
25777 - i++, pud++, addr += PUD_SIZE) {
25778 - pmd_t *pmd = pmds[i];
25779 + for (addr = i = 0; i < PREALLOCATED_PXDS;
25780 + i++, pyd++, addr += PYD_SIZE) {
25781 + pxd_t *pxd = pxds[i];
25782
25783 if (i >= KERNEL_PGD_BOUNDARY)
25784 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25785 - sizeof(pmd_t) * PTRS_PER_PMD);
25786 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25787 + sizeof(pxd_t) * PTRS_PER_PMD);
25788
25789 - pud_populate(mm, pud, pmd);
25790 + pyd_populate(mm, pyd, pxd);
25791 }
25792 }
25793
25794 pgd_t *pgd_alloc(struct mm_struct *mm)
25795 {
25796 pgd_t *pgd;
25797 - pmd_t *pmds[PREALLOCATED_PMDS];
25798 + pxd_t *pxds[PREALLOCATED_PXDS];
25799
25800 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25801
25802 @@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25803
25804 mm->pgd = pgd;
25805
25806 - if (preallocate_pmds(pmds) != 0)
25807 + if (preallocate_pxds(pxds) != 0)
25808 goto out_free_pgd;
25809
25810 if (paravirt_pgd_alloc(mm) != 0)
25811 - goto out_free_pmds;
25812 + goto out_free_pxds;
25813
25814 /*
25815 * Make sure that pre-populating the pmds is atomic with
25816 @@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25817 spin_lock(&pgd_lock);
25818
25819 pgd_ctor(mm, pgd);
25820 - pgd_prepopulate_pmd(mm, pgd, pmds);
25821 + pgd_prepopulate_pxd(mm, pgd, pxds);
25822
25823 spin_unlock(&pgd_lock);
25824
25825 return pgd;
25826
25827 -out_free_pmds:
25828 - free_pmds(pmds);
25829 +out_free_pxds:
25830 + free_pxds(pxds);
25831 out_free_pgd:
25832 free_page((unsigned long)pgd);
25833 out:
25834 @@ -295,7 +356,7 @@ out:
25835
25836 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25837 {
25838 - pgd_mop_up_pmds(mm, pgd);
25839 + pgd_mop_up_pxds(mm, pgd);
25840 pgd_dtor(pgd);
25841 paravirt_pgd_free(mm, pgd);
25842 free_page((unsigned long)pgd);
25843 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
25844 index a69bcb8..19068ab 100644
25845 --- a/arch/x86/mm/pgtable_32.c
25846 +++ b/arch/x86/mm/pgtable_32.c
25847 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
25848 return;
25849 }
25850 pte = pte_offset_kernel(pmd, vaddr);
25851 +
25852 + pax_open_kernel();
25853 if (pte_val(pteval))
25854 set_pte_at(&init_mm, vaddr, pte, pteval);
25855 else
25856 pte_clear(&init_mm, vaddr, pte);
25857 + pax_close_kernel();
25858
25859 /*
25860 * It's enough to flush this one mapping.
25861 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25862 index 410531d..0f16030 100644
25863 --- a/arch/x86/mm/setup_nx.c
25864 +++ b/arch/x86/mm/setup_nx.c
25865 @@ -5,8 +5,10 @@
25866 #include <asm/pgtable.h>
25867 #include <asm/proto.h>
25868
25869 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25870 static int disable_nx __cpuinitdata;
25871
25872 +#ifndef CONFIG_PAX_PAGEEXEC
25873 /*
25874 * noexec = on|off
25875 *
25876 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
25877 return 0;
25878 }
25879 early_param("noexec", noexec_setup);
25880 +#endif
25881 +
25882 +#endif
25883
25884 void __cpuinit x86_configure_nx(void)
25885 {
25886 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25887 if (cpu_has_nx && !disable_nx)
25888 __supported_pte_mask |= _PAGE_NX;
25889 else
25890 +#endif
25891 __supported_pte_mask &= ~_PAGE_NX;
25892 }
25893
25894 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25895 index d6c0418..06a0ad5 100644
25896 --- a/arch/x86/mm/tlb.c
25897 +++ b/arch/x86/mm/tlb.c
25898 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
25899 BUG();
25900 cpumask_clear_cpu(cpu,
25901 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25902 +
25903 +#ifndef CONFIG_PAX_PER_CPU_PGD
25904 load_cr3(swapper_pg_dir);
25905 +#endif
25906 +
25907 }
25908 EXPORT_SYMBOL_GPL(leave_mm);
25909
25910 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
25911 index 877b9a1..a8ecf42 100644
25912 --- a/arch/x86/net/bpf_jit.S
25913 +++ b/arch/x86/net/bpf_jit.S
25914 @@ -9,6 +9,7 @@
25915 */
25916 #include <linux/linkage.h>
25917 #include <asm/dwarf2.h>
25918 +#include <asm/alternative-asm.h>
25919
25920 /*
25921 * Calling convention :
25922 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
25923 jle bpf_slow_path_word
25924 mov (SKBDATA,%rsi),%eax
25925 bswap %eax /* ntohl() */
25926 + pax_force_retaddr
25927 ret
25928
25929 sk_load_half:
25930 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
25931 jle bpf_slow_path_half
25932 movzwl (SKBDATA,%rsi),%eax
25933 rol $8,%ax # ntohs()
25934 + pax_force_retaddr
25935 ret
25936
25937 sk_load_byte:
25938 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
25939 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25940 jle bpf_slow_path_byte
25941 movzbl (SKBDATA,%rsi),%eax
25942 + pax_force_retaddr
25943 ret
25944
25945 /**
25946 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
25947 movzbl (SKBDATA,%rsi),%ebx
25948 and $15,%bl
25949 shl $2,%bl
25950 + pax_force_retaddr
25951 ret
25952
25953 /* rsi contains offset and can be scratched */
25954 @@ -109,6 +114,7 @@ bpf_slow_path_word:
25955 js bpf_error
25956 mov -12(%rbp),%eax
25957 bswap %eax
25958 + pax_force_retaddr
25959 ret
25960
25961 bpf_slow_path_half:
25962 @@ -117,12 +123,14 @@ bpf_slow_path_half:
25963 mov -12(%rbp),%ax
25964 rol $8,%ax
25965 movzwl %ax,%eax
25966 + pax_force_retaddr
25967 ret
25968
25969 bpf_slow_path_byte:
25970 bpf_slow_path_common(1)
25971 js bpf_error
25972 movzbl -12(%rbp),%eax
25973 + pax_force_retaddr
25974 ret
25975
25976 bpf_slow_path_byte_msh:
25977 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
25978 and $15,%al
25979 shl $2,%al
25980 xchg %eax,%ebx
25981 + pax_force_retaddr
25982 ret
25983
25984 #define sk_negative_common(SIZE) \
25985 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
25986 sk_negative_common(4)
25987 mov (%rax), %eax
25988 bswap %eax
25989 + pax_force_retaddr
25990 ret
25991
25992 bpf_slow_path_half_neg:
25993 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
25994 mov (%rax),%ax
25995 rol $8,%ax
25996 movzwl %ax,%eax
25997 + pax_force_retaddr
25998 ret
25999
26000 bpf_slow_path_byte_neg:
26001 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
26002 .globl sk_load_byte_negative_offset
26003 sk_negative_common(1)
26004 movzbl (%rax), %eax
26005 + pax_force_retaddr
26006 ret
26007
26008 bpf_slow_path_byte_msh_neg:
26009 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
26010 and $15,%al
26011 shl $2,%al
26012 xchg %eax,%ebx
26013 + pax_force_retaddr
26014 ret
26015
26016 bpf_error:
26017 @@ -197,4 +210,5 @@ bpf_error:
26018 xor %eax,%eax
26019 mov -8(%rbp),%rbx
26020 leaveq
26021 + pax_force_retaddr
26022 ret
26023 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
26024 index 0597f95..a12c36e 100644
26025 --- a/arch/x86/net/bpf_jit_comp.c
26026 +++ b/arch/x86/net/bpf_jit_comp.c
26027 @@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
26028 set_fs(old_fs);
26029 }
26030
26031 +struct bpf_jit_work {
26032 + struct work_struct work;
26033 + void *image;
26034 +};
26035 +
26036 #define CHOOSE_LOAD_FUNC(K, func) \
26037 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
26038
26039 @@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
26040 if (addrs == NULL)
26041 return;
26042
26043 + fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26044 + if (!fp->work)
26045 + goto out;
26046 +
26047 /* Before first pass, make a rough estimation of addrs[]
26048 * each bpf instruction is translated to less than 64 bytes
26049 */
26050 @@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26051 break;
26052 default:
26053 /* hmm, too complex filter, give up with jit compiler */
26054 - goto out;
26055 + goto error;
26056 }
26057 ilen = prog - temp;
26058 if (image) {
26059 if (unlikely(proglen + ilen > oldproglen)) {
26060 pr_err("bpb_jit_compile fatal error\n");
26061 - kfree(addrs);
26062 - module_free(NULL, image);
26063 - return;
26064 + module_free_exec(NULL, image);
26065 + goto error;
26066 }
26067 + pax_open_kernel();
26068 memcpy(image + proglen, temp, ilen);
26069 + pax_close_kernel();
26070 }
26071 proglen += ilen;
26072 addrs[i] = proglen;
26073 @@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26074 break;
26075 }
26076 if (proglen == oldproglen) {
26077 - image = module_alloc(max_t(unsigned int,
26078 - proglen,
26079 - sizeof(struct work_struct)));
26080 + image = module_alloc_exec(proglen);
26081 if (!image)
26082 - goto out;
26083 + goto error;
26084 }
26085 oldproglen = proglen;
26086 }
26087 @@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
26088 bpf_flush_icache(image, image + proglen);
26089
26090 fp->bpf_func = (void *)image;
26091 - }
26092 + } else
26093 +error:
26094 + kfree(fp->work);
26095 +
26096 out:
26097 kfree(addrs);
26098 return;
26099 @@ -648,18 +659,20 @@ out:
26100
26101 static void jit_free_defer(struct work_struct *arg)
26102 {
26103 - module_free(NULL, arg);
26104 + module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
26105 + kfree(arg);
26106 }
26107
26108 /* run from softirq, we must use a work_struct to call
26109 - * module_free() from process context
26110 + * module_free_exec() from process context
26111 */
26112 void bpf_jit_free(struct sk_filter *fp)
26113 {
26114 if (fp->bpf_func != sk_run_filter) {
26115 - struct work_struct *work = (struct work_struct *)fp->bpf_func;
26116 + struct work_struct *work = &fp->work->work;
26117
26118 INIT_WORK(work, jit_free_defer);
26119 + fp->work->image = fp->bpf_func;
26120 schedule_work(work);
26121 }
26122 }
26123 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26124 index d6aa6e8..266395a 100644
26125 --- a/arch/x86/oprofile/backtrace.c
26126 +++ b/arch/x86/oprofile/backtrace.c
26127 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
26128 struct stack_frame_ia32 *fp;
26129 unsigned long bytes;
26130
26131 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26132 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26133 if (bytes != sizeof(bufhead))
26134 return NULL;
26135
26136 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26137 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26138
26139 oprofile_add_trace(bufhead[0].return_address);
26140
26141 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
26142 struct stack_frame bufhead[2];
26143 unsigned long bytes;
26144
26145 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26146 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26147 if (bytes != sizeof(bufhead))
26148 return NULL;
26149
26150 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26151 {
26152 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
26153
26154 - if (!user_mode_vm(regs)) {
26155 + if (!user_mode(regs)) {
26156 unsigned long stack = kernel_stack_pointer(regs);
26157 if (depth)
26158 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26159 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
26160 index 140942f..8a5cc55 100644
26161 --- a/arch/x86/pci/mrst.c
26162 +++ b/arch/x86/pci/mrst.c
26163 @@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26164 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
26165 pci_mmcfg_late_init();
26166 pcibios_enable_irq = mrst_pci_irq_enable;
26167 - pci_root_ops = pci_mrst_ops;
26168 + pax_open_kernel();
26169 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26170 + pax_close_kernel();
26171 pci_soc_mode = 1;
26172 /* Continue with standard init */
26173 return 1;
26174 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26175 index da8fe05..7ee6704 100644
26176 --- a/arch/x86/pci/pcbios.c
26177 +++ b/arch/x86/pci/pcbios.c
26178 @@ -79,50 +79,93 @@ union bios32 {
26179 static struct {
26180 unsigned long address;
26181 unsigned short segment;
26182 -} bios32_indirect = { 0, __KERNEL_CS };
26183 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26184
26185 /*
26186 * Returns the entry point for the given service, NULL on error
26187 */
26188
26189 -static unsigned long bios32_service(unsigned long service)
26190 +static unsigned long __devinit bios32_service(unsigned long service)
26191 {
26192 unsigned char return_code; /* %al */
26193 unsigned long address; /* %ebx */
26194 unsigned long length; /* %ecx */
26195 unsigned long entry; /* %edx */
26196 unsigned long flags;
26197 + struct desc_struct d, *gdt;
26198
26199 local_irq_save(flags);
26200 - __asm__("lcall *(%%edi); cld"
26201 +
26202 + gdt = get_cpu_gdt_table(smp_processor_id());
26203 +
26204 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26205 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26206 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26207 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26208 +
26209 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26210 : "=a" (return_code),
26211 "=b" (address),
26212 "=c" (length),
26213 "=d" (entry)
26214 : "0" (service),
26215 "1" (0),
26216 - "D" (&bios32_indirect));
26217 + "D" (&bios32_indirect),
26218 + "r"(__PCIBIOS_DS)
26219 + : "memory");
26220 +
26221 + pax_open_kernel();
26222 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26223 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26224 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26225 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26226 + pax_close_kernel();
26227 +
26228 local_irq_restore(flags);
26229
26230 switch (return_code) {
26231 - case 0:
26232 - return address + entry;
26233 - case 0x80: /* Not present */
26234 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26235 - return 0;
26236 - default: /* Shouldn't happen */
26237 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26238 - service, return_code);
26239 + case 0: {
26240 + int cpu;
26241 + unsigned char flags;
26242 +
26243 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26244 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26245 + printk(KERN_WARNING "bios32_service: not valid\n");
26246 return 0;
26247 + }
26248 + address = address + PAGE_OFFSET;
26249 + length += 16UL; /* some BIOSs underreport this... */
26250 + flags = 4;
26251 + if (length >= 64*1024*1024) {
26252 + length >>= PAGE_SHIFT;
26253 + flags |= 8;
26254 + }
26255 +
26256 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26257 + gdt = get_cpu_gdt_table(cpu);
26258 + pack_descriptor(&d, address, length, 0x9b, flags);
26259 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26260 + pack_descriptor(&d, address, length, 0x93, flags);
26261 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26262 + }
26263 + return entry;
26264 + }
26265 + case 0x80: /* Not present */
26266 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26267 + return 0;
26268 + default: /* Shouldn't happen */
26269 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26270 + service, return_code);
26271 + return 0;
26272 }
26273 }
26274
26275 static struct {
26276 unsigned long address;
26277 unsigned short segment;
26278 -} pci_indirect = { 0, __KERNEL_CS };
26279 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26280
26281 -static int pci_bios_present;
26282 +static int pci_bios_present __read_only;
26283
26284 static int __devinit check_pcibios(void)
26285 {
26286 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
26287 unsigned long flags, pcibios_entry;
26288
26289 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26290 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26291 + pci_indirect.address = pcibios_entry;
26292
26293 local_irq_save(flags);
26294 - __asm__(
26295 - "lcall *(%%edi); cld\n\t"
26296 + __asm__("movw %w6, %%ds\n\t"
26297 + "lcall *%%ss:(%%edi); cld\n\t"
26298 + "push %%ss\n\t"
26299 + "pop %%ds\n\t"
26300 "jc 1f\n\t"
26301 "xor %%ah, %%ah\n"
26302 "1:"
26303 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
26304 "=b" (ebx),
26305 "=c" (ecx)
26306 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26307 - "D" (&pci_indirect)
26308 + "D" (&pci_indirect),
26309 + "r" (__PCIBIOS_DS)
26310 : "memory");
26311 local_irq_restore(flags);
26312
26313 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26314
26315 switch (len) {
26316 case 1:
26317 - __asm__("lcall *(%%esi); cld\n\t"
26318 + __asm__("movw %w6, %%ds\n\t"
26319 + "lcall *%%ss:(%%esi); cld\n\t"
26320 + "push %%ss\n\t"
26321 + "pop %%ds\n\t"
26322 "jc 1f\n\t"
26323 "xor %%ah, %%ah\n"
26324 "1:"
26325 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26326 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26327 "b" (bx),
26328 "D" ((long)reg),
26329 - "S" (&pci_indirect));
26330 + "S" (&pci_indirect),
26331 + "r" (__PCIBIOS_DS));
26332 /*
26333 * Zero-extend the result beyond 8 bits, do not trust the
26334 * BIOS having done it:
26335 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26336 *value &= 0xff;
26337 break;
26338 case 2:
26339 - __asm__("lcall *(%%esi); cld\n\t"
26340 + __asm__("movw %w6, %%ds\n\t"
26341 + "lcall *%%ss:(%%esi); cld\n\t"
26342 + "push %%ss\n\t"
26343 + "pop %%ds\n\t"
26344 "jc 1f\n\t"
26345 "xor %%ah, %%ah\n"
26346 "1:"
26347 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26348 : "1" (PCIBIOS_READ_CONFIG_WORD),
26349 "b" (bx),
26350 "D" ((long)reg),
26351 - "S" (&pci_indirect));
26352 + "S" (&pci_indirect),
26353 + "r" (__PCIBIOS_DS));
26354 /*
26355 * Zero-extend the result beyond 16 bits, do not trust the
26356 * BIOS having done it:
26357 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26358 *value &= 0xffff;
26359 break;
26360 case 4:
26361 - __asm__("lcall *(%%esi); cld\n\t"
26362 + __asm__("movw %w6, %%ds\n\t"
26363 + "lcall *%%ss:(%%esi); cld\n\t"
26364 + "push %%ss\n\t"
26365 + "pop %%ds\n\t"
26366 "jc 1f\n\t"
26367 "xor %%ah, %%ah\n"
26368 "1:"
26369 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26370 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26371 "b" (bx),
26372 "D" ((long)reg),
26373 - "S" (&pci_indirect));
26374 + "S" (&pci_indirect),
26375 + "r" (__PCIBIOS_DS));
26376 break;
26377 }
26378
26379 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26380
26381 switch (len) {
26382 case 1:
26383 - __asm__("lcall *(%%esi); cld\n\t"
26384 + __asm__("movw %w6, %%ds\n\t"
26385 + "lcall *%%ss:(%%esi); cld\n\t"
26386 + "push %%ss\n\t"
26387 + "pop %%ds\n\t"
26388 "jc 1f\n\t"
26389 "xor %%ah, %%ah\n"
26390 "1:"
26391 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26392 "c" (value),
26393 "b" (bx),
26394 "D" ((long)reg),
26395 - "S" (&pci_indirect));
26396 + "S" (&pci_indirect),
26397 + "r" (__PCIBIOS_DS));
26398 break;
26399 case 2:
26400 - __asm__("lcall *(%%esi); cld\n\t"
26401 + __asm__("movw %w6, %%ds\n\t"
26402 + "lcall *%%ss:(%%esi); cld\n\t"
26403 + "push %%ss\n\t"
26404 + "pop %%ds\n\t"
26405 "jc 1f\n\t"
26406 "xor %%ah, %%ah\n"
26407 "1:"
26408 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26409 "c" (value),
26410 "b" (bx),
26411 "D" ((long)reg),
26412 - "S" (&pci_indirect));
26413 + "S" (&pci_indirect),
26414 + "r" (__PCIBIOS_DS));
26415 break;
26416 case 4:
26417 - __asm__("lcall *(%%esi); cld\n\t"
26418 + __asm__("movw %w6, %%ds\n\t"
26419 + "lcall *%%ss:(%%esi); cld\n\t"
26420 + "push %%ss\n\t"
26421 + "pop %%ds\n\t"
26422 "jc 1f\n\t"
26423 "xor %%ah, %%ah\n"
26424 "1:"
26425 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26426 "c" (value),
26427 "b" (bx),
26428 "D" ((long)reg),
26429 - "S" (&pci_indirect));
26430 + "S" (&pci_indirect),
26431 + "r" (__PCIBIOS_DS));
26432 break;
26433 }
26434
26435 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26436
26437 DBG("PCI: Fetching IRQ routing table... ");
26438 __asm__("push %%es\n\t"
26439 + "movw %w8, %%ds\n\t"
26440 "push %%ds\n\t"
26441 "pop %%es\n\t"
26442 - "lcall *(%%esi); cld\n\t"
26443 + "lcall *%%ss:(%%esi); cld\n\t"
26444 "pop %%es\n\t"
26445 + "push %%ss\n\t"
26446 + "pop %%ds\n"
26447 "jc 1f\n\t"
26448 "xor %%ah, %%ah\n"
26449 "1:"
26450 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26451 "1" (0),
26452 "D" ((long) &opt),
26453 "S" (&pci_indirect),
26454 - "m" (opt)
26455 + "m" (opt),
26456 + "r" (__PCIBIOS_DS)
26457 : "memory");
26458 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26459 if (ret & 0xff00)
26460 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26461 {
26462 int ret;
26463
26464 - __asm__("lcall *(%%esi); cld\n\t"
26465 + __asm__("movw %w5, %%ds\n\t"
26466 + "lcall *%%ss:(%%esi); cld\n\t"
26467 + "push %%ss\n\t"
26468 + "pop %%ds\n"
26469 "jc 1f\n\t"
26470 "xor %%ah, %%ah\n"
26471 "1:"
26472 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26473 : "0" (PCIBIOS_SET_PCI_HW_INT),
26474 "b" ((dev->bus->number << 8) | dev->devfn),
26475 "c" ((irq << 8) | (pin + 10)),
26476 - "S" (&pci_indirect));
26477 + "S" (&pci_indirect),
26478 + "r" (__PCIBIOS_DS));
26479 return !(ret & 0xff00);
26480 }
26481 EXPORT_SYMBOL(pcibios_set_irq_routing);
26482 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
26483 index 40e4469..1ab536e 100644
26484 --- a/arch/x86/platform/efi/efi_32.c
26485 +++ b/arch/x86/platform/efi/efi_32.c
26486 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
26487 {
26488 struct desc_ptr gdt_descr;
26489
26490 +#ifdef CONFIG_PAX_KERNEXEC
26491 + struct desc_struct d;
26492 +#endif
26493 +
26494 local_irq_save(efi_rt_eflags);
26495
26496 load_cr3(initial_page_table);
26497 __flush_tlb_all();
26498
26499 +#ifdef CONFIG_PAX_KERNEXEC
26500 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
26501 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26502 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
26503 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26504 +#endif
26505 +
26506 gdt_descr.address = __pa(get_cpu_gdt_table(0));
26507 gdt_descr.size = GDT_SIZE - 1;
26508 load_gdt(&gdt_descr);
26509 @@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
26510 {
26511 struct desc_ptr gdt_descr;
26512
26513 +#ifdef CONFIG_PAX_KERNEXEC
26514 + struct desc_struct d;
26515 +
26516 + memset(&d, 0, sizeof d);
26517 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26518 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
26519 +#endif
26520 +
26521 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
26522 gdt_descr.size = GDT_SIZE - 1;
26523 load_gdt(&gdt_descr);
26524 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26525 index fbe66e6..c5c0dd2 100644
26526 --- a/arch/x86/platform/efi/efi_stub_32.S
26527 +++ b/arch/x86/platform/efi/efi_stub_32.S
26528 @@ -6,7 +6,9 @@
26529 */
26530
26531 #include <linux/linkage.h>
26532 +#include <linux/init.h>
26533 #include <asm/page_types.h>
26534 +#include <asm/segment.h>
26535
26536 /*
26537 * efi_call_phys(void *, ...) is a function with variable parameters.
26538 @@ -20,7 +22,7 @@
26539 * service functions will comply with gcc calling convention, too.
26540 */
26541
26542 -.text
26543 +__INIT
26544 ENTRY(efi_call_phys)
26545 /*
26546 * 0. The function can only be called in Linux kernel. So CS has been
26547 @@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
26548 * The mapping of lower virtual memory has been created in prelog and
26549 * epilog.
26550 */
26551 - movl $1f, %edx
26552 - subl $__PAGE_OFFSET, %edx
26553 - jmp *%edx
26554 + movl $(__KERNEXEC_EFI_DS), %edx
26555 + mov %edx, %ds
26556 + mov %edx, %es
26557 + mov %edx, %ss
26558 + ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
26559 1:
26560
26561 /*
26562 @@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
26563 * parameter 2, ..., param n. To make things easy, we save the return
26564 * address of efi_call_phys in a global variable.
26565 */
26566 - popl %edx
26567 - movl %edx, saved_return_addr
26568 - /* get the function pointer into ECX*/
26569 - popl %ecx
26570 - movl %ecx, efi_rt_function_ptr
26571 - movl $2f, %edx
26572 - subl $__PAGE_OFFSET, %edx
26573 - pushl %edx
26574 + popl (saved_return_addr)
26575 + popl (efi_rt_function_ptr)
26576
26577 /*
26578 * 3. Clear PG bit in %CR0.
26579 @@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
26580 /*
26581 * 5. Call the physical function.
26582 */
26583 - jmp *%ecx
26584 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
26585
26586 -2:
26587 /*
26588 * 6. After EFI runtime service returns, control will return to
26589 * following instruction. We'd better readjust stack pointer first.
26590 @@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
26591 movl %cr0, %edx
26592 orl $0x80000000, %edx
26593 movl %edx, %cr0
26594 - jmp 1f
26595 -1:
26596 +
26597 /*
26598 * 8. Now restore the virtual mode from flat mode by
26599 * adding EIP with PAGE_OFFSET.
26600 */
26601 - movl $1f, %edx
26602 - jmp *%edx
26603 + ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
26604 1:
26605 + movl $(__KERNEL_DS), %edx
26606 + mov %edx, %ds
26607 + mov %edx, %es
26608 + mov %edx, %ss
26609
26610 /*
26611 * 9. Balance the stack. And because EAX contain the return value,
26612 * we'd better not clobber it.
26613 */
26614 - leal efi_rt_function_ptr, %edx
26615 - movl (%edx), %ecx
26616 - pushl %ecx
26617 + pushl (efi_rt_function_ptr)
26618
26619 /*
26620 - * 10. Push the saved return address onto the stack and return.
26621 + * 10. Return to the saved return address.
26622 */
26623 - leal saved_return_addr, %edx
26624 - movl (%edx), %ecx
26625 - pushl %ecx
26626 - ret
26627 + jmpl *(saved_return_addr)
26628 ENDPROC(efi_call_phys)
26629 .previous
26630
26631 -.data
26632 +__INITDATA
26633 saved_return_addr:
26634 .long 0
26635 efi_rt_function_ptr:
26636 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26637 index 4c07cca..2c8427d 100644
26638 --- a/arch/x86/platform/efi/efi_stub_64.S
26639 +++ b/arch/x86/platform/efi/efi_stub_64.S
26640 @@ -7,6 +7,7 @@
26641 */
26642
26643 #include <linux/linkage.h>
26644 +#include <asm/alternative-asm.h>
26645
26646 #define SAVE_XMM \
26647 mov %rsp, %rax; \
26648 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
26649 call *%rdi
26650 addq $32, %rsp
26651 RESTORE_XMM
26652 + pax_force_retaddr 0, 1
26653 ret
26654 ENDPROC(efi_call0)
26655
26656 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
26657 call *%rdi
26658 addq $32, %rsp
26659 RESTORE_XMM
26660 + pax_force_retaddr 0, 1
26661 ret
26662 ENDPROC(efi_call1)
26663
26664 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
26665 call *%rdi
26666 addq $32, %rsp
26667 RESTORE_XMM
26668 + pax_force_retaddr 0, 1
26669 ret
26670 ENDPROC(efi_call2)
26671
26672 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
26673 call *%rdi
26674 addq $32, %rsp
26675 RESTORE_XMM
26676 + pax_force_retaddr 0, 1
26677 ret
26678 ENDPROC(efi_call3)
26679
26680 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
26681 call *%rdi
26682 addq $32, %rsp
26683 RESTORE_XMM
26684 + pax_force_retaddr 0, 1
26685 ret
26686 ENDPROC(efi_call4)
26687
26688 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
26689 call *%rdi
26690 addq $48, %rsp
26691 RESTORE_XMM
26692 + pax_force_retaddr 0, 1
26693 ret
26694 ENDPROC(efi_call5)
26695
26696 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
26697 call *%rdi
26698 addq $48, %rsp
26699 RESTORE_XMM
26700 + pax_force_retaddr 0, 1
26701 ret
26702 ENDPROC(efi_call6)
26703 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
26704 index e31bcd8..f12dc46 100644
26705 --- a/arch/x86/platform/mrst/mrst.c
26706 +++ b/arch/x86/platform/mrst/mrst.c
26707 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
26708 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26709 int sfi_mrtc_num;
26710
26711 -static void mrst_power_off(void)
26712 +static __noreturn void mrst_power_off(void)
26713 {
26714 + BUG();
26715 }
26716
26717 -static void mrst_reboot(void)
26718 +static __noreturn void mrst_reboot(void)
26719 {
26720 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
26721 + BUG();
26722 }
26723
26724 /* parse all the mtimer info to a static mtimer array */
26725 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26726 index 218cdb1..fd55c08 100644
26727 --- a/arch/x86/power/cpu.c
26728 +++ b/arch/x86/power/cpu.c
26729 @@ -132,7 +132,7 @@ static void do_fpu_end(void)
26730 static void fix_processor_context(void)
26731 {
26732 int cpu = smp_processor_id();
26733 - struct tss_struct *t = &per_cpu(init_tss, cpu);
26734 + struct tss_struct *t = init_tss + cpu;
26735
26736 set_tss_desc(cpu, t); /*
26737 * This just modifies memory; should not be
26738 @@ -142,7 +142,9 @@ static void fix_processor_context(void)
26739 */
26740
26741 #ifdef CONFIG_X86_64
26742 + pax_open_kernel();
26743 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26744 + pax_close_kernel();
26745
26746 syscall_init(); /* This sets MSR_*STAR and related */
26747 #endif
26748 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
26749 index b685296..e00eb65 100644
26750 --- a/arch/x86/tools/relocs.c
26751 +++ b/arch/x86/tools/relocs.c
26752 @@ -12,10 +12,13 @@
26753 #include <regex.h>
26754 #include <tools/le_byteshift.h>
26755
26756 +#include "../../../include/generated/autoconf.h"
26757 +
26758 static void die(char *fmt, ...);
26759
26760 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26761 static Elf32_Ehdr ehdr;
26762 +static Elf32_Phdr *phdr;
26763 static unsigned long reloc_count, reloc_idx;
26764 static unsigned long *relocs;
26765 static unsigned long reloc16_count, reloc16_idx;
26766 @@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26767 }
26768 }
26769
26770 +static void read_phdrs(FILE *fp)
26771 +{
26772 + unsigned int i;
26773 +
26774 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26775 + if (!phdr) {
26776 + die("Unable to allocate %d program headers\n",
26777 + ehdr.e_phnum);
26778 + }
26779 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26780 + die("Seek to %d failed: %s\n",
26781 + ehdr.e_phoff, strerror(errno));
26782 + }
26783 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26784 + die("Cannot read ELF program headers: %s\n",
26785 + strerror(errno));
26786 + }
26787 + for(i = 0; i < ehdr.e_phnum; i++) {
26788 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26789 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26790 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26791 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26792 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26793 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26794 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26795 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26796 + }
26797 +
26798 +}
26799 +
26800 static void read_shdrs(FILE *fp)
26801 {
26802 - int i;
26803 + unsigned int i;
26804 Elf32_Shdr shdr;
26805
26806 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26807 @@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26808
26809 static void read_strtabs(FILE *fp)
26810 {
26811 - int i;
26812 + unsigned int i;
26813 for (i = 0; i < ehdr.e_shnum; i++) {
26814 struct section *sec = &secs[i];
26815 if (sec->shdr.sh_type != SHT_STRTAB) {
26816 @@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26817
26818 static void read_symtabs(FILE *fp)
26819 {
26820 - int i,j;
26821 + unsigned int i,j;
26822 for (i = 0; i < ehdr.e_shnum; i++) {
26823 struct section *sec = &secs[i];
26824 if (sec->shdr.sh_type != SHT_SYMTAB) {
26825 @@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26826
26827 static void read_relocs(FILE *fp)
26828 {
26829 - int i,j;
26830 + unsigned int i,j;
26831 + uint32_t base;
26832 +
26833 for (i = 0; i < ehdr.e_shnum; i++) {
26834 struct section *sec = &secs[i];
26835 if (sec->shdr.sh_type != SHT_REL) {
26836 @@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26837 die("Cannot read symbol table: %s\n",
26838 strerror(errno));
26839 }
26840 + base = 0;
26841 +
26842 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26843 + for (j = 0; j < ehdr.e_phnum; j++) {
26844 + if (phdr[j].p_type != PT_LOAD )
26845 + continue;
26846 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26847 + continue;
26848 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26849 + break;
26850 + }
26851 +#endif
26852 +
26853 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26854 Elf32_Rel *rel = &sec->reltab[j];
26855 - rel->r_offset = elf32_to_cpu(rel->r_offset);
26856 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26857 rel->r_info = elf32_to_cpu(rel->r_info);
26858 }
26859 }
26860 @@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26861
26862 static void print_absolute_symbols(void)
26863 {
26864 - int i;
26865 + unsigned int i;
26866 printf("Absolute symbols\n");
26867 printf(" Num: Value Size Type Bind Visibility Name\n");
26868 for (i = 0; i < ehdr.e_shnum; i++) {
26869 struct section *sec = &secs[i];
26870 char *sym_strtab;
26871 - int j;
26872 + unsigned int j;
26873
26874 if (sec->shdr.sh_type != SHT_SYMTAB) {
26875 continue;
26876 @@ -482,14 +530,14 @@ static void print_absolute_symbols(void)
26877
26878 static void print_absolute_relocs(void)
26879 {
26880 - int i, printed = 0;
26881 + unsigned int i, printed = 0;
26882
26883 for (i = 0; i < ehdr.e_shnum; i++) {
26884 struct section *sec = &secs[i];
26885 struct section *sec_applies, *sec_symtab;
26886 char *sym_strtab;
26887 Elf32_Sym *sh_symtab;
26888 - int j;
26889 + unsigned int j;
26890 if (sec->shdr.sh_type != SHT_REL) {
26891 continue;
26892 }
26893 @@ -551,13 +599,13 @@ static void print_absolute_relocs(void)
26894 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26895 int use_real_mode)
26896 {
26897 - int i;
26898 + unsigned int i;
26899 /* Walk through the relocations */
26900 for (i = 0; i < ehdr.e_shnum; i++) {
26901 char *sym_strtab;
26902 Elf32_Sym *sh_symtab;
26903 struct section *sec_applies, *sec_symtab;
26904 - int j;
26905 + unsigned int j;
26906 struct section *sec = &secs[i];
26907
26908 if (sec->shdr.sh_type != SHT_REL) {
26909 @@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26910 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26911 r_type = ELF32_R_TYPE(rel->r_info);
26912
26913 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26914 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26915 + continue;
26916 +
26917 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26918 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26919 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26920 + continue;
26921 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26922 + continue;
26923 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26924 + continue;
26925 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26926 + continue;
26927 +#endif
26928 +
26929 shn_abs = sym->st_shndx == SHN_ABS;
26930
26931 switch (r_type) {
26932 @@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26933
26934 static void emit_relocs(int as_text, int use_real_mode)
26935 {
26936 - int i;
26937 + unsigned int i;
26938 /* Count how many relocations I have and allocate space for them. */
26939 reloc_count = 0;
26940 walk_relocs(count_reloc, use_real_mode);
26941 @@ -801,6 +865,7 @@ int main(int argc, char **argv)
26942 fname, strerror(errno));
26943 }
26944 read_ehdr(fp);
26945 + read_phdrs(fp);
26946 read_shdrs(fp);
26947 read_strtabs(fp);
26948 read_symtabs(fp);
26949 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26950 index fd14be1..e3c79c0 100644
26951 --- a/arch/x86/vdso/Makefile
26952 +++ b/arch/x86/vdso/Makefile
26953 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
26954 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26955 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
26956
26957 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26958 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26959 GCOV_PROFILE := n
26960
26961 #
26962 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26963 index 66e6d93..587f435 100644
26964 --- a/arch/x86/vdso/vdso32-setup.c
26965 +++ b/arch/x86/vdso/vdso32-setup.c
26966 @@ -25,6 +25,7 @@
26967 #include <asm/tlbflush.h>
26968 #include <asm/vdso.h>
26969 #include <asm/proto.h>
26970 +#include <asm/mman.h>
26971
26972 enum {
26973 VDSO_DISABLED = 0,
26974 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26975 void enable_sep_cpu(void)
26976 {
26977 int cpu = get_cpu();
26978 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
26979 + struct tss_struct *tss = init_tss + cpu;
26980
26981 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26982 put_cpu();
26983 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26984 gate_vma.vm_start = FIXADDR_USER_START;
26985 gate_vma.vm_end = FIXADDR_USER_END;
26986 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26987 - gate_vma.vm_page_prot = __P101;
26988 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26989
26990 return 0;
26991 }
26992 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26993 if (compat)
26994 addr = VDSO_HIGH_BASE;
26995 else {
26996 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26997 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26998 if (IS_ERR_VALUE(addr)) {
26999 ret = addr;
27000 goto up_fail;
27001 }
27002 }
27003
27004 - current->mm->context.vdso = (void *)addr;
27005 + current->mm->context.vdso = addr;
27006
27007 if (compat_uses_vma || !compat) {
27008 /*
27009 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27010 }
27011
27012 current_thread_info()->sysenter_return =
27013 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27014 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27015
27016 up_fail:
27017 if (ret)
27018 - current->mm->context.vdso = NULL;
27019 + current->mm->context.vdso = 0;
27020
27021 up_write(&mm->mmap_sem);
27022
27023 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
27024
27025 const char *arch_vma_name(struct vm_area_struct *vma)
27026 {
27027 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27028 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27029 return "[vdso]";
27030 +
27031 +#ifdef CONFIG_PAX_SEGMEXEC
27032 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27033 + return "[vdso]";
27034 +#endif
27035 +
27036 return NULL;
27037 }
27038
27039 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
27040 * Check to see if the corresponding task was created in compat vdso
27041 * mode.
27042 */
27043 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27044 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27045 return &gate_vma;
27046 return NULL;
27047 }
27048 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
27049 index 00aaf04..4a26505 100644
27050 --- a/arch/x86/vdso/vma.c
27051 +++ b/arch/x86/vdso/vma.c
27052 @@ -16,8 +16,6 @@
27053 #include <asm/vdso.h>
27054 #include <asm/page.h>
27055
27056 -unsigned int __read_mostly vdso_enabled = 1;
27057 -
27058 extern char vdso_start[], vdso_end[];
27059 extern unsigned short vdso_sync_cpuid;
27060
27061 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
27062 * unaligned here as a result of stack start randomization.
27063 */
27064 addr = PAGE_ALIGN(addr);
27065 - addr = align_addr(addr, NULL, ALIGN_VDSO);
27066
27067 return addr;
27068 }
27069 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27070 unsigned size)
27071 {
27072 struct mm_struct *mm = current->mm;
27073 - unsigned long addr;
27074 + unsigned long addr = 0;
27075 int ret;
27076
27077 - if (!vdso_enabled)
27078 - return 0;
27079 -
27080 down_write(&mm->mmap_sem);
27081 +
27082 +#ifdef CONFIG_PAX_RANDMMAP
27083 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27084 +#endif
27085 +
27086 addr = vdso_addr(mm->start_stack, size);
27087 + addr = align_addr(addr, NULL, ALIGN_VDSO);
27088 addr = get_unmapped_area(NULL, addr, size, 0, 0);
27089 if (IS_ERR_VALUE(addr)) {
27090 ret = addr;
27091 goto up_fail;
27092 }
27093
27094 - current->mm->context.vdso = (void *)addr;
27095 + mm->context.vdso = addr;
27096
27097 ret = install_special_mapping(mm, addr, size,
27098 VM_READ|VM_EXEC|
27099 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27100 pages);
27101 - if (ret) {
27102 - current->mm->context.vdso = NULL;
27103 - goto up_fail;
27104 - }
27105 + if (ret)
27106 + mm->context.vdso = 0;
27107
27108 up_fail:
27109 up_write(&mm->mmap_sem);
27110 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27111 vdsox32_size);
27112 }
27113 #endif
27114 -
27115 -static __init int vdso_setup(char *s)
27116 -{
27117 - vdso_enabled = simple_strtoul(s, NULL, 0);
27118 - return 0;
27119 -}
27120 -__setup("vdso=", vdso_setup);
27121 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
27122 index 40edfc3..b4d80ac 100644
27123 --- a/arch/x86/xen/enlighten.c
27124 +++ b/arch/x86/xen/enlighten.c
27125 @@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
27126
27127 struct shared_info xen_dummy_shared_info;
27128
27129 -void *xen_initial_gdt;
27130 -
27131 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27132 __read_mostly int xen_have_vector_callback;
27133 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
27134 @@ -1165,30 +1163,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
27135 #endif
27136 };
27137
27138 -static void xen_reboot(int reason)
27139 +static __noreturn void xen_reboot(int reason)
27140 {
27141 struct sched_shutdown r = { .reason = reason };
27142
27143 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27144 - BUG();
27145 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27146 + BUG();
27147 }
27148
27149 -static void xen_restart(char *msg)
27150 +static __noreturn void xen_restart(char *msg)
27151 {
27152 xen_reboot(SHUTDOWN_reboot);
27153 }
27154
27155 -static void xen_emergency_restart(void)
27156 +static __noreturn void xen_emergency_restart(void)
27157 {
27158 xen_reboot(SHUTDOWN_reboot);
27159 }
27160
27161 -static void xen_machine_halt(void)
27162 +static __noreturn void xen_machine_halt(void)
27163 {
27164 xen_reboot(SHUTDOWN_poweroff);
27165 }
27166
27167 -static void xen_machine_power_off(void)
27168 +static __noreturn void xen_machine_power_off(void)
27169 {
27170 if (pm_power_off)
27171 pm_power_off();
27172 @@ -1291,7 +1289,17 @@ asmlinkage void __init xen_start_kernel(void)
27173 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27174
27175 /* Work out if we support NX */
27176 - x86_configure_nx();
27177 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27178 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
27179 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
27180 + unsigned l, h;
27181 +
27182 + __supported_pte_mask |= _PAGE_NX;
27183 + rdmsr(MSR_EFER, l, h);
27184 + l |= EFER_NX;
27185 + wrmsr(MSR_EFER, l, h);
27186 + }
27187 +#endif
27188
27189 xen_setup_features();
27190
27191 @@ -1322,13 +1330,6 @@ asmlinkage void __init xen_start_kernel(void)
27192
27193 machine_ops = xen_machine_ops;
27194
27195 - /*
27196 - * The only reliable way to retain the initial address of the
27197 - * percpu gdt_page is to remember it here, so we can go and
27198 - * mark it RW later, when the initial percpu area is freed.
27199 - */
27200 - xen_initial_gdt = &per_cpu(gdt_page, 0);
27201 -
27202 xen_smp_init();
27203
27204 #ifdef CONFIG_ACPI_NUMA
27205 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27206 index 69f5857..0699dc5 100644
27207 --- a/arch/x86/xen/mmu.c
27208 +++ b/arch/x86/xen/mmu.c
27209 @@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27210 convert_pfn_mfn(init_level4_pgt);
27211 convert_pfn_mfn(level3_ident_pgt);
27212 convert_pfn_mfn(level3_kernel_pgt);
27213 + convert_pfn_mfn(level3_vmalloc_start_pgt);
27214 + convert_pfn_mfn(level3_vmalloc_end_pgt);
27215 + convert_pfn_mfn(level3_vmemmap_pgt);
27216
27217 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27218 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27219 @@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
27220 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27221 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27222 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27223 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27224 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27225 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27226 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27227 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27228 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27229 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27230
27231 @@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
27232 pv_mmu_ops.set_pud = xen_set_pud;
27233 #if PAGETABLE_LEVELS == 4
27234 pv_mmu_ops.set_pgd = xen_set_pgd;
27235 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27236 #endif
27237
27238 /* This will work as long as patching hasn't happened yet
27239 @@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
27240 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27241 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27242 .set_pgd = xen_set_pgd_hyper,
27243 + .set_pgd_batched = xen_set_pgd_hyper,
27244
27245 .alloc_pud = xen_alloc_pmd_init,
27246 .release_pud = xen_release_pmd_init,
27247 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27248 index 0503c0c..ceb2d16 100644
27249 --- a/arch/x86/xen/smp.c
27250 +++ b/arch/x86/xen/smp.c
27251 @@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27252 {
27253 BUG_ON(smp_processor_id() != 0);
27254 native_smp_prepare_boot_cpu();
27255 -
27256 - /* We've switched to the "real" per-cpu gdt, so make sure the
27257 - old memory can be recycled */
27258 - make_lowmem_page_readwrite(xen_initial_gdt);
27259 -
27260 xen_filter_cpu_maps();
27261 xen_setup_vcpu_info_placement();
27262 }
27263 @@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27264 gdt = get_cpu_gdt_table(cpu);
27265
27266 ctxt->flags = VGCF_IN_KERNEL;
27267 - ctxt->user_regs.ds = __USER_DS;
27268 - ctxt->user_regs.es = __USER_DS;
27269 + ctxt->user_regs.ds = __KERNEL_DS;
27270 + ctxt->user_regs.es = __KERNEL_DS;
27271 ctxt->user_regs.ss = __KERNEL_DS;
27272 #ifdef CONFIG_X86_32
27273 ctxt->user_regs.fs = __KERNEL_PERCPU;
27274 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27275 + savesegment(gs, ctxt->user_regs.gs);
27276 #else
27277 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27278 #endif
27279 @@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27280 int rc;
27281
27282 per_cpu(current_task, cpu) = idle;
27283 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
27284 #ifdef CONFIG_X86_32
27285 irq_ctx_init(cpu);
27286 #else
27287 clear_tsk_thread_flag(idle, TIF_FORK);
27288 - per_cpu(kernel_stack, cpu) =
27289 - (unsigned long)task_stack_page(idle) -
27290 - KERNEL_STACK_OFFSET + THREAD_SIZE;
27291 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27292 #endif
27293 xen_setup_runstate_info(cpu);
27294 xen_setup_timer(cpu);
27295 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27296 index b040b0e..8cc4fe0 100644
27297 --- a/arch/x86/xen/xen-asm_32.S
27298 +++ b/arch/x86/xen/xen-asm_32.S
27299 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
27300 ESP_OFFSET=4 # bytes pushed onto stack
27301
27302 /*
27303 - * Store vcpu_info pointer for easy access. Do it this way to
27304 - * avoid having to reload %fs
27305 + * Store vcpu_info pointer for easy access.
27306 */
27307 #ifdef CONFIG_SMP
27308 - GET_THREAD_INFO(%eax)
27309 - movl TI_cpu(%eax), %eax
27310 - movl __per_cpu_offset(,%eax,4), %eax
27311 - mov xen_vcpu(%eax), %eax
27312 + push %fs
27313 + mov $(__KERNEL_PERCPU), %eax
27314 + mov %eax, %fs
27315 + mov PER_CPU_VAR(xen_vcpu), %eax
27316 + pop %fs
27317 #else
27318 movl xen_vcpu, %eax
27319 #endif
27320 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27321 index aaa7291..3f77960 100644
27322 --- a/arch/x86/xen/xen-head.S
27323 +++ b/arch/x86/xen/xen-head.S
27324 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
27325 #ifdef CONFIG_X86_32
27326 mov %esi,xen_start_info
27327 mov $init_thread_union+THREAD_SIZE,%esp
27328 +#ifdef CONFIG_SMP
27329 + movl $cpu_gdt_table,%edi
27330 + movl $__per_cpu_load,%eax
27331 + movw %ax,__KERNEL_PERCPU + 2(%edi)
27332 + rorl $16,%eax
27333 + movb %al,__KERNEL_PERCPU + 4(%edi)
27334 + movb %ah,__KERNEL_PERCPU + 7(%edi)
27335 + movl $__per_cpu_end - 1,%eax
27336 + subl $__per_cpu_start,%eax
27337 + movw %ax,__KERNEL_PERCPU + 0(%edi)
27338 +#endif
27339 #else
27340 mov %rsi,xen_start_info
27341 mov $init_thread_union+THREAD_SIZE,%rsp
27342 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27343 index b095739..8c17bcd 100644
27344 --- a/arch/x86/xen/xen-ops.h
27345 +++ b/arch/x86/xen/xen-ops.h
27346 @@ -10,8 +10,6 @@
27347 extern const char xen_hypervisor_callback[];
27348 extern const char xen_failsafe_callback[];
27349
27350 -extern void *xen_initial_gdt;
27351 -
27352 struct trap_info;
27353 void xen_copy_trap_info(struct trap_info *traps);
27354
27355 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27356 index 525bd3d..ef888b1 100644
27357 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
27358 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27359 @@ -119,9 +119,9 @@
27360 ----------------------------------------------------------------------*/
27361
27362 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27363 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27364 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27365 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27366 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27367
27368 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27369 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27370 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27371 index 2f33760..835e50a 100644
27372 --- a/arch/xtensa/variants/fsf/include/variant/core.h
27373 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
27374 @@ -11,6 +11,7 @@
27375 #ifndef _XTENSA_CORE_H
27376 #define _XTENSA_CORE_H
27377
27378 +#include <linux/const.h>
27379
27380 /****************************************************************************
27381 Parameters Useful for Any Code, USER or PRIVILEGED
27382 @@ -112,9 +113,9 @@
27383 ----------------------------------------------------------------------*/
27384
27385 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27386 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27387 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27388 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27389 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27390
27391 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27392 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27393 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27394 index af00795..2bb8105 100644
27395 --- a/arch/xtensa/variants/s6000/include/variant/core.h
27396 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
27397 @@ -11,6 +11,7 @@
27398 #ifndef _XTENSA_CORE_CONFIGURATION_H
27399 #define _XTENSA_CORE_CONFIGURATION_H
27400
27401 +#include <linux/const.h>
27402
27403 /****************************************************************************
27404 Parameters Useful for Any Code, USER or PRIVILEGED
27405 @@ -118,9 +119,9 @@
27406 ----------------------------------------------------------------------*/
27407
27408 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27409 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27410 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27411 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27412 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27413
27414 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27415 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
27416 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27417 index 58916af..9cb880b 100644
27418 --- a/block/blk-iopoll.c
27419 +++ b/block/blk-iopoll.c
27420 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27421 }
27422 EXPORT_SYMBOL(blk_iopoll_complete);
27423
27424 -static void blk_iopoll_softirq(struct softirq_action *h)
27425 +static void blk_iopoll_softirq(void)
27426 {
27427 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27428 int rearm = 0, budget = blk_iopoll_budget;
27429 diff --git a/block/blk-map.c b/block/blk-map.c
27430 index 623e1cd..ca1e109 100644
27431 --- a/block/blk-map.c
27432 +++ b/block/blk-map.c
27433 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27434 if (!len || !kbuf)
27435 return -EINVAL;
27436
27437 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27438 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
27439 if (do_copy)
27440 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27441 else
27442 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27443 index 467c8de..4bddc6d 100644
27444 --- a/block/blk-softirq.c
27445 +++ b/block/blk-softirq.c
27446 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27447 * Softirq action handler - move entries to local list and loop over them
27448 * while passing them to the queue registered handler.
27449 */
27450 -static void blk_done_softirq(struct softirq_action *h)
27451 +static void blk_done_softirq(void)
27452 {
27453 struct list_head *cpu_list, local_list;
27454
27455 diff --git a/block/bsg.c b/block/bsg.c
27456 index ff64ae3..593560c 100644
27457 --- a/block/bsg.c
27458 +++ b/block/bsg.c
27459 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27460 struct sg_io_v4 *hdr, struct bsg_device *bd,
27461 fmode_t has_write_perm)
27462 {
27463 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27464 + unsigned char *cmdptr;
27465 +
27466 if (hdr->request_len > BLK_MAX_CDB) {
27467 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27468 if (!rq->cmd)
27469 return -ENOMEM;
27470 - }
27471 + cmdptr = rq->cmd;
27472 + } else
27473 + cmdptr = tmpcmd;
27474
27475 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27476 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27477 hdr->request_len))
27478 return -EFAULT;
27479
27480 + if (cmdptr != rq->cmd)
27481 + memcpy(rq->cmd, cmdptr, hdr->request_len);
27482 +
27483 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27484 if (blk_verify_command(rq->cmd, has_write_perm))
27485 return -EPERM;
27486 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27487 index 7c668c8..db3521c 100644
27488 --- a/block/compat_ioctl.c
27489 +++ b/block/compat_ioctl.c
27490 @@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27491 err |= __get_user(f->spec1, &uf->spec1);
27492 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27493 err |= __get_user(name, &uf->name);
27494 - f->name = compat_ptr(name);
27495 + f->name = (void __force_kernel *)compat_ptr(name);
27496 if (err) {
27497 err = -EFAULT;
27498 goto out;
27499 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27500 index 6296b40..417c00f 100644
27501 --- a/block/partitions/efi.c
27502 +++ b/block/partitions/efi.c
27503 @@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27504 if (!gpt)
27505 return NULL;
27506
27507 + if (!le32_to_cpu(gpt->num_partition_entries))
27508 + return NULL;
27509 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27510 + if (!pte)
27511 + return NULL;
27512 +
27513 count = le32_to_cpu(gpt->num_partition_entries) *
27514 le32_to_cpu(gpt->sizeof_partition_entry);
27515 - if (!count)
27516 - return NULL;
27517 - pte = kzalloc(count, GFP_KERNEL);
27518 - if (!pte)
27519 - return NULL;
27520 -
27521 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27522 (u8 *) pte,
27523 count) < count) {
27524 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27525 index 260fa80..e8f3caf 100644
27526 --- a/block/scsi_ioctl.c
27527 +++ b/block/scsi_ioctl.c
27528 @@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
27529 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27530 struct sg_io_hdr *hdr, fmode_t mode)
27531 {
27532 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27533 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27534 + unsigned char *cmdptr;
27535 +
27536 + if (rq->cmd != rq->__cmd)
27537 + cmdptr = rq->cmd;
27538 + else
27539 + cmdptr = tmpcmd;
27540 +
27541 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27542 return -EFAULT;
27543 +
27544 + if (cmdptr != rq->cmd)
27545 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27546 +
27547 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27548 return -EPERM;
27549
27550 @@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27551 int err;
27552 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27553 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27554 + unsigned char tmpcmd[sizeof(rq->__cmd)];
27555 + unsigned char *cmdptr;
27556
27557 if (!sic)
27558 return -EINVAL;
27559 @@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27560 */
27561 err = -EFAULT;
27562 rq->cmd_len = cmdlen;
27563 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
27564 +
27565 + if (rq->cmd != rq->__cmd)
27566 + cmdptr = rq->cmd;
27567 + else
27568 + cmdptr = tmpcmd;
27569 +
27570 + if (copy_from_user(cmdptr, sic->data, cmdlen))
27571 goto error;
27572
27573 + if (rq->cmd != cmdptr)
27574 + memcpy(rq->cmd, cmdptr, cmdlen);
27575 +
27576 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27577 goto error;
27578
27579 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27580 index 671d4d6..5f24030 100644
27581 --- a/crypto/cryptd.c
27582 +++ b/crypto/cryptd.c
27583 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27584
27585 struct cryptd_blkcipher_request_ctx {
27586 crypto_completion_t complete;
27587 -};
27588 +} __no_const;
27589
27590 struct cryptd_hash_ctx {
27591 struct crypto_shash *child;
27592 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27593
27594 struct cryptd_aead_request_ctx {
27595 crypto_completion_t complete;
27596 -};
27597 +} __no_const;
27598
27599 static void cryptd_queue_worker(struct work_struct *work);
27600
27601 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
27602 index e6defd8..c26a225 100644
27603 --- a/drivers/acpi/apei/cper.c
27604 +++ b/drivers/acpi/apei/cper.c
27605 @@ -38,12 +38,12 @@
27606 */
27607 u64 cper_next_record_id(void)
27608 {
27609 - static atomic64_t seq;
27610 + static atomic64_unchecked_t seq;
27611
27612 - if (!atomic64_read(&seq))
27613 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
27614 + if (!atomic64_read_unchecked(&seq))
27615 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27616
27617 - return atomic64_inc_return(&seq);
27618 + return atomic64_inc_return_unchecked(&seq);
27619 }
27620 EXPORT_SYMBOL_GPL(cper_next_record_id);
27621
27622 diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
27623 index 7586544..636a2f0 100644
27624 --- a/drivers/acpi/ec_sys.c
27625 +++ b/drivers/acpi/ec_sys.c
27626 @@ -12,6 +12,7 @@
27627 #include <linux/acpi.h>
27628 #include <linux/debugfs.h>
27629 #include <linux/module.h>
27630 +#include <linux/uaccess.h>
27631 #include "internal.h"
27632
27633 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
27634 @@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27635 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27636 */
27637 unsigned int size = EC_SPACE_SIZE;
27638 - u8 *data = (u8 *) buf;
27639 + u8 data;
27640 loff_t init_off = *off;
27641 int err = 0;
27642
27643 @@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
27644 size = count;
27645
27646 while (size) {
27647 - err = ec_read(*off, &data[*off - init_off]);
27648 + err = ec_read(*off, &data);
27649 if (err)
27650 return err;
27651 + if (put_user(data, &buf[*off - init_off]))
27652 + return -EFAULT;
27653 *off += 1;
27654 size--;
27655 }
27656 @@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27657
27658 unsigned int size = count;
27659 loff_t init_off = *off;
27660 - u8 *data = (u8 *) buf;
27661 int err = 0;
27662
27663 if (*off >= EC_SPACE_SIZE)
27664 @@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
27665 }
27666
27667 while (size) {
27668 - u8 byte_write = data[*off - init_off];
27669 + u8 byte_write;
27670 + if (get_user(byte_write, &buf[*off - init_off]))
27671 + return -EFAULT;
27672 err = ec_write(*off, byte_write);
27673 if (err)
27674 return err;
27675 diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27676 index 251c7b62..000462d 100644
27677 --- a/drivers/acpi/proc.c
27678 +++ b/drivers/acpi/proc.c
27679 @@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
27680 size_t count, loff_t * ppos)
27681 {
27682 struct list_head *node, *next;
27683 - char strbuf[5];
27684 - char str[5] = "";
27685 - unsigned int len = count;
27686 + char strbuf[5] = {0};
27687
27688 - if (len > 4)
27689 - len = 4;
27690 - if (len < 0)
27691 + if (count > 4)
27692 + count = 4;
27693 + if (copy_from_user(strbuf, buffer, count))
27694 return -EFAULT;
27695 -
27696 - if (copy_from_user(strbuf, buffer, len))
27697 - return -EFAULT;
27698 - strbuf[len] = '\0';
27699 - sscanf(strbuf, "%s", str);
27700 + strbuf[count] = '\0';
27701
27702 mutex_lock(&acpi_device_lock);
27703 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27704 @@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
27705 if (!dev->wakeup.flags.valid)
27706 continue;
27707
27708 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
27709 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27710 if (device_can_wakeup(&dev->dev)) {
27711 bool enable = !device_may_wakeup(&dev->dev);
27712 device_set_wakeup_enable(&dev->dev, enable);
27713 diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
27714 index 0734086..3ad3e4c 100644
27715 --- a/drivers/acpi/processor_driver.c
27716 +++ b/drivers/acpi/processor_driver.c
27717 @@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27718 return 0;
27719 #endif
27720
27721 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27722 + BUG_ON(pr->id >= nr_cpu_ids);
27723
27724 /*
27725 * Buggy BIOS check
27726 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27727 index d31ee55..8363a8b 100644
27728 --- a/drivers/ata/libata-core.c
27729 +++ b/drivers/ata/libata-core.c
27730 @@ -4742,7 +4742,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27731 struct ata_port *ap;
27732 unsigned int tag;
27733
27734 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27735 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27736 ap = qc->ap;
27737
27738 qc->flags = 0;
27739 @@ -4758,7 +4758,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27740 struct ata_port *ap;
27741 struct ata_link *link;
27742
27743 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27744 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27745 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27746 ap = qc->ap;
27747 link = qc->dev->link;
27748 @@ -5822,6 +5822,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27749 return;
27750
27751 spin_lock(&lock);
27752 + pax_open_kernel();
27753
27754 for (cur = ops->inherits; cur; cur = cur->inherits) {
27755 void **inherit = (void **)cur;
27756 @@ -5835,8 +5836,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27757 if (IS_ERR(*pp))
27758 *pp = NULL;
27759
27760 - ops->inherits = NULL;
27761 + *(struct ata_port_operations **)&ops->inherits = NULL;
27762
27763 + pax_close_kernel();
27764 spin_unlock(&lock);
27765 }
27766
27767 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
27768 index 3239517..343b5f6 100644
27769 --- a/drivers/ata/pata_arasan_cf.c
27770 +++ b/drivers/ata/pata_arasan_cf.c
27771 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
27772 /* Handle platform specific quirks */
27773 if (pdata->quirk) {
27774 if (pdata->quirk & CF_BROKEN_PIO) {
27775 - ap->ops->set_piomode = NULL;
27776 + pax_open_kernel();
27777 + *(void **)&ap->ops->set_piomode = NULL;
27778 + pax_close_kernel();
27779 ap->pio_mask = 0;
27780 }
27781 if (pdata->quirk & CF_BROKEN_MWDMA)
27782 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27783 index f9b983a..887b9d8 100644
27784 --- a/drivers/atm/adummy.c
27785 +++ b/drivers/atm/adummy.c
27786 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
27787 vcc->pop(vcc, skb);
27788 else
27789 dev_kfree_skb_any(skb);
27790 - atomic_inc(&vcc->stats->tx);
27791 + atomic_inc_unchecked(&vcc->stats->tx);
27792
27793 return 0;
27794 }
27795 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27796 index f8f41e0..1f987dd 100644
27797 --- a/drivers/atm/ambassador.c
27798 +++ b/drivers/atm/ambassador.c
27799 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
27800 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27801
27802 // VC layer stats
27803 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27804 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27805
27806 // free the descriptor
27807 kfree (tx_descr);
27808 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27809 dump_skb ("<<<", vc, skb);
27810
27811 // VC layer stats
27812 - atomic_inc(&atm_vcc->stats->rx);
27813 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27814 __net_timestamp(skb);
27815 // end of our responsibility
27816 atm_vcc->push (atm_vcc, skb);
27817 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
27818 } else {
27819 PRINTK (KERN_INFO, "dropped over-size frame");
27820 // should we count this?
27821 - atomic_inc(&atm_vcc->stats->rx_drop);
27822 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27823 }
27824
27825 } else {
27826 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
27827 }
27828
27829 if (check_area (skb->data, skb->len)) {
27830 - atomic_inc(&atm_vcc->stats->tx_err);
27831 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27832 return -ENOMEM; // ?
27833 }
27834
27835 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27836 index b22d71c..d6e1049 100644
27837 --- a/drivers/atm/atmtcp.c
27838 +++ b/drivers/atm/atmtcp.c
27839 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27840 if (vcc->pop) vcc->pop(vcc,skb);
27841 else dev_kfree_skb(skb);
27842 if (dev_data) return 0;
27843 - atomic_inc(&vcc->stats->tx_err);
27844 + atomic_inc_unchecked(&vcc->stats->tx_err);
27845 return -ENOLINK;
27846 }
27847 size = skb->len+sizeof(struct atmtcp_hdr);
27848 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27849 if (!new_skb) {
27850 if (vcc->pop) vcc->pop(vcc,skb);
27851 else dev_kfree_skb(skb);
27852 - atomic_inc(&vcc->stats->tx_err);
27853 + atomic_inc_unchecked(&vcc->stats->tx_err);
27854 return -ENOBUFS;
27855 }
27856 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
27857 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
27858 if (vcc->pop) vcc->pop(vcc,skb);
27859 else dev_kfree_skb(skb);
27860 out_vcc->push(out_vcc,new_skb);
27861 - atomic_inc(&vcc->stats->tx);
27862 - atomic_inc(&out_vcc->stats->rx);
27863 + atomic_inc_unchecked(&vcc->stats->tx);
27864 + atomic_inc_unchecked(&out_vcc->stats->rx);
27865 return 0;
27866 }
27867
27868 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27869 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27870 read_unlock(&vcc_sklist_lock);
27871 if (!out_vcc) {
27872 - atomic_inc(&vcc->stats->tx_err);
27873 + atomic_inc_unchecked(&vcc->stats->tx_err);
27874 goto done;
27875 }
27876 skb_pull(skb,sizeof(struct atmtcp_hdr));
27877 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
27878 __net_timestamp(new_skb);
27879 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27880 out_vcc->push(out_vcc,new_skb);
27881 - atomic_inc(&vcc->stats->tx);
27882 - atomic_inc(&out_vcc->stats->rx);
27883 + atomic_inc_unchecked(&vcc->stats->tx);
27884 + atomic_inc_unchecked(&out_vcc->stats->rx);
27885 done:
27886 if (vcc->pop) vcc->pop(vcc,skb);
27887 else dev_kfree_skb(skb);
27888 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
27889 index 2059ee4..faf51c7 100644
27890 --- a/drivers/atm/eni.c
27891 +++ b/drivers/atm/eni.c
27892 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
27893 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27894 vcc->dev->number);
27895 length = 0;
27896 - atomic_inc(&vcc->stats->rx_err);
27897 + atomic_inc_unchecked(&vcc->stats->rx_err);
27898 }
27899 else {
27900 length = ATM_CELL_SIZE-1; /* no HEC */
27901 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27902 size);
27903 }
27904 eff = length = 0;
27905 - atomic_inc(&vcc->stats->rx_err);
27906 + atomic_inc_unchecked(&vcc->stats->rx_err);
27907 }
27908 else {
27909 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
27910 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
27911 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27912 vcc->dev->number,vcc->vci,length,size << 2,descr);
27913 length = eff = 0;
27914 - atomic_inc(&vcc->stats->rx_err);
27915 + atomic_inc_unchecked(&vcc->stats->rx_err);
27916 }
27917 }
27918 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
27919 @@ -767,7 +767,7 @@ rx_dequeued++;
27920 vcc->push(vcc,skb);
27921 pushed++;
27922 }
27923 - atomic_inc(&vcc->stats->rx);
27924 + atomic_inc_unchecked(&vcc->stats->rx);
27925 }
27926 wake_up(&eni_dev->rx_wait);
27927 }
27928 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
27929 PCI_DMA_TODEVICE);
27930 if (vcc->pop) vcc->pop(vcc,skb);
27931 else dev_kfree_skb_irq(skb);
27932 - atomic_inc(&vcc->stats->tx);
27933 + atomic_inc_unchecked(&vcc->stats->tx);
27934 wake_up(&eni_dev->tx_wait);
27935 dma_complete++;
27936 }
27937 @@ -1567,7 +1567,7 @@ tx_complete++;
27938 /*--------------------------------- entries ---------------------------------*/
27939
27940
27941 -static const char *media_name[] __devinitdata = {
27942 +static const char *media_name[] __devinitconst = {
27943 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27944 "UTP", "05?", "06?", "07?", /* 4- 7 */
27945 "TAXI","09?", "10?", "11?", /* 8-11 */
27946 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
27947 index 86fed1b..6dc4721 100644
27948 --- a/drivers/atm/firestream.c
27949 +++ b/drivers/atm/firestream.c
27950 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
27951 }
27952 }
27953
27954 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27955 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27956
27957 fs_dprintk (FS_DEBUG_TXMEM, "i");
27958 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
27959 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27960 #endif
27961 skb_put (skb, qe->p1 & 0xffff);
27962 ATM_SKB(skb)->vcc = atm_vcc;
27963 - atomic_inc(&atm_vcc->stats->rx);
27964 + atomic_inc_unchecked(&atm_vcc->stats->rx);
27965 __net_timestamp(skb);
27966 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27967 atm_vcc->push (atm_vcc, skb);
27968 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
27969 kfree (pe);
27970 }
27971 if (atm_vcc)
27972 - atomic_inc(&atm_vcc->stats->rx_drop);
27973 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27974 break;
27975 case 0x1f: /* Reassembly abort: no buffers. */
27976 /* Silently increment error counter. */
27977 if (atm_vcc)
27978 - atomic_inc(&atm_vcc->stats->rx_drop);
27979 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27980 break;
27981 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
27982 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
27983 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
27984 index 361f5ae..7fc552d 100644
27985 --- a/drivers/atm/fore200e.c
27986 +++ b/drivers/atm/fore200e.c
27987 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
27988 #endif
27989 /* check error condition */
27990 if (*entry->status & STATUS_ERROR)
27991 - atomic_inc(&vcc->stats->tx_err);
27992 + atomic_inc_unchecked(&vcc->stats->tx_err);
27993 else
27994 - atomic_inc(&vcc->stats->tx);
27995 + atomic_inc_unchecked(&vcc->stats->tx);
27996 }
27997 }
27998
27999 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28000 if (skb == NULL) {
28001 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
28002
28003 - atomic_inc(&vcc->stats->rx_drop);
28004 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28005 return -ENOMEM;
28006 }
28007
28008 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
28009
28010 dev_kfree_skb_any(skb);
28011
28012 - atomic_inc(&vcc->stats->rx_drop);
28013 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28014 return -ENOMEM;
28015 }
28016
28017 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28018
28019 vcc->push(vcc, skb);
28020 - atomic_inc(&vcc->stats->rx);
28021 + atomic_inc_unchecked(&vcc->stats->rx);
28022
28023 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
28024
28025 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
28026 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28027 fore200e->atm_dev->number,
28028 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28029 - atomic_inc(&vcc->stats->rx_err);
28030 + atomic_inc_unchecked(&vcc->stats->rx_err);
28031 }
28032 }
28033
28034 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
28035 goto retry_here;
28036 }
28037
28038 - atomic_inc(&vcc->stats->tx_err);
28039 + atomic_inc_unchecked(&vcc->stats->tx_err);
28040
28041 fore200e->tx_sat++;
28042 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
28043 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
28044 index b182c2f..1c6fa8a 100644
28045 --- a/drivers/atm/he.c
28046 +++ b/drivers/atm/he.c
28047 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28048
28049 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28050 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28051 - atomic_inc(&vcc->stats->rx_drop);
28052 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28053 goto return_host_buffers;
28054 }
28055
28056 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28057 RBRQ_LEN_ERR(he_dev->rbrq_head)
28058 ? "LEN_ERR" : "",
28059 vcc->vpi, vcc->vci);
28060 - atomic_inc(&vcc->stats->rx_err);
28061 + atomic_inc_unchecked(&vcc->stats->rx_err);
28062 goto return_host_buffers;
28063 }
28064
28065 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
28066 vcc->push(vcc, skb);
28067 spin_lock(&he_dev->global_lock);
28068
28069 - atomic_inc(&vcc->stats->rx);
28070 + atomic_inc_unchecked(&vcc->stats->rx);
28071
28072 return_host_buffers:
28073 ++pdus_assembled;
28074 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
28075 tpd->vcc->pop(tpd->vcc, tpd->skb);
28076 else
28077 dev_kfree_skb_any(tpd->skb);
28078 - atomic_inc(&tpd->vcc->stats->tx_err);
28079 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28080 }
28081 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28082 return;
28083 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28084 vcc->pop(vcc, skb);
28085 else
28086 dev_kfree_skb_any(skb);
28087 - atomic_inc(&vcc->stats->tx_err);
28088 + atomic_inc_unchecked(&vcc->stats->tx_err);
28089 return -EINVAL;
28090 }
28091
28092 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28093 vcc->pop(vcc, skb);
28094 else
28095 dev_kfree_skb_any(skb);
28096 - atomic_inc(&vcc->stats->tx_err);
28097 + atomic_inc_unchecked(&vcc->stats->tx_err);
28098 return -EINVAL;
28099 }
28100 #endif
28101 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28102 vcc->pop(vcc, skb);
28103 else
28104 dev_kfree_skb_any(skb);
28105 - atomic_inc(&vcc->stats->tx_err);
28106 + atomic_inc_unchecked(&vcc->stats->tx_err);
28107 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28108 return -ENOMEM;
28109 }
28110 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28111 vcc->pop(vcc, skb);
28112 else
28113 dev_kfree_skb_any(skb);
28114 - atomic_inc(&vcc->stats->tx_err);
28115 + atomic_inc_unchecked(&vcc->stats->tx_err);
28116 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28117 return -ENOMEM;
28118 }
28119 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
28120 __enqueue_tpd(he_dev, tpd, cid);
28121 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28122
28123 - atomic_inc(&vcc->stats->tx);
28124 + atomic_inc_unchecked(&vcc->stats->tx);
28125
28126 return 0;
28127 }
28128 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
28129 index 75fd691..2d20b14 100644
28130 --- a/drivers/atm/horizon.c
28131 +++ b/drivers/atm/horizon.c
28132 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
28133 {
28134 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28135 // VC layer stats
28136 - atomic_inc(&vcc->stats->rx);
28137 + atomic_inc_unchecked(&vcc->stats->rx);
28138 __net_timestamp(skb);
28139 // end of our responsibility
28140 vcc->push (vcc, skb);
28141 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
28142 dev->tx_iovec = NULL;
28143
28144 // VC layer stats
28145 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28146 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28147
28148 // free the skb
28149 hrz_kfree_skb (skb);
28150 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
28151 index 1c05212..c28e200 100644
28152 --- a/drivers/atm/idt77252.c
28153 +++ b/drivers/atm/idt77252.c
28154 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
28155 else
28156 dev_kfree_skb(skb);
28157
28158 - atomic_inc(&vcc->stats->tx);
28159 + atomic_inc_unchecked(&vcc->stats->tx);
28160 }
28161
28162 atomic_dec(&scq->used);
28163 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28164 if ((sb = dev_alloc_skb(64)) == NULL) {
28165 printk("%s: Can't allocate buffers for aal0.\n",
28166 card->name);
28167 - atomic_add(i, &vcc->stats->rx_drop);
28168 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28169 break;
28170 }
28171 if (!atm_charge(vcc, sb->truesize)) {
28172 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28173 card->name);
28174 - atomic_add(i - 1, &vcc->stats->rx_drop);
28175 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28176 dev_kfree_skb(sb);
28177 break;
28178 }
28179 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28180 ATM_SKB(sb)->vcc = vcc;
28181 __net_timestamp(sb);
28182 vcc->push(vcc, sb);
28183 - atomic_inc(&vcc->stats->rx);
28184 + atomic_inc_unchecked(&vcc->stats->rx);
28185
28186 cell += ATM_CELL_PAYLOAD;
28187 }
28188 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28189 "(CDC: %08x)\n",
28190 card->name, len, rpp->len, readl(SAR_REG_CDC));
28191 recycle_rx_pool_skb(card, rpp);
28192 - atomic_inc(&vcc->stats->rx_err);
28193 + atomic_inc_unchecked(&vcc->stats->rx_err);
28194 return;
28195 }
28196 if (stat & SAR_RSQE_CRC) {
28197 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28198 recycle_rx_pool_skb(card, rpp);
28199 - atomic_inc(&vcc->stats->rx_err);
28200 + atomic_inc_unchecked(&vcc->stats->rx_err);
28201 return;
28202 }
28203 if (skb_queue_len(&rpp->queue) > 1) {
28204 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28205 RXPRINTK("%s: Can't alloc RX skb.\n",
28206 card->name);
28207 recycle_rx_pool_skb(card, rpp);
28208 - atomic_inc(&vcc->stats->rx_err);
28209 + atomic_inc_unchecked(&vcc->stats->rx_err);
28210 return;
28211 }
28212 if (!atm_charge(vcc, skb->truesize)) {
28213 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28214 __net_timestamp(skb);
28215
28216 vcc->push(vcc, skb);
28217 - atomic_inc(&vcc->stats->rx);
28218 + atomic_inc_unchecked(&vcc->stats->rx);
28219
28220 return;
28221 }
28222 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
28223 __net_timestamp(skb);
28224
28225 vcc->push(vcc, skb);
28226 - atomic_inc(&vcc->stats->rx);
28227 + atomic_inc_unchecked(&vcc->stats->rx);
28228
28229 if (skb->truesize > SAR_FB_SIZE_3)
28230 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
28231 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
28232 if (vcc->qos.aal != ATM_AAL0) {
28233 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28234 card->name, vpi, vci);
28235 - atomic_inc(&vcc->stats->rx_drop);
28236 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28237 goto drop;
28238 }
28239
28240 if ((sb = dev_alloc_skb(64)) == NULL) {
28241 printk("%s: Can't allocate buffers for AAL0.\n",
28242 card->name);
28243 - atomic_inc(&vcc->stats->rx_err);
28244 + atomic_inc_unchecked(&vcc->stats->rx_err);
28245 goto drop;
28246 }
28247
28248 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
28249 ATM_SKB(sb)->vcc = vcc;
28250 __net_timestamp(sb);
28251 vcc->push(vcc, sb);
28252 - atomic_inc(&vcc->stats->rx);
28253 + atomic_inc_unchecked(&vcc->stats->rx);
28254
28255 drop:
28256 skb_pull(queue, 64);
28257 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28258
28259 if (vc == NULL) {
28260 printk("%s: NULL connection in send().\n", card->name);
28261 - atomic_inc(&vcc->stats->tx_err);
28262 + atomic_inc_unchecked(&vcc->stats->tx_err);
28263 dev_kfree_skb(skb);
28264 return -EINVAL;
28265 }
28266 if (!test_bit(VCF_TX, &vc->flags)) {
28267 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
28268 - atomic_inc(&vcc->stats->tx_err);
28269 + atomic_inc_unchecked(&vcc->stats->tx_err);
28270 dev_kfree_skb(skb);
28271 return -EINVAL;
28272 }
28273 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28274 break;
28275 default:
28276 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
28277 - atomic_inc(&vcc->stats->tx_err);
28278 + atomic_inc_unchecked(&vcc->stats->tx_err);
28279 dev_kfree_skb(skb);
28280 return -EINVAL;
28281 }
28282
28283 if (skb_shinfo(skb)->nr_frags != 0) {
28284 printk("%s: No scatter-gather yet.\n", card->name);
28285 - atomic_inc(&vcc->stats->tx_err);
28286 + atomic_inc_unchecked(&vcc->stats->tx_err);
28287 dev_kfree_skb(skb);
28288 return -EINVAL;
28289 }
28290 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
28291
28292 err = queue_skb(card, vc, skb, oam);
28293 if (err) {
28294 - atomic_inc(&vcc->stats->tx_err);
28295 + atomic_inc_unchecked(&vcc->stats->tx_err);
28296 dev_kfree_skb(skb);
28297 return err;
28298 }
28299 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
28300 skb = dev_alloc_skb(64);
28301 if (!skb) {
28302 printk("%s: Out of memory in send_oam().\n", card->name);
28303 - atomic_inc(&vcc->stats->tx_err);
28304 + atomic_inc_unchecked(&vcc->stats->tx_err);
28305 return -ENOMEM;
28306 }
28307 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
28308 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
28309 index d438601..8b98495 100644
28310 --- a/drivers/atm/iphase.c
28311 +++ b/drivers/atm/iphase.c
28312 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
28313 status = (u_short) (buf_desc_ptr->desc_mode);
28314 if (status & (RX_CER | RX_PTE | RX_OFL))
28315 {
28316 - atomic_inc(&vcc->stats->rx_err);
28317 + atomic_inc_unchecked(&vcc->stats->rx_err);
28318 IF_ERR(printk("IA: bad packet, dropping it");)
28319 if (status & RX_CER) {
28320 IF_ERR(printk(" cause: packet CRC error\n");)
28321 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
28322 len = dma_addr - buf_addr;
28323 if (len > iadev->rx_buf_sz) {
28324 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28325 - atomic_inc(&vcc->stats->rx_err);
28326 + atomic_inc_unchecked(&vcc->stats->rx_err);
28327 goto out_free_desc;
28328 }
28329
28330 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28331 ia_vcc = INPH_IA_VCC(vcc);
28332 if (ia_vcc == NULL)
28333 {
28334 - atomic_inc(&vcc->stats->rx_err);
28335 + atomic_inc_unchecked(&vcc->stats->rx_err);
28336 atm_return(vcc, skb->truesize);
28337 dev_kfree_skb_any(skb);
28338 goto INCR_DLE;
28339 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28340 if ((length > iadev->rx_buf_sz) || (length >
28341 (skb->len - sizeof(struct cpcs_trailer))))
28342 {
28343 - atomic_inc(&vcc->stats->rx_err);
28344 + atomic_inc_unchecked(&vcc->stats->rx_err);
28345 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28346 length, skb->len);)
28347 atm_return(vcc, skb->truesize);
28348 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
28349
28350 IF_RX(printk("rx_dle_intr: skb push");)
28351 vcc->push(vcc,skb);
28352 - atomic_inc(&vcc->stats->rx);
28353 + atomic_inc_unchecked(&vcc->stats->rx);
28354 iadev->rx_pkt_cnt++;
28355 }
28356 INCR_DLE:
28357 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
28358 {
28359 struct k_sonet_stats *stats;
28360 stats = &PRIV(_ia_dev[board])->sonet_stats;
28361 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28362 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28363 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28364 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28365 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28366 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28367 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28368 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28369 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28370 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28371 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28372 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28373 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28374 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28375 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28376 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28377 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28378 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28379 }
28380 ia_cmds.status = 0;
28381 break;
28382 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28383 if ((desc == 0) || (desc > iadev->num_tx_desc))
28384 {
28385 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28386 - atomic_inc(&vcc->stats->tx);
28387 + atomic_inc_unchecked(&vcc->stats->tx);
28388 if (vcc->pop)
28389 vcc->pop(vcc, skb);
28390 else
28391 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
28392 ATM_DESC(skb) = vcc->vci;
28393 skb_queue_tail(&iadev->tx_dma_q, skb);
28394
28395 - atomic_inc(&vcc->stats->tx);
28396 + atomic_inc_unchecked(&vcc->stats->tx);
28397 iadev->tx_pkt_cnt++;
28398 /* Increment transaction counter */
28399 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28400
28401 #if 0
28402 /* add flow control logic */
28403 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28404 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28405 if (iavcc->vc_desc_cnt > 10) {
28406 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28407 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
28408 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
28409 index 68c7588..7036683 100644
28410 --- a/drivers/atm/lanai.c
28411 +++ b/drivers/atm/lanai.c
28412 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
28413 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28414 lanai_endtx(lanai, lvcc);
28415 lanai_free_skb(lvcc->tx.atmvcc, skb);
28416 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28417 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28418 }
28419
28420 /* Try to fill the buffer - don't call unless there is backlog */
28421 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
28422 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28423 __net_timestamp(skb);
28424 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28425 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28426 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28427 out:
28428 lvcc->rx.buf.ptr = end;
28429 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
28430 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28431 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28432 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28433 lanai->stats.service_rxnotaal5++;
28434 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28435 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28436 return 0;
28437 }
28438 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
28439 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28440 int bytes;
28441 read_unlock(&vcc_sklist_lock);
28442 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28443 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28444 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28445 lvcc->stats.x.aal5.service_trash++;
28446 bytes = (SERVICE_GET_END(s) * 16) -
28447 (((unsigned long) lvcc->rx.buf.ptr) -
28448 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28449 }
28450 if (s & SERVICE_STREAM) {
28451 read_unlock(&vcc_sklist_lock);
28452 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28453 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28454 lvcc->stats.x.aal5.service_stream++;
28455 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28456 "PDU on VCI %d!\n", lanai->number, vci);
28457 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
28458 return 0;
28459 }
28460 DPRINTK("got rx crc error on vci %d\n", vci);
28461 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28462 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28463 lvcc->stats.x.aal5.service_rxcrc++;
28464 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28465 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
28466 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28467 index 1c70c45..300718d 100644
28468 --- a/drivers/atm/nicstar.c
28469 +++ b/drivers/atm/nicstar.c
28470 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28471 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28472 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28473 card->index);
28474 - atomic_inc(&vcc->stats->tx_err);
28475 + atomic_inc_unchecked(&vcc->stats->tx_err);
28476 dev_kfree_skb_any(skb);
28477 return -EINVAL;
28478 }
28479 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28480 if (!vc->tx) {
28481 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28482 card->index);
28483 - atomic_inc(&vcc->stats->tx_err);
28484 + atomic_inc_unchecked(&vcc->stats->tx_err);
28485 dev_kfree_skb_any(skb);
28486 return -EINVAL;
28487 }
28488 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28489 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28490 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28491 card->index);
28492 - atomic_inc(&vcc->stats->tx_err);
28493 + atomic_inc_unchecked(&vcc->stats->tx_err);
28494 dev_kfree_skb_any(skb);
28495 return -EINVAL;
28496 }
28497
28498 if (skb_shinfo(skb)->nr_frags != 0) {
28499 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28500 - atomic_inc(&vcc->stats->tx_err);
28501 + atomic_inc_unchecked(&vcc->stats->tx_err);
28502 dev_kfree_skb_any(skb);
28503 return -EINVAL;
28504 }
28505 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
28506 }
28507
28508 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28509 - atomic_inc(&vcc->stats->tx_err);
28510 + atomic_inc_unchecked(&vcc->stats->tx_err);
28511 dev_kfree_skb_any(skb);
28512 return -EIO;
28513 }
28514 - atomic_inc(&vcc->stats->tx);
28515 + atomic_inc_unchecked(&vcc->stats->tx);
28516
28517 return 0;
28518 }
28519 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28520 printk
28521 ("nicstar%d: Can't allocate buffers for aal0.\n",
28522 card->index);
28523 - atomic_add(i, &vcc->stats->rx_drop);
28524 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
28525 break;
28526 }
28527 if (!atm_charge(vcc, sb->truesize)) {
28528 RXPRINTK
28529 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28530 card->index);
28531 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28532 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28533 dev_kfree_skb_any(sb);
28534 break;
28535 }
28536 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28537 ATM_SKB(sb)->vcc = vcc;
28538 __net_timestamp(sb);
28539 vcc->push(vcc, sb);
28540 - atomic_inc(&vcc->stats->rx);
28541 + atomic_inc_unchecked(&vcc->stats->rx);
28542 cell += ATM_CELL_PAYLOAD;
28543 }
28544
28545 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28546 if (iovb == NULL) {
28547 printk("nicstar%d: Out of iovec buffers.\n",
28548 card->index);
28549 - atomic_inc(&vcc->stats->rx_drop);
28550 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28551 recycle_rx_buf(card, skb);
28552 return;
28553 }
28554 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28555 small or large buffer itself. */
28556 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28557 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28558 - atomic_inc(&vcc->stats->rx_err);
28559 + atomic_inc_unchecked(&vcc->stats->rx_err);
28560 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28561 NS_MAX_IOVECS);
28562 NS_PRV_IOVCNT(iovb) = 0;
28563 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28564 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28565 card->index);
28566 which_list(card, skb);
28567 - atomic_inc(&vcc->stats->rx_err);
28568 + atomic_inc_unchecked(&vcc->stats->rx_err);
28569 recycle_rx_buf(card, skb);
28570 vc->rx_iov = NULL;
28571 recycle_iov_buf(card, iovb);
28572 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28573 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28574 card->index);
28575 which_list(card, skb);
28576 - atomic_inc(&vcc->stats->rx_err);
28577 + atomic_inc_unchecked(&vcc->stats->rx_err);
28578 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28579 NS_PRV_IOVCNT(iovb));
28580 vc->rx_iov = NULL;
28581 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28582 printk(" - PDU size mismatch.\n");
28583 else
28584 printk(".\n");
28585 - atomic_inc(&vcc->stats->rx_err);
28586 + atomic_inc_unchecked(&vcc->stats->rx_err);
28587 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28588 NS_PRV_IOVCNT(iovb));
28589 vc->rx_iov = NULL;
28590 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28591 /* skb points to a small buffer */
28592 if (!atm_charge(vcc, skb->truesize)) {
28593 push_rxbufs(card, skb);
28594 - atomic_inc(&vcc->stats->rx_drop);
28595 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28596 } else {
28597 skb_put(skb, len);
28598 dequeue_sm_buf(card, skb);
28599 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28600 ATM_SKB(skb)->vcc = vcc;
28601 __net_timestamp(skb);
28602 vcc->push(vcc, skb);
28603 - atomic_inc(&vcc->stats->rx);
28604 + atomic_inc_unchecked(&vcc->stats->rx);
28605 }
28606 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28607 struct sk_buff *sb;
28608 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28609 if (len <= NS_SMBUFSIZE) {
28610 if (!atm_charge(vcc, sb->truesize)) {
28611 push_rxbufs(card, sb);
28612 - atomic_inc(&vcc->stats->rx_drop);
28613 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28614 } else {
28615 skb_put(sb, len);
28616 dequeue_sm_buf(card, sb);
28617 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28618 ATM_SKB(sb)->vcc = vcc;
28619 __net_timestamp(sb);
28620 vcc->push(vcc, sb);
28621 - atomic_inc(&vcc->stats->rx);
28622 + atomic_inc_unchecked(&vcc->stats->rx);
28623 }
28624
28625 push_rxbufs(card, skb);
28626 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28627
28628 if (!atm_charge(vcc, skb->truesize)) {
28629 push_rxbufs(card, skb);
28630 - atomic_inc(&vcc->stats->rx_drop);
28631 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28632 } else {
28633 dequeue_lg_buf(card, skb);
28634 #ifdef NS_USE_DESTRUCTORS
28635 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28636 ATM_SKB(skb)->vcc = vcc;
28637 __net_timestamp(skb);
28638 vcc->push(vcc, skb);
28639 - atomic_inc(&vcc->stats->rx);
28640 + atomic_inc_unchecked(&vcc->stats->rx);
28641 }
28642
28643 push_rxbufs(card, sb);
28644 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28645 printk
28646 ("nicstar%d: Out of huge buffers.\n",
28647 card->index);
28648 - atomic_inc(&vcc->stats->rx_drop);
28649 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28650 recycle_iovec_rx_bufs(card,
28651 (struct iovec *)
28652 iovb->data,
28653 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28654 card->hbpool.count++;
28655 } else
28656 dev_kfree_skb_any(hb);
28657 - atomic_inc(&vcc->stats->rx_drop);
28658 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28659 } else {
28660 /* Copy the small buffer to the huge buffer */
28661 sb = (struct sk_buff *)iov->iov_base;
28662 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28663 #endif /* NS_USE_DESTRUCTORS */
28664 __net_timestamp(hb);
28665 vcc->push(vcc, hb);
28666 - atomic_inc(&vcc->stats->rx);
28667 + atomic_inc_unchecked(&vcc->stats->rx);
28668 }
28669 }
28670
28671 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
28672 index 9851093..adb2b1e 100644
28673 --- a/drivers/atm/solos-pci.c
28674 +++ b/drivers/atm/solos-pci.c
28675 @@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28676 }
28677 atm_charge(vcc, skb->truesize);
28678 vcc->push(vcc, skb);
28679 - atomic_inc(&vcc->stats->rx);
28680 + atomic_inc_unchecked(&vcc->stats->rx);
28681 break;
28682
28683 case PKT_STATUS:
28684 @@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28685 vcc = SKB_CB(oldskb)->vcc;
28686
28687 if (vcc) {
28688 - atomic_inc(&vcc->stats->tx);
28689 + atomic_inc_unchecked(&vcc->stats->tx);
28690 solos_pop(vcc, oldskb);
28691 } else
28692 dev_kfree_skb_irq(oldskb);
28693 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
28694 index 0215934..ce9f5b1 100644
28695 --- a/drivers/atm/suni.c
28696 +++ b/drivers/atm/suni.c
28697 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28698
28699
28700 #define ADD_LIMITED(s,v) \
28701 - atomic_add((v),&stats->s); \
28702 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28703 + atomic_add_unchecked((v),&stats->s); \
28704 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28705
28706
28707 static void suni_hz(unsigned long from_timer)
28708 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28709 index 5120a96..e2572bd 100644
28710 --- a/drivers/atm/uPD98402.c
28711 +++ b/drivers/atm/uPD98402.c
28712 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28713 struct sonet_stats tmp;
28714 int error = 0;
28715
28716 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28717 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28718 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28719 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28720 if (zero && !error) {
28721 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28722
28723
28724 #define ADD_LIMITED(s,v) \
28725 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28726 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28727 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28728 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28729 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28730 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28731
28732
28733 static void stat_event(struct atm_dev *dev)
28734 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28735 if (reason & uPD98402_INT_PFM) stat_event(dev);
28736 if (reason & uPD98402_INT_PCO) {
28737 (void) GET(PCOCR); /* clear interrupt cause */
28738 - atomic_add(GET(HECCT),
28739 + atomic_add_unchecked(GET(HECCT),
28740 &PRIV(dev)->sonet_stats.uncorr_hcs);
28741 }
28742 if ((reason & uPD98402_INT_RFO) &&
28743 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28744 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28745 uPD98402_INT_LOS),PIMR); /* enable them */
28746 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28747 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28748 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28749 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28750 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28751 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28752 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28753 return 0;
28754 }
28755
28756 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
28757 index abe4e20..83c4727 100644
28758 --- a/drivers/atm/zatm.c
28759 +++ b/drivers/atm/zatm.c
28760 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28761 }
28762 if (!size) {
28763 dev_kfree_skb_irq(skb);
28764 - if (vcc) atomic_inc(&vcc->stats->rx_err);
28765 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28766 continue;
28767 }
28768 if (!atm_charge(vcc,skb->truesize)) {
28769 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28770 skb->len = size;
28771 ATM_SKB(skb)->vcc = vcc;
28772 vcc->push(vcc,skb);
28773 - atomic_inc(&vcc->stats->rx);
28774 + atomic_inc_unchecked(&vcc->stats->rx);
28775 }
28776 zout(pos & 0xffff,MTA(mbx));
28777 #if 0 /* probably a stupid idea */
28778 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28779 skb_queue_head(&zatm_vcc->backlog,skb);
28780 break;
28781 }
28782 - atomic_inc(&vcc->stats->tx);
28783 + atomic_inc_unchecked(&vcc->stats->tx);
28784 wake_up(&zatm_vcc->tx_wait);
28785 }
28786
28787 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
28788 index 8493536..31adee0 100644
28789 --- a/drivers/base/devtmpfs.c
28790 +++ b/drivers/base/devtmpfs.c
28791 @@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28792 if (!thread)
28793 return 0;
28794
28795 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28796 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28797 if (err)
28798 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28799 else
28800 diff --git a/drivers/base/node.c b/drivers/base/node.c
28801 index 90aa2a1..af1a177 100644
28802 --- a/drivers/base/node.c
28803 +++ b/drivers/base/node.c
28804 @@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28805 {
28806 int n;
28807
28808 - n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28809 - if (n > 0 && PAGE_SIZE > n + 1) {
28810 - *(buf + n++) = '\n';
28811 - *(buf + n++) = '\0';
28812 - }
28813 + n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28814 + buf[n++] = '\n';
28815 + buf[n] = '\0';
28816 return n;
28817 }
28818
28819 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
28820 index 2a3e581..3d6a73f 100644
28821 --- a/drivers/base/power/wakeup.c
28822 +++ b/drivers/base/power/wakeup.c
28823 @@ -30,14 +30,14 @@ bool events_check_enabled;
28824 * They need to be modified together atomically, so it's better to use one
28825 * atomic variable to hold them both.
28826 */
28827 -static atomic_t combined_event_count = ATOMIC_INIT(0);
28828 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
28829
28830 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28831 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28832
28833 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28834 {
28835 - unsigned int comb = atomic_read(&combined_event_count);
28836 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
28837
28838 *cnt = (comb >> IN_PROGRESS_BITS);
28839 *inpr = comb & MAX_IN_PROGRESS;
28840 @@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28841 ws->last_time = ktime_get();
28842
28843 /* Increment the counter of events in progress. */
28844 - atomic_inc(&combined_event_count);
28845 + atomic_inc_unchecked(&combined_event_count);
28846 }
28847
28848 /**
28849 @@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28850 * Increment the counter of registered wakeup events and decrement the
28851 * couter of wakeup events in progress simultaneously.
28852 */
28853 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28854 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28855 }
28856
28857 /**
28858 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
28859 index b0f553b..77b928b 100644
28860 --- a/drivers/block/cciss.c
28861 +++ b/drivers/block/cciss.c
28862 @@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28863 int err;
28864 u32 cp;
28865
28866 + memset(&arg64, 0, sizeof(arg64));
28867 +
28868 err = 0;
28869 err |=
28870 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
28871 @@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28872 while (!list_empty(&h->reqQ)) {
28873 c = list_entry(h->reqQ.next, CommandList_struct, list);
28874 /* can't do anything if fifo is full */
28875 - if ((h->access.fifo_full(h))) {
28876 + if ((h->access->fifo_full(h))) {
28877 dev_warn(&h->pdev->dev, "fifo full\n");
28878 break;
28879 }
28880 @@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
28881 h->Qdepth--;
28882
28883 /* Tell the controller execute command */
28884 - h->access.submit_command(h, c);
28885 + h->access->submit_command(h, c);
28886
28887 /* Put job onto the completed Q */
28888 addQ(&h->cmpQ, c);
28889 @@ -3443,17 +3445,17 @@ startio:
28890
28891 static inline unsigned long get_next_completion(ctlr_info_t *h)
28892 {
28893 - return h->access.command_completed(h);
28894 + return h->access->command_completed(h);
28895 }
28896
28897 static inline int interrupt_pending(ctlr_info_t *h)
28898 {
28899 - return h->access.intr_pending(h);
28900 + return h->access->intr_pending(h);
28901 }
28902
28903 static inline long interrupt_not_for_us(ctlr_info_t *h)
28904 {
28905 - return ((h->access.intr_pending(h) == 0) ||
28906 + return ((h->access->intr_pending(h) == 0) ||
28907 (h->interrupts_enabled == 0));
28908 }
28909
28910 @@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28911 u32 a;
28912
28913 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28914 - return h->access.command_completed(h);
28915 + return h->access->command_completed(h);
28916
28917 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28918 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
28919 @@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28920 trans_support & CFGTBL_Trans_use_short_tags);
28921
28922 /* Change the access methods to the performant access methods */
28923 - h->access = SA5_performant_access;
28924 + h->access = &SA5_performant_access;
28925 h->transMethod = CFGTBL_Trans_Performant;
28926
28927 return;
28928 @@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28929 if (prod_index < 0)
28930 return -ENODEV;
28931 h->product_name = products[prod_index].product_name;
28932 - h->access = *(products[prod_index].access);
28933 + h->access = products[prod_index].access;
28934
28935 if (cciss_board_disabled(h)) {
28936 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
28937 @@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28938 }
28939
28940 /* make sure the board interrupts are off */
28941 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28942 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28943 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28944 if (rc)
28945 goto clean2;
28946 @@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28947 * fake ones to scoop up any residual completions.
28948 */
28949 spin_lock_irqsave(&h->lock, flags);
28950 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28951 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28952 spin_unlock_irqrestore(&h->lock, flags);
28953 free_irq(h->intr[h->intr_mode], h);
28954 rc = cciss_request_irq(h, cciss_msix_discard_completions,
28955 @@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28956 dev_info(&h->pdev->dev, "Board READY.\n");
28957 dev_info(&h->pdev->dev,
28958 "Waiting for stale completions to drain.\n");
28959 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28960 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28961 msleep(10000);
28962 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28963 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28964
28965 rc = controller_reset_failed(h->cfgtable);
28966 if (rc)
28967 @@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28968 cciss_scsi_setup(h);
28969
28970 /* Turn the interrupts on so we can service requests */
28971 - h->access.set_intr_mask(h, CCISS_INTR_ON);
28972 + h->access->set_intr_mask(h, CCISS_INTR_ON);
28973
28974 /* Get the firmware version */
28975 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
28976 @@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28977 kfree(flush_buf);
28978 if (return_code != IO_OK)
28979 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28980 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
28981 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
28982 free_irq(h->intr[h->intr_mode], h);
28983 }
28984
28985 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
28986 index 7fda30e..eb5dfe0 100644
28987 --- a/drivers/block/cciss.h
28988 +++ b/drivers/block/cciss.h
28989 @@ -101,7 +101,7 @@ struct ctlr_info
28990 /* information about each logical volume */
28991 drive_info_struct *drv[CISS_MAX_LUN];
28992
28993 - struct access_method access;
28994 + struct access_method *access;
28995
28996 /* queue and queue Info */
28997 struct list_head reqQ;
28998 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
28999 index 9125bbe..eede5c8 100644
29000 --- a/drivers/block/cpqarray.c
29001 +++ b/drivers/block/cpqarray.c
29002 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29003 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29004 goto Enomem4;
29005 }
29006 - hba[i]->access.set_intr_mask(hba[i], 0);
29007 + hba[i]->access->set_intr_mask(hba[i], 0);
29008 if (request_irq(hba[i]->intr, do_ida_intr,
29009 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29010 {
29011 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29012 add_timer(&hba[i]->timer);
29013
29014 /* Enable IRQ now that spinlock and rate limit timer are set up */
29015 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29016 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29017
29018 for(j=0; j<NWD; j++) {
29019 struct gendisk *disk = ida_gendisk[i][j];
29020 @@ -694,7 +694,7 @@ DBGINFO(
29021 for(i=0; i<NR_PRODUCTS; i++) {
29022 if (board_id == products[i].board_id) {
29023 c->product_name = products[i].product_name;
29024 - c->access = *(products[i].access);
29025 + c->access = products[i].access;
29026 break;
29027 }
29028 }
29029 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29030 hba[ctlr]->intr = intr;
29031 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29032 hba[ctlr]->product_name = products[j].product_name;
29033 - hba[ctlr]->access = *(products[j].access);
29034 + hba[ctlr]->access = products[j].access;
29035 hba[ctlr]->ctlr = ctlr;
29036 hba[ctlr]->board_id = board_id;
29037 hba[ctlr]->pci_dev = NULL; /* not PCI */
29038 @@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29039
29040 while((c = h->reqQ) != NULL) {
29041 /* Can't do anything if we're busy */
29042 - if (h->access.fifo_full(h) == 0)
29043 + if (h->access->fifo_full(h) == 0)
29044 return;
29045
29046 /* Get the first entry from the request Q */
29047 @@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29048 h->Qdepth--;
29049
29050 /* Tell the controller to do our bidding */
29051 - h->access.submit_command(h, c);
29052 + h->access->submit_command(h, c);
29053
29054 /* Get onto the completion Q */
29055 addQ(&h->cmpQ, c);
29056 @@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29057 unsigned long flags;
29058 __u32 a,a1;
29059
29060 - istat = h->access.intr_pending(h);
29061 + istat = h->access->intr_pending(h);
29062 /* Is this interrupt for us? */
29063 if (istat == 0)
29064 return IRQ_NONE;
29065 @@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29066 */
29067 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29068 if (istat & FIFO_NOT_EMPTY) {
29069 - while((a = h->access.command_completed(h))) {
29070 + while((a = h->access->command_completed(h))) {
29071 a1 = a; a &= ~3;
29072 if ((c = h->cmpQ) == NULL)
29073 {
29074 @@ -1449,11 +1449,11 @@ static int sendcmd(
29075 /*
29076 * Disable interrupt
29077 */
29078 - info_p->access.set_intr_mask(info_p, 0);
29079 + info_p->access->set_intr_mask(info_p, 0);
29080 /* Make sure there is room in the command FIFO */
29081 /* Actually it should be completely empty at this time. */
29082 for (i = 200000; i > 0; i--) {
29083 - temp = info_p->access.fifo_full(info_p);
29084 + temp = info_p->access->fifo_full(info_p);
29085 if (temp != 0) {
29086 break;
29087 }
29088 @@ -1466,7 +1466,7 @@ DBG(
29089 /*
29090 * Send the cmd
29091 */
29092 - info_p->access.submit_command(info_p, c);
29093 + info_p->access->submit_command(info_p, c);
29094 complete = pollcomplete(ctlr);
29095
29096 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
29097 @@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29098 * we check the new geometry. Then turn interrupts back on when
29099 * we're done.
29100 */
29101 - host->access.set_intr_mask(host, 0);
29102 + host->access->set_intr_mask(host, 0);
29103 getgeometry(ctlr);
29104 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29105 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29106
29107 for(i=0; i<NWD; i++) {
29108 struct gendisk *disk = ida_gendisk[ctlr][i];
29109 @@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29110 /* Wait (up to 2 seconds) for a command to complete */
29111
29112 for (i = 200000; i > 0; i--) {
29113 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
29114 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
29115 if (done == 0) {
29116 udelay(10); /* a short fixed delay */
29117 } else
29118 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29119 index be73e9d..7fbf140 100644
29120 --- a/drivers/block/cpqarray.h
29121 +++ b/drivers/block/cpqarray.h
29122 @@ -99,7 +99,7 @@ struct ctlr_info {
29123 drv_info_t drv[NWD];
29124 struct proc_dir_entry *proc;
29125
29126 - struct access_method access;
29127 + struct access_method *access;
29128
29129 cmdlist_t *reqQ;
29130 cmdlist_t *cmpQ;
29131 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
29132 index 8d68056..e67050f 100644
29133 --- a/drivers/block/drbd/drbd_int.h
29134 +++ b/drivers/block/drbd/drbd_int.h
29135 @@ -736,7 +736,7 @@ struct drbd_request;
29136 struct drbd_epoch {
29137 struct list_head list;
29138 unsigned int barrier_nr;
29139 - atomic_t epoch_size; /* increased on every request added. */
29140 + atomic_unchecked_t epoch_size; /* increased on every request added. */
29141 atomic_t active; /* increased on every req. added, and dec on every finished. */
29142 unsigned long flags;
29143 };
29144 @@ -1108,7 +1108,7 @@ struct drbd_conf {
29145 void *int_dig_in;
29146 void *int_dig_vv;
29147 wait_queue_head_t seq_wait;
29148 - atomic_t packet_seq;
29149 + atomic_unchecked_t packet_seq;
29150 unsigned int peer_seq;
29151 spinlock_t peer_seq_lock;
29152 unsigned int minor;
29153 @@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29154
29155 static inline void drbd_tcp_cork(struct socket *sock)
29156 {
29157 - int __user val = 1;
29158 + int val = 1;
29159 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29160 - (char __user *)&val, sizeof(val));
29161 + (char __force_user *)&val, sizeof(val));
29162 }
29163
29164 static inline void drbd_tcp_uncork(struct socket *sock)
29165 {
29166 - int __user val = 0;
29167 + int val = 0;
29168 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29169 - (char __user *)&val, sizeof(val));
29170 + (char __force_user *)&val, sizeof(val));
29171 }
29172
29173 static inline void drbd_tcp_nodelay(struct socket *sock)
29174 {
29175 - int __user val = 1;
29176 + int val = 1;
29177 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29178 - (char __user *)&val, sizeof(val));
29179 + (char __force_user *)&val, sizeof(val));
29180 }
29181
29182 static inline void drbd_tcp_quickack(struct socket *sock)
29183 {
29184 - int __user val = 2;
29185 + int val = 2;
29186 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29187 - (char __user *)&val, sizeof(val));
29188 + (char __force_user *)&val, sizeof(val));
29189 }
29190
29191 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
29192 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
29193 index 211fc44..c5116f1 100644
29194 --- a/drivers/block/drbd/drbd_main.c
29195 +++ b/drivers/block/drbd/drbd_main.c
29196 @@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29197 p.sector = sector;
29198 p.block_id = block_id;
29199 p.blksize = blksize;
29200 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29201 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29202
29203 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
29204 return false;
29205 @@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29206 p.sector = cpu_to_be64(req->sector);
29207 p.block_id = (unsigned long)req;
29208 p.seq_num = cpu_to_be32(req->seq_num =
29209 - atomic_add_return(1, &mdev->packet_seq));
29210 + atomic_add_return_unchecked(1, &mdev->packet_seq));
29211
29212 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29213
29214 @@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29215 atomic_set(&mdev->unacked_cnt, 0);
29216 atomic_set(&mdev->local_cnt, 0);
29217 atomic_set(&mdev->net_cnt, 0);
29218 - atomic_set(&mdev->packet_seq, 0);
29219 + atomic_set_unchecked(&mdev->packet_seq, 0);
29220 atomic_set(&mdev->pp_in_use, 0);
29221 atomic_set(&mdev->pp_in_use_by_net, 0);
29222 atomic_set(&mdev->rs_sect_in, 0);
29223 @@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29224 mdev->receiver.t_state);
29225
29226 /* no need to lock it, I'm the only thread alive */
29227 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29228 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29229 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29230 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29231 mdev->al_writ_cnt =
29232 mdev->bm_writ_cnt =
29233 mdev->read_cnt =
29234 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
29235 index 946166e..356b39a 100644
29236 --- a/drivers/block/drbd/drbd_nl.c
29237 +++ b/drivers/block/drbd/drbd_nl.c
29238 @@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29239 module_put(THIS_MODULE);
29240 }
29241
29242 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29243 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29244
29245 static unsigned short *
29246 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
29247 @@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29248 cn_reply->id.idx = CN_IDX_DRBD;
29249 cn_reply->id.val = CN_VAL_DRBD;
29250
29251 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29252 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29253 cn_reply->ack = 0; /* not used here. */
29254 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29255 (int)((char *)tl - (char *)reply->tag_list);
29256 @@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29257 cn_reply->id.idx = CN_IDX_DRBD;
29258 cn_reply->id.val = CN_VAL_DRBD;
29259
29260 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29261 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29262 cn_reply->ack = 0; /* not used here. */
29263 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29264 (int)((char *)tl - (char *)reply->tag_list);
29265 @@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29266 cn_reply->id.idx = CN_IDX_DRBD;
29267 cn_reply->id.val = CN_VAL_DRBD;
29268
29269 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29270 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29271 cn_reply->ack = 0; // not used here.
29272 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29273 (int)((char*)tl - (char*)reply->tag_list);
29274 @@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29275 cn_reply->id.idx = CN_IDX_DRBD;
29276 cn_reply->id.val = CN_VAL_DRBD;
29277
29278 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29279 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29280 cn_reply->ack = 0; /* not used here. */
29281 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29282 (int)((char *)tl - (char *)reply->tag_list);
29283 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29284 index 43beaca..4a5b1dd 100644
29285 --- a/drivers/block/drbd/drbd_receiver.c
29286 +++ b/drivers/block/drbd/drbd_receiver.c
29287 @@ -894,7 +894,7 @@ retry:
29288 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29289 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29290
29291 - atomic_set(&mdev->packet_seq, 0);
29292 + atomic_set_unchecked(&mdev->packet_seq, 0);
29293 mdev->peer_seq = 0;
29294
29295 drbd_thread_start(&mdev->asender);
29296 @@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29297 do {
29298 next_epoch = NULL;
29299
29300 - epoch_size = atomic_read(&epoch->epoch_size);
29301 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29302
29303 switch (ev & ~EV_CLEANUP) {
29304 case EV_PUT:
29305 @@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29306 rv = FE_DESTROYED;
29307 } else {
29308 epoch->flags = 0;
29309 - atomic_set(&epoch->epoch_size, 0);
29310 + atomic_set_unchecked(&epoch->epoch_size, 0);
29311 /* atomic_set(&epoch->active, 0); is already zero */
29312 if (rv == FE_STILL_LIVE)
29313 rv = FE_RECYCLED;
29314 @@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29315 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29316 drbd_flush(mdev);
29317
29318 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29319 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29320 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29321 if (epoch)
29322 break;
29323 }
29324
29325 epoch = mdev->current_epoch;
29326 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29327 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29328
29329 D_ASSERT(atomic_read(&epoch->active) == 0);
29330 D_ASSERT(epoch->flags == 0);
29331 @@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29332 }
29333
29334 epoch->flags = 0;
29335 - atomic_set(&epoch->epoch_size, 0);
29336 + atomic_set_unchecked(&epoch->epoch_size, 0);
29337 atomic_set(&epoch->active, 0);
29338
29339 spin_lock(&mdev->epoch_lock);
29340 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
29341 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29342 list_add(&epoch->list, &mdev->current_epoch->list);
29343 mdev->current_epoch = epoch;
29344 mdev->epochs++;
29345 @@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29346 spin_unlock(&mdev->peer_seq_lock);
29347
29348 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29349 - atomic_inc(&mdev->current_epoch->epoch_size);
29350 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29351 return drbd_drain_block(mdev, data_size);
29352 }
29353
29354 @@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29355
29356 spin_lock(&mdev->epoch_lock);
29357 e->epoch = mdev->current_epoch;
29358 - atomic_inc(&e->epoch->epoch_size);
29359 + atomic_inc_unchecked(&e->epoch->epoch_size);
29360 atomic_inc(&e->epoch->active);
29361 spin_unlock(&mdev->epoch_lock);
29362
29363 @@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29364 D_ASSERT(list_empty(&mdev->done_ee));
29365
29366 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29367 - atomic_set(&mdev->current_epoch->epoch_size, 0);
29368 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29369 D_ASSERT(list_empty(&mdev->current_epoch->list));
29370 }
29371
29372 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
29373 index bbca966..65e37dd 100644
29374 --- a/drivers/block/loop.c
29375 +++ b/drivers/block/loop.c
29376 @@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29377 mm_segment_t old_fs = get_fs();
29378
29379 set_fs(get_ds());
29380 - bw = file->f_op->write(file, buf, len, &pos);
29381 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29382 set_fs(old_fs);
29383 if (likely(bw == len))
29384 return 0;
29385 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
29386 index ee94686..3e09ad3 100644
29387 --- a/drivers/char/Kconfig
29388 +++ b/drivers/char/Kconfig
29389 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29390
29391 config DEVKMEM
29392 bool "/dev/kmem virtual device support"
29393 - default y
29394 + default n
29395 + depends on !GRKERNSEC_KMEM
29396 help
29397 Say Y here if you want to support the /dev/kmem device. The
29398 /dev/kmem device is rarely used, but can be used for certain
29399 @@ -581,6 +582,7 @@ config DEVPORT
29400 bool
29401 depends on !M68K
29402 depends on ISA || PCI
29403 + depends on !GRKERNSEC_KMEM
29404 default y
29405
29406 source "drivers/s390/char/Kconfig"
29407 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29408 index 2e04433..22afc64 100644
29409 --- a/drivers/char/agp/frontend.c
29410 +++ b/drivers/char/agp/frontend.c
29411 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29412 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29413 return -EFAULT;
29414
29415 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29416 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29417 return -EFAULT;
29418
29419 client = agp_find_client_by_pid(reserve.pid);
29420 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
29421 index 21cb980..f15107c 100644
29422 --- a/drivers/char/genrtc.c
29423 +++ b/drivers/char/genrtc.c
29424 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29425 switch (cmd) {
29426
29427 case RTC_PLL_GET:
29428 + memset(&pll, 0, sizeof(pll));
29429 if (get_rtc_pll(&pll))
29430 return -EINVAL;
29431 else
29432 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
29433 index dfd7876..c0b0885 100644
29434 --- a/drivers/char/hpet.c
29435 +++ b/drivers/char/hpet.c
29436 @@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29437 }
29438
29439 static int
29440 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29441 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29442 struct hpet_info *info)
29443 {
29444 struct hpet_timer __iomem *timer;
29445 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
29446 index 2c29942..604c5ba 100644
29447 --- a/drivers/char/ipmi/ipmi_msghandler.c
29448 +++ b/drivers/char/ipmi/ipmi_msghandler.c
29449 @@ -420,7 +420,7 @@ struct ipmi_smi {
29450 struct proc_dir_entry *proc_dir;
29451 char proc_dir_name[10];
29452
29453 - atomic_t stats[IPMI_NUM_STATS];
29454 + atomic_unchecked_t stats[IPMI_NUM_STATS];
29455
29456 /*
29457 * run_to_completion duplicate of smb_info, smi_info
29458 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29459
29460
29461 #define ipmi_inc_stat(intf, stat) \
29462 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29463 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29464 #define ipmi_get_stat(intf, stat) \
29465 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29466 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29467
29468 static int is_lan_addr(struct ipmi_addr *addr)
29469 {
29470 @@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29471 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29472 init_waitqueue_head(&intf->waitq);
29473 for (i = 0; i < IPMI_NUM_STATS; i++)
29474 - atomic_set(&intf->stats[i], 0);
29475 + atomic_set_unchecked(&intf->stats[i], 0);
29476
29477 intf->proc_dir = NULL;
29478
29479 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
29480 index 1e638ff..a869ef5 100644
29481 --- a/drivers/char/ipmi/ipmi_si_intf.c
29482 +++ b/drivers/char/ipmi/ipmi_si_intf.c
29483 @@ -275,7 +275,7 @@ struct smi_info {
29484 unsigned char slave_addr;
29485
29486 /* Counters and things for the proc filesystem. */
29487 - atomic_t stats[SI_NUM_STATS];
29488 + atomic_unchecked_t stats[SI_NUM_STATS];
29489
29490 struct task_struct *thread;
29491
29492 @@ -284,9 +284,9 @@ struct smi_info {
29493 };
29494
29495 #define smi_inc_stat(smi, stat) \
29496 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29497 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29498 #define smi_get_stat(smi, stat) \
29499 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29500 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29501
29502 #define SI_MAX_PARMS 4
29503
29504 @@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29505 atomic_set(&new_smi->req_events, 0);
29506 new_smi->run_to_completion = 0;
29507 for (i = 0; i < SI_NUM_STATS; i++)
29508 - atomic_set(&new_smi->stats[i], 0);
29509 + atomic_set_unchecked(&new_smi->stats[i], 0);
29510
29511 new_smi->interrupt_disabled = 1;
29512 atomic_set(&new_smi->stop_operation, 0);
29513 diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
29514 index 47ff7e4..0c7d340 100644
29515 --- a/drivers/char/mbcs.c
29516 +++ b/drivers/char/mbcs.c
29517 @@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29518 return 0;
29519 }
29520
29521 -static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29522 +static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29523 {
29524 .part_num = MBCS_PART_NUM,
29525 .mfg_num = MBCS_MFG_NUM,
29526 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
29527 index d6e9d08..4493e89 100644
29528 --- a/drivers/char/mem.c
29529 +++ b/drivers/char/mem.c
29530 @@ -18,6 +18,7 @@
29531 #include <linux/raw.h>
29532 #include <linux/tty.h>
29533 #include <linux/capability.h>
29534 +#include <linux/security.h>
29535 #include <linux/ptrace.h>
29536 #include <linux/device.h>
29537 #include <linux/highmem.h>
29538 @@ -35,6 +36,10 @@
29539 # include <linux/efi.h>
29540 #endif
29541
29542 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29543 +extern const struct file_operations grsec_fops;
29544 +#endif
29545 +
29546 static inline unsigned long size_inside_page(unsigned long start,
29547 unsigned long size)
29548 {
29549 @@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29550
29551 while (cursor < to) {
29552 if (!devmem_is_allowed(pfn)) {
29553 +#ifdef CONFIG_GRKERNSEC_KMEM
29554 + gr_handle_mem_readwrite(from, to);
29555 +#else
29556 printk(KERN_INFO
29557 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29558 current->comm, from, to);
29559 +#endif
29560 return 0;
29561 }
29562 cursor += PAGE_SIZE;
29563 @@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29564 }
29565 return 1;
29566 }
29567 +#elif defined(CONFIG_GRKERNSEC_KMEM)
29568 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29569 +{
29570 + return 0;
29571 +}
29572 #else
29573 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29574 {
29575 @@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29576
29577 while (count > 0) {
29578 unsigned long remaining;
29579 + char *temp;
29580
29581 sz = size_inside_page(p, count);
29582
29583 @@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29584 if (!ptr)
29585 return -EFAULT;
29586
29587 - remaining = copy_to_user(buf, ptr, sz);
29588 +#ifdef CONFIG_PAX_USERCOPY
29589 + temp = kmalloc(sz, GFP_KERNEL);
29590 + if (!temp) {
29591 + unxlate_dev_mem_ptr(p, ptr);
29592 + return -ENOMEM;
29593 + }
29594 + memcpy(temp, ptr, sz);
29595 +#else
29596 + temp = ptr;
29597 +#endif
29598 +
29599 + remaining = copy_to_user(buf, temp, sz);
29600 +
29601 +#ifdef CONFIG_PAX_USERCOPY
29602 + kfree(temp);
29603 +#endif
29604 +
29605 unxlate_dev_mem_ptr(p, ptr);
29606 if (remaining)
29607 return -EFAULT;
29608 @@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29609 size_t count, loff_t *ppos)
29610 {
29611 unsigned long p = *ppos;
29612 - ssize_t low_count, read, sz;
29613 + ssize_t low_count, read, sz, err = 0;
29614 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29615 - int err = 0;
29616
29617 read = 0;
29618 if (p < (unsigned long) high_memory) {
29619 @@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29620 }
29621 #endif
29622 while (low_count > 0) {
29623 + char *temp;
29624 +
29625 sz = size_inside_page(p, low_count);
29626
29627 /*
29628 @@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29629 */
29630 kbuf = xlate_dev_kmem_ptr((char *)p);
29631
29632 - if (copy_to_user(buf, kbuf, sz))
29633 +#ifdef CONFIG_PAX_USERCOPY
29634 + temp = kmalloc(sz, GFP_KERNEL);
29635 + if (!temp)
29636 + return -ENOMEM;
29637 + memcpy(temp, kbuf, sz);
29638 +#else
29639 + temp = kbuf;
29640 +#endif
29641 +
29642 + err = copy_to_user(buf, temp, sz);
29643 +
29644 +#ifdef CONFIG_PAX_USERCOPY
29645 + kfree(temp);
29646 +#endif
29647 +
29648 + if (err)
29649 return -EFAULT;
29650 buf += sz;
29651 p += sz;
29652 @@ -867,6 +914,9 @@ static const struct memdev {
29653 #ifdef CONFIG_CRASH_DUMP
29654 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29655 #endif
29656 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
29657 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29658 +#endif
29659 };
29660
29661 static int memory_open(struct inode *inode, struct file *filp)
29662 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
29663 index 9df78e2..01ba9ae 100644
29664 --- a/drivers/char/nvram.c
29665 +++ b/drivers/char/nvram.c
29666 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29667
29668 spin_unlock_irq(&rtc_lock);
29669
29670 - if (copy_to_user(buf, contents, tmp - contents))
29671 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29672 return -EFAULT;
29673
29674 *ppos = i;
29675 diff --git a/drivers/char/random.c b/drivers/char/random.c
29676 index 4ec04a7..9918387 100644
29677 --- a/drivers/char/random.c
29678 +++ b/drivers/char/random.c
29679 @@ -261,8 +261,13 @@
29680 /*
29681 * Configuration information
29682 */
29683 +#ifdef CONFIG_GRKERNSEC_RANDNET
29684 +#define INPUT_POOL_WORDS 512
29685 +#define OUTPUT_POOL_WORDS 128
29686 +#else
29687 #define INPUT_POOL_WORDS 128
29688 #define OUTPUT_POOL_WORDS 32
29689 +#endif
29690 #define SEC_XFER_SIZE 512
29691 #define EXTRACT_SIZE 10
29692
29693 @@ -300,10 +305,17 @@ static struct poolinfo {
29694 int poolwords;
29695 int tap1, tap2, tap3, tap4, tap5;
29696 } poolinfo_table[] = {
29697 +#ifdef CONFIG_GRKERNSEC_RANDNET
29698 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29699 + { 512, 411, 308, 208, 104, 1 },
29700 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29701 + { 128, 103, 76, 51, 25, 1 },
29702 +#else
29703 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29704 { 128, 103, 76, 51, 25, 1 },
29705 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29706 { 32, 26, 20, 14, 7, 1 },
29707 +#endif
29708 #if 0
29709 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29710 { 2048, 1638, 1231, 819, 411, 1 },
29711 @@ -726,6 +738,17 @@ void add_disk_randomness(struct gendisk *disk)
29712 }
29713 #endif
29714
29715 +#ifdef CONFIG_PAX_LATENT_ENTROPY
29716 +u64 latent_entropy;
29717 +
29718 +__init void transfer_latent_entropy(void)
29719 +{
29720 + mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
29721 + mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
29722 +// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
29723 +}
29724 +#endif
29725 +
29726 /*********************************************************************
29727 *
29728 * Entropy extraction routines
29729 @@ -913,7 +936,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29730
29731 extract_buf(r, tmp);
29732 i = min_t(int, nbytes, EXTRACT_SIZE);
29733 - if (copy_to_user(buf, tmp, i)) {
29734 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29735 ret = -EFAULT;
29736 break;
29737 }
29738 @@ -1238,7 +1261,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29739 #include <linux/sysctl.h>
29740
29741 static int min_read_thresh = 8, min_write_thresh;
29742 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
29743 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29744 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29745 static char sysctl_bootid[16];
29746
29747 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
29748 index 45713f0..8286d21 100644
29749 --- a/drivers/char/sonypi.c
29750 +++ b/drivers/char/sonypi.c
29751 @@ -54,6 +54,7 @@
29752
29753 #include <asm/uaccess.h>
29754 #include <asm/io.h>
29755 +#include <asm/local.h>
29756
29757 #include <linux/sonypi.h>
29758
29759 @@ -490,7 +491,7 @@ static struct sonypi_device {
29760 spinlock_t fifo_lock;
29761 wait_queue_head_t fifo_proc_list;
29762 struct fasync_struct *fifo_async;
29763 - int open_count;
29764 + local_t open_count;
29765 int model;
29766 struct input_dev *input_jog_dev;
29767 struct input_dev *input_key_dev;
29768 @@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29769 static int sonypi_misc_release(struct inode *inode, struct file *file)
29770 {
29771 mutex_lock(&sonypi_device.lock);
29772 - sonypi_device.open_count--;
29773 + local_dec(&sonypi_device.open_count);
29774 mutex_unlock(&sonypi_device.lock);
29775 return 0;
29776 }
29777 @@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
29778 {
29779 mutex_lock(&sonypi_device.lock);
29780 /* Flush input queue on first open */
29781 - if (!sonypi_device.open_count)
29782 + if (!local_read(&sonypi_device.open_count))
29783 kfifo_reset(&sonypi_device.fifo);
29784 - sonypi_device.open_count++;
29785 + local_inc(&sonypi_device.open_count);
29786 mutex_unlock(&sonypi_device.lock);
29787
29788 return 0;
29789 diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
29790 index ad7c732..5aa8054 100644
29791 --- a/drivers/char/tpm/tpm.c
29792 +++ b/drivers/char/tpm/tpm.c
29793 @@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29794 chip->vendor.req_complete_val)
29795 goto out_recv;
29796
29797 - if ((status == chip->vendor.req_canceled)) {
29798 + if (status == chip->vendor.req_canceled) {
29799 dev_err(chip->dev, "Operation Canceled\n");
29800 rc = -ECANCELED;
29801 goto out;
29802 diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29803 index 0636520..169c1d0 100644
29804 --- a/drivers/char/tpm/tpm_bios.c
29805 +++ b/drivers/char/tpm/tpm_bios.c
29806 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29807 event = addr;
29808
29809 if ((event->event_type == 0 && event->event_size == 0) ||
29810 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29811 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29812 return NULL;
29813
29814 return addr;
29815 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29816 return NULL;
29817
29818 if ((event->event_type == 0 && event->event_size == 0) ||
29819 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29820 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29821 return NULL;
29822
29823 (*pos)++;
29824 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29825 int i;
29826
29827 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29828 - seq_putc(m, data[i]);
29829 + if (!seq_putc(m, data[i]))
29830 + return -EFAULT;
29831
29832 return 0;
29833 }
29834 @@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
29835 log->bios_event_log_end = log->bios_event_log + len;
29836
29837 virt = acpi_os_map_memory(start, len);
29838 + if (!virt) {
29839 + kfree(log->bios_event_log);
29840 + log->bios_event_log = NULL;
29841 + return -EFAULT;
29842 + }
29843
29844 - memcpy(log->bios_event_log, virt, len);
29845 + memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
29846
29847 acpi_os_unmap_memory(virt, len);
29848 return 0;
29849 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
29850 index cdf2f54..e55c197 100644
29851 --- a/drivers/char/virtio_console.c
29852 +++ b/drivers/char/virtio_console.c
29853 @@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29854 if (to_user) {
29855 ssize_t ret;
29856
29857 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29858 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29859 if (ret)
29860 return -EFAULT;
29861 } else {
29862 @@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29863 if (!port_has_data(port) && !port->host_connected)
29864 return 0;
29865
29866 - return fill_readbuf(port, ubuf, count, true);
29867 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29868 }
29869
29870 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
29871 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
29872 index 97f5064..202b6e6 100644
29873 --- a/drivers/edac/edac_pci_sysfs.c
29874 +++ b/drivers/edac/edac_pci_sysfs.c
29875 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29876 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29877 static int edac_pci_poll_msec = 1000; /* one second workq period */
29878
29879 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
29880 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29881 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29882 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29883
29884 static struct kobject *edac_pci_top_main_kobj;
29885 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29886 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29887 edac_printk(KERN_CRIT, EDAC_PCI,
29888 "Signaled System Error on %s\n",
29889 pci_name(dev));
29890 - atomic_inc(&pci_nonparity_count);
29891 + atomic_inc_unchecked(&pci_nonparity_count);
29892 }
29893
29894 if (status & (PCI_STATUS_PARITY)) {
29895 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29896 "Master Data Parity Error on %s\n",
29897 pci_name(dev));
29898
29899 - atomic_inc(&pci_parity_count);
29900 + atomic_inc_unchecked(&pci_parity_count);
29901 }
29902
29903 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29904 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29905 "Detected Parity Error on %s\n",
29906 pci_name(dev));
29907
29908 - atomic_inc(&pci_parity_count);
29909 + atomic_inc_unchecked(&pci_parity_count);
29910 }
29911 }
29912
29913 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29914 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29915 "Signaled System Error on %s\n",
29916 pci_name(dev));
29917 - atomic_inc(&pci_nonparity_count);
29918 + atomic_inc_unchecked(&pci_nonparity_count);
29919 }
29920
29921 if (status & (PCI_STATUS_PARITY)) {
29922 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29923 "Master Data Parity Error on "
29924 "%s\n", pci_name(dev));
29925
29926 - atomic_inc(&pci_parity_count);
29927 + atomic_inc_unchecked(&pci_parity_count);
29928 }
29929
29930 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29931 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29932 "Detected Parity Error on %s\n",
29933 pci_name(dev));
29934
29935 - atomic_inc(&pci_parity_count);
29936 + atomic_inc_unchecked(&pci_parity_count);
29937 }
29938 }
29939 }
29940 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29941 if (!check_pci_errors)
29942 return;
29943
29944 - before_count = atomic_read(&pci_parity_count);
29945 + before_count = atomic_read_unchecked(&pci_parity_count);
29946
29947 /* scan all PCI devices looking for a Parity Error on devices and
29948 * bridges.
29949 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29950 /* Only if operator has selected panic on PCI Error */
29951 if (edac_pci_get_panic_on_pe()) {
29952 /* If the count is different 'after' from 'before' */
29953 - if (before_count != atomic_read(&pci_parity_count))
29954 + if (before_count != atomic_read_unchecked(&pci_parity_count))
29955 panic("EDAC: PCI Parity Error");
29956 }
29957 }
29958 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
29959 index c6074c5..88a9e2e 100644
29960 --- a/drivers/edac/mce_amd.h
29961 +++ b/drivers/edac/mce_amd.h
29962 @@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29963 struct amd_decoder_ops {
29964 bool (*dc_mce)(u16, u8);
29965 bool (*ic_mce)(u16, u8);
29966 -};
29967 +} __no_const;
29968
29969 void amd_report_gart_errors(bool);
29970 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
29971 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
29972 index cc595eb..4ec702a 100644
29973 --- a/drivers/firewire/core-card.c
29974 +++ b/drivers/firewire/core-card.c
29975 @@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29976
29977 void fw_core_remove_card(struct fw_card *card)
29978 {
29979 - struct fw_card_driver dummy_driver = dummy_driver_template;
29980 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
29981
29982 card->driver->update_phy_reg(card, 4,
29983 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
29984 diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
29985 index 2e6b245..c3857d9 100644
29986 --- a/drivers/firewire/core-cdev.c
29987 +++ b/drivers/firewire/core-cdev.c
29988 @@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
29989 int ret;
29990
29991 if ((request->channels == 0 && request->bandwidth == 0) ||
29992 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29993 - request->bandwidth < 0)
29994 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29995 return -EINVAL;
29996
29997 r = kmalloc(sizeof(*r), GFP_KERNEL);
29998 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
29999 index dea2dcc..a4fb978 100644
30000 --- a/drivers/firewire/core-transaction.c
30001 +++ b/drivers/firewire/core-transaction.c
30002 @@ -37,6 +37,7 @@
30003 #include <linux/timer.h>
30004 #include <linux/types.h>
30005 #include <linux/workqueue.h>
30006 +#include <linux/sched.h>
30007
30008 #include <asm/byteorder.h>
30009
30010 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
30011 index 9047f55..e47c7ff 100644
30012 --- a/drivers/firewire/core.h
30013 +++ b/drivers/firewire/core.h
30014 @@ -110,6 +110,7 @@ struct fw_card_driver {
30015
30016 int (*stop_iso)(struct fw_iso_context *ctx);
30017 };
30018 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30019
30020 void fw_card_initialize(struct fw_card *card,
30021 const struct fw_card_driver *driver, struct device *device);
30022 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
30023 index 153980b..4b4d046 100644
30024 --- a/drivers/firmware/dmi_scan.c
30025 +++ b/drivers/firmware/dmi_scan.c
30026 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
30027 }
30028 }
30029 else {
30030 - /*
30031 - * no iounmap() for that ioremap(); it would be a no-op, but
30032 - * it's so early in setup that sucker gets confused into doing
30033 - * what it shouldn't if we actually call it.
30034 - */
30035 p = dmi_ioremap(0xF0000, 0x10000);
30036 if (p == NULL)
30037 goto error;
30038 @@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
30039 if (buf == NULL)
30040 return -1;
30041
30042 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30043 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30044
30045 iounmap(buf);
30046 return 0;
30047 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
30048 index 82d5c20..44a7177 100644
30049 --- a/drivers/gpio/gpio-vr41xx.c
30050 +++ b/drivers/gpio/gpio-vr41xx.c
30051 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30052 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30053 maskl, pendl, maskh, pendh);
30054
30055 - atomic_inc(&irq_err_count);
30056 + atomic_inc_unchecked(&irq_err_count);
30057
30058 return -EINVAL;
30059 }
30060 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
30061 index 8111889..367b253 100644
30062 --- a/drivers/gpu/drm/drm_crtc_helper.c
30063 +++ b/drivers/gpu/drm/drm_crtc_helper.c
30064 @@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
30065 struct drm_crtc *tmp;
30066 int crtc_mask = 1;
30067
30068 - WARN(!crtc, "checking null crtc?\n");
30069 + BUG_ON(!crtc);
30070
30071 dev = crtc->dev;
30072
30073 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
30074 index 6116e3b..c29dd16 100644
30075 --- a/drivers/gpu/drm/drm_drv.c
30076 +++ b/drivers/gpu/drm/drm_drv.c
30077 @@ -316,7 +316,7 @@ module_exit(drm_core_exit);
30078 /**
30079 * Copy and IOCTL return string to user space
30080 */
30081 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30082 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30083 {
30084 int len;
30085
30086 @@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
30087 return -ENODEV;
30088
30089 atomic_inc(&dev->ioctl_count);
30090 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30091 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30092 ++file_priv->ioctl_count;
30093
30094 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
30095 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
30096 index 123de28..43a0897 100644
30097 --- a/drivers/gpu/drm/drm_fops.c
30098 +++ b/drivers/gpu/drm/drm_fops.c
30099 @@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
30100 }
30101
30102 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30103 - atomic_set(&dev->counts[i], 0);
30104 + atomic_set_unchecked(&dev->counts[i], 0);
30105
30106 dev->sigdata.lock = NULL;
30107
30108 @@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
30109
30110 retcode = drm_open_helper(inode, filp, dev);
30111 if (!retcode) {
30112 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
30113 - if (!dev->open_count++)
30114 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
30115 + if (local_inc_return(&dev->open_count) == 1)
30116 retcode = drm_setup(dev);
30117 }
30118 if (!retcode) {
30119 @@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
30120
30121 mutex_lock(&drm_global_mutex);
30122
30123 - DRM_DEBUG("open_count = %d\n", dev->open_count);
30124 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
30125
30126 if (dev->driver->preclose)
30127 dev->driver->preclose(dev, file_priv);
30128 @@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
30129 * Begin inline drm_release
30130 */
30131
30132 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30133 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
30134 task_pid_nr(current),
30135 (long)old_encode_dev(file_priv->minor->device),
30136 - dev->open_count);
30137 + local_read(&dev->open_count));
30138
30139 /* Release any auth tokens that might point to this file_priv,
30140 (do that under the drm_global_mutex) */
30141 @@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
30142 * End inline drm_release
30143 */
30144
30145 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
30146 - if (!--dev->open_count) {
30147 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
30148 + if (local_dec_and_test(&dev->open_count)) {
30149 if (atomic_read(&dev->ioctl_count)) {
30150 DRM_ERROR("Device busy: %d\n",
30151 atomic_read(&dev->ioctl_count));
30152 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30153 index c87dc96..326055d 100644
30154 --- a/drivers/gpu/drm/drm_global.c
30155 +++ b/drivers/gpu/drm/drm_global.c
30156 @@ -36,7 +36,7 @@
30157 struct drm_global_item {
30158 struct mutex mutex;
30159 void *object;
30160 - int refcount;
30161 + atomic_t refcount;
30162 };
30163
30164 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30165 @@ -49,7 +49,7 @@ void drm_global_init(void)
30166 struct drm_global_item *item = &glob[i];
30167 mutex_init(&item->mutex);
30168 item->object = NULL;
30169 - item->refcount = 0;
30170 + atomic_set(&item->refcount, 0);
30171 }
30172 }
30173
30174 @@ -59,7 +59,7 @@ void drm_global_release(void)
30175 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30176 struct drm_global_item *item = &glob[i];
30177 BUG_ON(item->object != NULL);
30178 - BUG_ON(item->refcount != 0);
30179 + BUG_ON(atomic_read(&item->refcount) != 0);
30180 }
30181 }
30182
30183 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30184 void *object;
30185
30186 mutex_lock(&item->mutex);
30187 - if (item->refcount == 0) {
30188 + if (atomic_read(&item->refcount) == 0) {
30189 item->object = kzalloc(ref->size, GFP_KERNEL);
30190 if (unlikely(item->object == NULL)) {
30191 ret = -ENOMEM;
30192 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
30193 goto out_err;
30194
30195 }
30196 - ++item->refcount;
30197 + atomic_inc(&item->refcount);
30198 ref->object = item->object;
30199 object = item->object;
30200 mutex_unlock(&item->mutex);
30201 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
30202 struct drm_global_item *item = &glob[ref->global_type];
30203
30204 mutex_lock(&item->mutex);
30205 - BUG_ON(item->refcount == 0);
30206 + BUG_ON(atomic_read(&item->refcount) == 0);
30207 BUG_ON(ref->object != item->object);
30208 - if (--item->refcount == 0) {
30209 + if (atomic_dec_and_test(&item->refcount)) {
30210 ref->release(ref);
30211 item->object = NULL;
30212 }
30213 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30214 index ab1162d..42587b2 100644
30215 --- a/drivers/gpu/drm/drm_info.c
30216 +++ b/drivers/gpu/drm/drm_info.c
30217 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
30218 struct drm_local_map *map;
30219 struct drm_map_list *r_list;
30220
30221 - /* Hardcoded from _DRM_FRAME_BUFFER,
30222 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30223 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30224 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30225 + static const char * const types[] = {
30226 + [_DRM_FRAME_BUFFER] = "FB",
30227 + [_DRM_REGISTERS] = "REG",
30228 + [_DRM_SHM] = "SHM",
30229 + [_DRM_AGP] = "AGP",
30230 + [_DRM_SCATTER_GATHER] = "SG",
30231 + [_DRM_CONSISTENT] = "PCI",
30232 + [_DRM_GEM] = "GEM" };
30233 const char *type;
30234 int i;
30235
30236 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
30237 map = r_list->map;
30238 if (!map)
30239 continue;
30240 - if (map->type < 0 || map->type > 5)
30241 + if (map->type >= ARRAY_SIZE(types))
30242 type = "??";
30243 else
30244 type = types[map->type];
30245 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
30246 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30247 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30248 vma->vm_flags & VM_IO ? 'i' : '-',
30249 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30250 + 0);
30251 +#else
30252 vma->vm_pgoff);
30253 +#endif
30254
30255 #if defined(__i386__)
30256 pgprot = pgprot_val(vma->vm_page_prot);
30257 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
30258 index 637fcc3..e890b33 100644
30259 --- a/drivers/gpu/drm/drm_ioc32.c
30260 +++ b/drivers/gpu/drm/drm_ioc32.c
30261 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
30262 request = compat_alloc_user_space(nbytes);
30263 if (!access_ok(VERIFY_WRITE, request, nbytes))
30264 return -EFAULT;
30265 - list = (struct drm_buf_desc *) (request + 1);
30266 + list = (struct drm_buf_desc __user *) (request + 1);
30267
30268 if (__put_user(count, &request->count)
30269 || __put_user(list, &request->list))
30270 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
30271 request = compat_alloc_user_space(nbytes);
30272 if (!access_ok(VERIFY_WRITE, request, nbytes))
30273 return -EFAULT;
30274 - list = (struct drm_buf_pub *) (request + 1);
30275 + list = (struct drm_buf_pub __user *) (request + 1);
30276
30277 if (__put_user(count, &request->count)
30278 || __put_user(list, &request->list))
30279 diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
30280 index cf85155..f2665cb 100644
30281 --- a/drivers/gpu/drm/drm_ioctl.c
30282 +++ b/drivers/gpu/drm/drm_ioctl.c
30283 @@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
30284 stats->data[i].value =
30285 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30286 else
30287 - stats->data[i].value = atomic_read(&dev->counts[i]);
30288 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30289 stats->data[i].type = dev->types[i];
30290 }
30291
30292 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
30293 index c79c713..2048588 100644
30294 --- a/drivers/gpu/drm/drm_lock.c
30295 +++ b/drivers/gpu/drm/drm_lock.c
30296 @@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30297 if (drm_lock_take(&master->lock, lock->context)) {
30298 master->lock.file_priv = file_priv;
30299 master->lock.lock_time = jiffies;
30300 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30301 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30302 break; /* Got lock */
30303 }
30304
30305 @@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
30306 return -EINVAL;
30307 }
30308
30309 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30310 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30311
30312 if (drm_lock_free(&master->lock, lock->context)) {
30313 /* FIXME: Should really bail out here. */
30314 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30315 index aa454f8..6d38580 100644
30316 --- a/drivers/gpu/drm/drm_stub.c
30317 +++ b/drivers/gpu/drm/drm_stub.c
30318 @@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30319
30320 drm_device_set_unplugged(dev);
30321
30322 - if (dev->open_count == 0) {
30323 + if (local_read(&dev->open_count) == 0) {
30324 drm_put_dev(dev);
30325 }
30326 mutex_unlock(&drm_global_mutex);
30327 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
30328 index f920fb5..001c52d 100644
30329 --- a/drivers/gpu/drm/i810/i810_dma.c
30330 +++ b/drivers/gpu/drm/i810/i810_dma.c
30331 @@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
30332 dma->buflist[vertex->idx],
30333 vertex->discard, vertex->used);
30334
30335 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30336 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30337 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30338 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30339 sarea_priv->last_enqueue = dev_priv->counter - 1;
30340 sarea_priv->last_dispatch = (int)hw_status[5];
30341
30342 @@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
30343 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30344 mc->last_render);
30345
30346 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30347 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30348 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30349 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30350 sarea_priv->last_enqueue = dev_priv->counter - 1;
30351 sarea_priv->last_dispatch = (int)hw_status[5];
30352
30353 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30354 index c9339f4..f5e1b9d 100644
30355 --- a/drivers/gpu/drm/i810/i810_drv.h
30356 +++ b/drivers/gpu/drm/i810/i810_drv.h
30357 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30358 int page_flipping;
30359
30360 wait_queue_head_t irq_queue;
30361 - atomic_t irq_received;
30362 - atomic_t irq_emitted;
30363 + atomic_unchecked_t irq_received;
30364 + atomic_unchecked_t irq_emitted;
30365
30366 int front_offset;
30367 } drm_i810_private_t;
30368 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
30369 index e6162a1..b2ff486 100644
30370 --- a/drivers/gpu/drm/i915/i915_debugfs.c
30371 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
30372 @@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
30373 I915_READ(GTIMR));
30374 }
30375 seq_printf(m, "Interrupts received: %d\n",
30376 - atomic_read(&dev_priv->irq_received));
30377 + atomic_read_unchecked(&dev_priv->irq_received));
30378 for (i = 0; i < I915_NUM_RINGS; i++) {
30379 if (IS_GEN6(dev) || IS_GEN7(dev)) {
30380 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
30381 @@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
30382 return ret;
30383
30384 if (opregion->header)
30385 - seq_write(m, opregion->header, OPREGION_SIZE);
30386 + seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30387
30388 mutex_unlock(&dev->struct_mutex);
30389
30390 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
30391 index ba60f3c..e2dff7f 100644
30392 --- a/drivers/gpu/drm/i915/i915_dma.c
30393 +++ b/drivers/gpu/drm/i915/i915_dma.c
30394 @@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
30395 bool can_switch;
30396
30397 spin_lock(&dev->count_lock);
30398 - can_switch = (dev->open_count == 0);
30399 + can_switch = (local_read(&dev->open_count) == 0);
30400 spin_unlock(&dev->count_lock);
30401 return can_switch;
30402 }
30403 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
30404 index 5fabc6c..0b08aa1 100644
30405 --- a/drivers/gpu/drm/i915/i915_drv.h
30406 +++ b/drivers/gpu/drm/i915/i915_drv.h
30407 @@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
30408 /* render clock increase/decrease */
30409 /* display clock increase/decrease */
30410 /* pll clock increase/decrease */
30411 -};
30412 +} __no_const;
30413
30414 struct intel_device_info {
30415 u8 gen;
30416 @@ -350,7 +350,7 @@ typedef struct drm_i915_private {
30417 int current_page;
30418 int page_flipping;
30419
30420 - atomic_t irq_received;
30421 + atomic_unchecked_t irq_received;
30422
30423 /* protects the irq masks */
30424 spinlock_t irq_lock;
30425 @@ -937,7 +937,7 @@ struct drm_i915_gem_object {
30426 * will be page flipped away on the next vblank. When it
30427 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30428 */
30429 - atomic_t pending_flip;
30430 + atomic_unchecked_t pending_flip;
30431 };
30432
30433 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
30434 @@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
30435 extern void intel_teardown_gmbus(struct drm_device *dev);
30436 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30437 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30438 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30439 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30440 {
30441 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30442 }
30443 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30444 index de43194..a14c4cc 100644
30445 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30446 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30447 @@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
30448 i915_gem_clflush_object(obj);
30449
30450 if (obj->base.pending_write_domain)
30451 - cd->flips |= atomic_read(&obj->pending_flip);
30452 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30453
30454 /* The actual obj->write_domain will be updated with
30455 * pending_write_domain after we emit the accumulated flush for all
30456 @@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
30457
30458 static int
30459 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30460 - int count)
30461 + unsigned int count)
30462 {
30463 - int i;
30464 + unsigned int i;
30465
30466 for (i = 0; i < count; i++) {
30467 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30468 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
30469 index 26c67a7..8d4cbcb 100644
30470 --- a/drivers/gpu/drm/i915/i915_irq.c
30471 +++ b/drivers/gpu/drm/i915/i915_irq.c
30472 @@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
30473 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30474 struct drm_i915_master_private *master_priv;
30475
30476 - atomic_inc(&dev_priv->irq_received);
30477 + atomic_inc_unchecked(&dev_priv->irq_received);
30478
30479 /* disable master interrupt before clearing iir */
30480 de_ier = I915_READ(DEIER);
30481 @@ -579,7 +579,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
30482 struct drm_i915_master_private *master_priv;
30483 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30484
30485 - atomic_inc(&dev_priv->irq_received);
30486 + atomic_inc_unchecked(&dev_priv->irq_received);
30487
30488 if (IS_GEN6(dev))
30489 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
30490 @@ -1291,7 +1291,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
30491 int ret = IRQ_NONE, pipe;
30492 bool blc_event = false;
30493
30494 - atomic_inc(&dev_priv->irq_received);
30495 + atomic_inc_unchecked(&dev_priv->irq_received);
30496
30497 iir = I915_READ(IIR);
30498
30499 @@ -1802,7 +1802,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
30500 {
30501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30502
30503 - atomic_set(&dev_priv->irq_received, 0);
30504 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30505
30506 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30507 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30508 @@ -1979,7 +1979,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
30509 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30510 int pipe;
30511
30512 - atomic_set(&dev_priv->irq_received, 0);
30513 + atomic_set_unchecked(&dev_priv->irq_received, 0);
30514
30515 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30516 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
30517 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
30518 index d4d162f..b49a04e 100644
30519 --- a/drivers/gpu/drm/i915/intel_display.c
30520 +++ b/drivers/gpu/drm/i915/intel_display.c
30521 @@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
30522
30523 wait_event(dev_priv->pending_flip_queue,
30524 atomic_read(&dev_priv->mm.wedged) ||
30525 - atomic_read(&obj->pending_flip) == 0);
30526 + atomic_read_unchecked(&obj->pending_flip) == 0);
30527
30528 /* Big Hammer, we also need to ensure that any pending
30529 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30530 @@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
30531 obj = to_intel_framebuffer(crtc->fb)->obj;
30532 dev_priv = crtc->dev->dev_private;
30533 wait_event(dev_priv->pending_flip_queue,
30534 - atomic_read(&obj->pending_flip) == 0);
30535 + atomic_read_unchecked(&obj->pending_flip) == 0);
30536 }
30537
30538 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
30539 @@ -7284,9 +7284,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
30540
30541 obj = work->old_fb_obj;
30542
30543 - atomic_clear_mask(1 << intel_crtc->plane,
30544 - &obj->pending_flip.counter);
30545 - if (atomic_read(&obj->pending_flip) == 0)
30546 + atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
30547 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
30548 wake_up(&dev_priv->pending_flip_queue);
30549
30550 schedule_work(&work->work);
30551 @@ -7582,7 +7581,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30552 /* Block clients from rendering to the new back buffer until
30553 * the flip occurs and the object is no longer visible.
30554 */
30555 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30556 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30557
30558 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30559 if (ret)
30560 @@ -7596,7 +7595,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
30561 return 0;
30562
30563 cleanup_pending:
30564 - atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30565 + atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30566 drm_gem_object_unreference(&work->old_fb_obj->base);
30567 drm_gem_object_unreference(&obj->base);
30568 mutex_unlock(&dev->struct_mutex);
30569 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30570 index 54558a0..2d97005 100644
30571 --- a/drivers/gpu/drm/mga/mga_drv.h
30572 +++ b/drivers/gpu/drm/mga/mga_drv.h
30573 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30574 u32 clear_cmd;
30575 u32 maccess;
30576
30577 - atomic_t vbl_received; /**< Number of vblanks received. */
30578 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30579 wait_queue_head_t fence_queue;
30580 - atomic_t last_fence_retired;
30581 + atomic_unchecked_t last_fence_retired;
30582 u32 next_fence_to_post;
30583
30584 unsigned int fb_cpp;
30585 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30586 index 2581202..f230a8d9 100644
30587 --- a/drivers/gpu/drm/mga/mga_irq.c
30588 +++ b/drivers/gpu/drm/mga/mga_irq.c
30589 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
30590 if (crtc != 0)
30591 return 0;
30592
30593 - return atomic_read(&dev_priv->vbl_received);
30594 + return atomic_read_unchecked(&dev_priv->vbl_received);
30595 }
30596
30597
30598 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30599 /* VBLANK interrupt */
30600 if (status & MGA_VLINEPEN) {
30601 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30602 - atomic_inc(&dev_priv->vbl_received);
30603 + atomic_inc_unchecked(&dev_priv->vbl_received);
30604 drm_handle_vblank(dev, 0);
30605 handled = 1;
30606 }
30607 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
30608 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30609 MGA_WRITE(MGA_PRIMEND, prim_end);
30610
30611 - atomic_inc(&dev_priv->last_fence_retired);
30612 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
30613 DRM_WAKEUP(&dev_priv->fence_queue);
30614 handled = 1;
30615 }
30616 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
30617 * using fences.
30618 */
30619 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30620 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30621 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30622 - *sequence) <= (1 << 23)));
30623
30624 *sequence = cur_fence;
30625 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
30626 index 0be4a81..7464804 100644
30627 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30628 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
30629 @@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
30630 struct bit_table {
30631 const char id;
30632 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30633 -};
30634 +} __no_const;
30635
30636 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30637
30638 diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
30639 index 3aef353..0ad1322 100644
30640 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30641 +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
30642 @@ -240,7 +240,7 @@ struct nouveau_channel {
30643 struct list_head pending;
30644 uint32_t sequence;
30645 uint32_t sequence_ack;
30646 - atomic_t last_sequence_irq;
30647 + atomic_unchecked_t last_sequence_irq;
30648 struct nouveau_vma vma;
30649 } fence;
30650
30651 @@ -321,7 +321,7 @@ struct nouveau_exec_engine {
30652 u32 handle, u16 class);
30653 void (*set_tile_region)(struct drm_device *dev, int i);
30654 void (*tlb_flush)(struct drm_device *, int engine);
30655 -};
30656 +} __no_const;
30657
30658 struct nouveau_instmem_engine {
30659 void *priv;
30660 @@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
30661 struct nouveau_mc_engine {
30662 int (*init)(struct drm_device *dev);
30663 void (*takedown)(struct drm_device *dev);
30664 -};
30665 +} __no_const;
30666
30667 struct nouveau_timer_engine {
30668 int (*init)(struct drm_device *dev);
30669 void (*takedown)(struct drm_device *dev);
30670 uint64_t (*read)(struct drm_device *dev);
30671 -};
30672 +} __no_const;
30673
30674 struct nouveau_fb_engine {
30675 int num_tiles;
30676 @@ -590,7 +590,7 @@ struct nouveau_vram_engine {
30677 void (*put)(struct drm_device *, struct nouveau_mem **);
30678
30679 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30680 -};
30681 +} __no_const;
30682
30683 struct nouveau_engine {
30684 struct nouveau_instmem_engine instmem;
30685 @@ -739,7 +739,7 @@ struct drm_nouveau_private {
30686 struct drm_global_reference mem_global_ref;
30687 struct ttm_bo_global_ref bo_global_ref;
30688 struct ttm_bo_device bdev;
30689 - atomic_t validate_sequence;
30690 + atomic_unchecked_t validate_sequence;
30691 } ttm;
30692
30693 struct {
30694 diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
30695 index c1dc20f..4df673c 100644
30696 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30697 +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30698 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
30699 if (USE_REFCNT(dev))
30700 sequence = nvchan_rd32(chan, 0x48);
30701 else
30702 - sequence = atomic_read(&chan->fence.last_sequence_irq);
30703 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30704
30705 if (chan->fence.sequence_ack == sequence)
30706 goto out;
30707 @@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
30708 return ret;
30709 }
30710
30711 - atomic_set(&chan->fence.last_sequence_irq, 0);
30712 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
30713 return 0;
30714 }
30715
30716 diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
30717 index ed52a6f..484acdc 100644
30718 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30719 +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30720 @@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
30721 int trycnt = 0;
30722 int ret, i;
30723
30724 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30725 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30726 retry:
30727 if (++trycnt > 100000) {
30728 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
30729 diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
30730 index c2a8511..4b996f9 100644
30731 --- a/drivers/gpu/drm/nouveau/nouveau_state.c
30732 +++ b/drivers/gpu/drm/nouveau/nouveau_state.c
30733 @@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
30734 bool can_switch;
30735
30736 spin_lock(&dev->count_lock);
30737 - can_switch = (dev->open_count == 0);
30738 + can_switch = (local_read(&dev->open_count) == 0);
30739 spin_unlock(&dev->count_lock);
30740 return can_switch;
30741 }
30742 diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30743 index dbdea8e..cd6eeeb 100644
30744 --- a/drivers/gpu/drm/nouveau/nv04_graph.c
30745 +++ b/drivers/gpu/drm/nouveau/nv04_graph.c
30746 @@ -554,7 +554,7 @@ static int
30747 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30748 u32 class, u32 mthd, u32 data)
30749 {
30750 - atomic_set(&chan->fence.last_sequence_irq, data);
30751 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30752 return 0;
30753 }
30754
30755 diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30756 index 2746402..c8dc4a4 100644
30757 --- a/drivers/gpu/drm/nouveau/nv50_sor.c
30758 +++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30759 @@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30760 }
30761
30762 if (nv_encoder->dcb->type == OUTPUT_DP) {
30763 - struct dp_train_func func = {
30764 + static struct dp_train_func func = {
30765 .link_set = nv50_sor_dp_link_set,
30766 .train_set = nv50_sor_dp_train_set,
30767 .train_adj = nv50_sor_dp_train_adj
30768 diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30769 index 0247250..d2f6aaf 100644
30770 --- a/drivers/gpu/drm/nouveau/nvd0_display.c
30771 +++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30772 @@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30773 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30774
30775 if (nv_encoder->dcb->type == OUTPUT_DP) {
30776 - struct dp_train_func func = {
30777 + static struct dp_train_func func = {
30778 .link_set = nvd0_sor_dp_link_set,
30779 .train_set = nvd0_sor_dp_train_set,
30780 .train_adj = nvd0_sor_dp_train_adj
30781 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
30782 index bcac90b..53bfc76 100644
30783 --- a/drivers/gpu/drm/r128/r128_cce.c
30784 +++ b/drivers/gpu/drm/r128/r128_cce.c
30785 @@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
30786
30787 /* GH: Simple idle check.
30788 */
30789 - atomic_set(&dev_priv->idle_count, 0);
30790 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30791
30792 /* We don't support anything other than bus-mastering ring mode,
30793 * but the ring can be in either AGP or PCI space for the ring
30794 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30795 index 930c71b..499aded 100644
30796 --- a/drivers/gpu/drm/r128/r128_drv.h
30797 +++ b/drivers/gpu/drm/r128/r128_drv.h
30798 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30799 int is_pci;
30800 unsigned long cce_buffers_offset;
30801
30802 - atomic_t idle_count;
30803 + atomic_unchecked_t idle_count;
30804
30805 int page_flipping;
30806 int current_page;
30807 u32 crtc_offset;
30808 u32 crtc_offset_cntl;
30809
30810 - atomic_t vbl_received;
30811 + atomic_unchecked_t vbl_received;
30812
30813 u32 color_fmt;
30814 unsigned int front_offset;
30815 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30816 index 429d5a0..7e899ed 100644
30817 --- a/drivers/gpu/drm/r128/r128_irq.c
30818 +++ b/drivers/gpu/drm/r128/r128_irq.c
30819 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
30820 if (crtc != 0)
30821 return 0;
30822
30823 - return atomic_read(&dev_priv->vbl_received);
30824 + return atomic_read_unchecked(&dev_priv->vbl_received);
30825 }
30826
30827 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30828 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30829 /* VBLANK interrupt */
30830 if (status & R128_CRTC_VBLANK_INT) {
30831 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30832 - atomic_inc(&dev_priv->vbl_received);
30833 + atomic_inc_unchecked(&dev_priv->vbl_received);
30834 drm_handle_vblank(dev, 0);
30835 return IRQ_HANDLED;
30836 }
30837 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30838 index a9e33ce..09edd4b 100644
30839 --- a/drivers/gpu/drm/r128/r128_state.c
30840 +++ b/drivers/gpu/drm/r128/r128_state.c
30841 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
30842
30843 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30844 {
30845 - if (atomic_read(&dev_priv->idle_count) == 0)
30846 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30847 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30848 else
30849 - atomic_set(&dev_priv->idle_count, 0);
30850 + atomic_set_unchecked(&dev_priv->idle_count, 0);
30851 }
30852
30853 #endif
30854 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30855 index 5a82b6b..9e69c73 100644
30856 --- a/drivers/gpu/drm/radeon/mkregtable.c
30857 +++ b/drivers/gpu/drm/radeon/mkregtable.c
30858 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
30859 regex_t mask_rex;
30860 regmatch_t match[4];
30861 char buf[1024];
30862 - size_t end;
30863 + long end;
30864 int len;
30865 int done = 0;
30866 int r;
30867 unsigned o;
30868 struct offset *offset;
30869 char last_reg_s[10];
30870 - int last_reg;
30871 + unsigned long last_reg;
30872
30873 if (regcomp
30874 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30875 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
30876 index 138b952..d74f9cb 100644
30877 --- a/drivers/gpu/drm/radeon/radeon.h
30878 +++ b/drivers/gpu/drm/radeon/radeon.h
30879 @@ -253,7 +253,7 @@ struct radeon_fence_driver {
30880 uint32_t scratch_reg;
30881 uint64_t gpu_addr;
30882 volatile uint32_t *cpu_addr;
30883 - atomic_t seq;
30884 + atomic_unchecked_t seq;
30885 uint32_t last_seq;
30886 unsigned long last_jiffies;
30887 unsigned long last_timeout;
30888 @@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
30889 int x2, int y2);
30890 void (*draw_auto)(struct radeon_device *rdev);
30891 void (*set_default_state)(struct radeon_device *rdev);
30892 -};
30893 +} __no_const;
30894
30895 struct r600_blit {
30896 struct mutex mutex;
30897 @@ -1246,7 +1246,7 @@ struct radeon_asic {
30898 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30899 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30900 } pflip;
30901 -};
30902 +} __no_const;
30903
30904 /*
30905 * Asic structures
30906 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
30907 index 5992502..c19c633 100644
30908 --- a/drivers/gpu/drm/radeon/radeon_device.c
30909 +++ b/drivers/gpu/drm/radeon/radeon_device.c
30910 @@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
30911 bool can_switch;
30912
30913 spin_lock(&dev->count_lock);
30914 - can_switch = (dev->open_count == 0);
30915 + can_switch = (local_read(&dev->open_count) == 0);
30916 spin_unlock(&dev->count_lock);
30917 return can_switch;
30918 }
30919 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30920 index a1b59ca..86f2d44 100644
30921 --- a/drivers/gpu/drm/radeon/radeon_drv.h
30922 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
30923 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30924
30925 /* SW interrupt */
30926 wait_queue_head_t swi_queue;
30927 - atomic_t swi_emitted;
30928 + atomic_unchecked_t swi_emitted;
30929 int vblank_crtc;
30930 uint32_t irq_enable_reg;
30931 uint32_t r500_disp_irq_reg;
30932 diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
30933 index 4bd36a3..e66fe9c 100644
30934 --- a/drivers/gpu/drm/radeon/radeon_fence.c
30935 +++ b/drivers/gpu/drm/radeon/radeon_fence.c
30936 @@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30937 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
30938 return 0;
30939 }
30940 - fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30941 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30942 if (!rdev->ring[fence->ring].ready)
30943 /* FIXME: cp is not running assume everythings is done right
30944 * away
30945 @@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30946 }
30947 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30948 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30949 - radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30950 + radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30951 rdev->fence_drv[ring].initialized = true;
30952 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30953 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30954 @@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30955 rdev->fence_drv[ring].scratch_reg = -1;
30956 rdev->fence_drv[ring].cpu_addr = NULL;
30957 rdev->fence_drv[ring].gpu_addr = 0;
30958 - atomic_set(&rdev->fence_drv[ring].seq, 0);
30959 + atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30960 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30961 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30962 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
30963 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30964 index 48b7cea..342236f 100644
30965 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30966 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30967 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
30968 request = compat_alloc_user_space(sizeof(*request));
30969 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30970 || __put_user(req32.param, &request->param)
30971 - || __put_user((void __user *)(unsigned long)req32.value,
30972 + || __put_user((unsigned long)req32.value,
30973 &request->value))
30974 return -EFAULT;
30975
30976 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
30977 index 00da384..32f972d 100644
30978 --- a/drivers/gpu/drm/radeon/radeon_irq.c
30979 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
30980 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
30981 unsigned int ret;
30982 RING_LOCALS;
30983
30984 - atomic_inc(&dev_priv->swi_emitted);
30985 - ret = atomic_read(&dev_priv->swi_emitted);
30986 + atomic_inc_unchecked(&dev_priv->swi_emitted);
30987 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30988
30989 BEGIN_RING(4);
30990 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30991 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
30992 drm_radeon_private_t *dev_priv =
30993 (drm_radeon_private_t *) dev->dev_private;
30994
30995 - atomic_set(&dev_priv->swi_emitted, 0);
30996 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30997 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30998
30999 dev->max_vblank_count = 0x001fffff;
31000 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
31001 index e8422ae..d22d4a8 100644
31002 --- a/drivers/gpu/drm/radeon/radeon_state.c
31003 +++ b/drivers/gpu/drm/radeon/radeon_state.c
31004 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
31005 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31006 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31007
31008 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31009 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31010 sarea_priv->nbox * sizeof(depth_boxes[0])))
31011 return -EFAULT;
31012
31013 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
31014 {
31015 drm_radeon_private_t *dev_priv = dev->dev_private;
31016 drm_radeon_getparam_t *param = data;
31017 - int value;
31018 + int value = 0;
31019
31020 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31021
31022 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
31023 index f493c64..524ab6b 100644
31024 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
31025 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
31026 @@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
31027 }
31028 if (unlikely(ttm_vm_ops == NULL)) {
31029 ttm_vm_ops = vma->vm_ops;
31030 - radeon_ttm_vm_ops = *ttm_vm_ops;
31031 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31032 + pax_open_kernel();
31033 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31034 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31035 + pax_close_kernel();
31036 }
31037 vma->vm_ops = &radeon_ttm_vm_ops;
31038 return 0;
31039 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
31040 index f2c3b9d..d5a376b 100644
31041 --- a/drivers/gpu/drm/radeon/rs690.c
31042 +++ b/drivers/gpu/drm/radeon/rs690.c
31043 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
31044 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31045 rdev->pm.sideport_bandwidth.full)
31046 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31047 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31048 + read_delay_latency.full = dfixed_const(800 * 1000);
31049 read_delay_latency.full = dfixed_div(read_delay_latency,
31050 rdev->pm.igp_sideport_mclk);
31051 + a.full = dfixed_const(370);
31052 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31053 } else {
31054 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31055 rdev->pm.k8_bandwidth.full)
31056 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31057 index ebc6fac..a8313ed 100644
31058 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31059 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
31060 @@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
31061 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31062 struct shrink_control *sc)
31063 {
31064 - static atomic_t start_pool = ATOMIC_INIT(0);
31065 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31066 unsigned i;
31067 - unsigned pool_offset = atomic_add_return(1, &start_pool);
31068 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31069 struct ttm_page_pool *pool;
31070 int shrink_pages = sc->nr_to_scan;
31071
31072 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
31073 index 88edacc..1e5412b 100644
31074 --- a/drivers/gpu/drm/via/via_drv.h
31075 +++ b/drivers/gpu/drm/via/via_drv.h
31076 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31077 typedef uint32_t maskarray_t[5];
31078
31079 typedef struct drm_via_irq {
31080 - atomic_t irq_received;
31081 + atomic_unchecked_t irq_received;
31082 uint32_t pending_mask;
31083 uint32_t enable_mask;
31084 wait_queue_head_t irq_queue;
31085 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
31086 struct timeval last_vblank;
31087 int last_vblank_valid;
31088 unsigned usec_per_vblank;
31089 - atomic_t vbl_received;
31090 + atomic_unchecked_t vbl_received;
31091 drm_via_state_t hc_state;
31092 char pci_buf[VIA_PCI_BUF_SIZE];
31093 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
31094 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31095 index d391f48..10c8ca3 100644
31096 --- a/drivers/gpu/drm/via/via_irq.c
31097 +++ b/drivers/gpu/drm/via/via_irq.c
31098 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
31099 if (crtc != 0)
31100 return 0;
31101
31102 - return atomic_read(&dev_priv->vbl_received);
31103 + return atomic_read_unchecked(&dev_priv->vbl_received);
31104 }
31105
31106 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31107 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31108
31109 status = VIA_READ(VIA_REG_INTERRUPT);
31110 if (status & VIA_IRQ_VBLANK_PENDING) {
31111 - atomic_inc(&dev_priv->vbl_received);
31112 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31113 + atomic_inc_unchecked(&dev_priv->vbl_received);
31114 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31115 do_gettimeofday(&cur_vblank);
31116 if (dev_priv->last_vblank_valid) {
31117 dev_priv->usec_per_vblank =
31118 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31119 dev_priv->last_vblank = cur_vblank;
31120 dev_priv->last_vblank_valid = 1;
31121 }
31122 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31123 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31124 DRM_DEBUG("US per vblank is: %u\n",
31125 dev_priv->usec_per_vblank);
31126 }
31127 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
31128
31129 for (i = 0; i < dev_priv->num_irqs; ++i) {
31130 if (status & cur_irq->pending_mask) {
31131 - atomic_inc(&cur_irq->irq_received);
31132 + atomic_inc_unchecked(&cur_irq->irq_received);
31133 DRM_WAKEUP(&cur_irq->irq_queue);
31134 handled = 1;
31135 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
31136 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
31137 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31138 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31139 masks[irq][4]));
31140 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31141 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31142 } else {
31143 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31144 (((cur_irq_sequence =
31145 - atomic_read(&cur_irq->irq_received)) -
31146 + atomic_read_unchecked(&cur_irq->irq_received)) -
31147 *sequence) <= (1 << 23)));
31148 }
31149 *sequence = cur_irq_sequence;
31150 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
31151 }
31152
31153 for (i = 0; i < dev_priv->num_irqs; ++i) {
31154 - atomic_set(&cur_irq->irq_received, 0);
31155 + atomic_set_unchecked(&cur_irq->irq_received, 0);
31156 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31157 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31158 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
31159 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
31160 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31161 case VIA_IRQ_RELATIVE:
31162 irqwait->request.sequence +=
31163 - atomic_read(&cur_irq->irq_received);
31164 + atomic_read_unchecked(&cur_irq->irq_received);
31165 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31166 case VIA_IRQ_ABSOLUTE:
31167 break;
31168 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31169 index d0f2c07..9ebd9c3 100644
31170 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31171 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31172 @@ -263,7 +263,7 @@ struct vmw_private {
31173 * Fencing and IRQs.
31174 */
31175
31176 - atomic_t marker_seq;
31177 + atomic_unchecked_t marker_seq;
31178 wait_queue_head_t fence_queue;
31179 wait_queue_head_t fifo_queue;
31180 int fence_queue_waiters; /* Protected by hw_mutex */
31181 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31182 index a0c2f12..68ae6cb 100644
31183 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31184 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31185 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
31186 (unsigned int) min,
31187 (unsigned int) fifo->capabilities);
31188
31189 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31190 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31191 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31192 vmw_marker_queue_init(&fifo->marker_queue);
31193 return vmw_fifo_send_fence(dev_priv, &dummy);
31194 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
31195 if (reserveable)
31196 iowrite32(bytes, fifo_mem +
31197 SVGA_FIFO_RESERVED);
31198 - return fifo_mem + (next_cmd >> 2);
31199 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31200 } else {
31201 need_bounce = true;
31202 }
31203 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31204
31205 fm = vmw_fifo_reserve(dev_priv, bytes);
31206 if (unlikely(fm == NULL)) {
31207 - *seqno = atomic_read(&dev_priv->marker_seq);
31208 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31209 ret = -ENOMEM;
31210 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
31211 false, 3*HZ);
31212 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
31213 }
31214
31215 do {
31216 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31217 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31218 } while (*seqno == 0);
31219
31220 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
31221 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31222 index cabc95f..14b3d77 100644
31223 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31224 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31225 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
31226 * emitted. Then the fence is stale and signaled.
31227 */
31228
31229 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31230 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
31231 > VMW_FENCE_WRAP);
31232
31233 return ret;
31234 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
31235
31236 if (fifo_idle)
31237 down_read(&fifo_state->rwsem);
31238 - signal_seq = atomic_read(&dev_priv->marker_seq);
31239 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
31240 ret = 0;
31241
31242 for (;;) {
31243 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31244 index 8a8725c..afed796 100644
31245 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31246 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31247 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31248 while (!vmw_lag_lt(queue, us)) {
31249 spin_lock(&queue->lock);
31250 if (list_empty(&queue->head))
31251 - seqno = atomic_read(&dev_priv->marker_seq);
31252 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31253 else {
31254 marker = list_first_entry(&queue->head,
31255 struct vmw_marker, head);
31256 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
31257 index 054677b..741672a 100644
31258 --- a/drivers/hid/hid-core.c
31259 +++ b/drivers/hid/hid-core.c
31260 @@ -2070,7 +2070,7 @@ static bool hid_ignore(struct hid_device *hdev)
31261
31262 int hid_add_device(struct hid_device *hdev)
31263 {
31264 - static atomic_t id = ATOMIC_INIT(0);
31265 + static atomic_unchecked_t id = ATOMIC_INIT(0);
31266 int ret;
31267
31268 if (WARN_ON(hdev->status & HID_STAT_ADDED))
31269 @@ -2085,7 +2085,7 @@ int hid_add_device(struct hid_device *hdev)
31270 /* XXX hack, any other cleaner solution after the driver core
31271 * is converted to allow more than 20 bytes as the device name? */
31272 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31273 - hdev->vendor, hdev->product, atomic_inc_return(&id));
31274 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31275
31276 hid_debug_register(hdev, dev_name(&hdev->dev));
31277 ret = device_add(&hdev->dev);
31278 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31279 index eec3291..8ed706b 100644
31280 --- a/drivers/hid/hid-wiimote-debug.c
31281 +++ b/drivers/hid/hid-wiimote-debug.c
31282 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31283 else if (size == 0)
31284 return -EIO;
31285
31286 - if (copy_to_user(u, buf, size))
31287 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
31288 return -EFAULT;
31289
31290 *off += size;
31291 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
31292 index b1ec0e2..c295a61 100644
31293 --- a/drivers/hid/usbhid/hiddev.c
31294 +++ b/drivers/hid/usbhid/hiddev.c
31295 @@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31296 break;
31297
31298 case HIDIOCAPPLICATION:
31299 - if (arg < 0 || arg >= hid->maxapplication)
31300 + if (arg >= hid->maxapplication)
31301 break;
31302
31303 for (i = 0; i < hid->maxcollection; i++)
31304 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31305 index 4065374..10ed7dc 100644
31306 --- a/drivers/hv/channel.c
31307 +++ b/drivers/hv/channel.c
31308 @@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31309 int ret = 0;
31310 int t;
31311
31312 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31313 - atomic_inc(&vmbus_connection.next_gpadl_handle);
31314 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31315 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31316
31317 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31318 if (ret)
31319 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
31320 index 15956bd..ea34398 100644
31321 --- a/drivers/hv/hv.c
31322 +++ b/drivers/hv/hv.c
31323 @@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31324 u64 output_address = (output) ? virt_to_phys(output) : 0;
31325 u32 output_address_hi = output_address >> 32;
31326 u32 output_address_lo = output_address & 0xFFFFFFFF;
31327 - void *hypercall_page = hv_context.hypercall_page;
31328 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31329
31330 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31331 "=a"(hv_status_lo) : "d" (control_hi),
31332 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
31333 index 699f0d8..f4f19250 100644
31334 --- a/drivers/hv/hyperv_vmbus.h
31335 +++ b/drivers/hv/hyperv_vmbus.h
31336 @@ -555,7 +555,7 @@ enum vmbus_connect_state {
31337 struct vmbus_connection {
31338 enum vmbus_connect_state conn_state;
31339
31340 - atomic_t next_gpadl_handle;
31341 + atomic_unchecked_t next_gpadl_handle;
31342
31343 /*
31344 * Represents channel interrupts. Each bit position represents a
31345 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
31346 index a220e57..428f54d 100644
31347 --- a/drivers/hv/vmbus_drv.c
31348 +++ b/drivers/hv/vmbus_drv.c
31349 @@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31350 {
31351 int ret = 0;
31352
31353 - static atomic_t device_num = ATOMIC_INIT(0);
31354 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31355
31356 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31357 - atomic_inc_return(&device_num));
31358 + atomic_inc_return_unchecked(&device_num));
31359
31360 child_device_obj->device.bus = &hv_bus;
31361 child_device_obj->device.parent = &hv_acpi_dev->dev;
31362 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
31363 index 9140236..ceaef4e 100644
31364 --- a/drivers/hwmon/acpi_power_meter.c
31365 +++ b/drivers/hwmon/acpi_power_meter.c
31366 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
31367 return res;
31368
31369 temp /= 1000;
31370 - if (temp < 0)
31371 - return -EINVAL;
31372
31373 mutex_lock(&resource->lock);
31374 resource->trip[attr->index - 7] = temp;
31375 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
31376 index 8b011d0..3de24a1 100644
31377 --- a/drivers/hwmon/sht15.c
31378 +++ b/drivers/hwmon/sht15.c
31379 @@ -166,7 +166,7 @@ struct sht15_data {
31380 int supply_uV;
31381 bool supply_uV_valid;
31382 struct work_struct update_supply_work;
31383 - atomic_t interrupt_handled;
31384 + atomic_unchecked_t interrupt_handled;
31385 };
31386
31387 /**
31388 @@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
31389 return ret;
31390
31391 gpio_direction_input(data->pdata->gpio_data);
31392 - atomic_set(&data->interrupt_handled, 0);
31393 + atomic_set_unchecked(&data->interrupt_handled, 0);
31394
31395 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31396 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31397 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
31398 /* Only relevant if the interrupt hasn't occurred. */
31399 - if (!atomic_read(&data->interrupt_handled))
31400 + if (!atomic_read_unchecked(&data->interrupt_handled))
31401 schedule_work(&data->read_work);
31402 }
31403 ret = wait_event_timeout(data->wait_queue,
31404 @@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
31405
31406 /* First disable the interrupt */
31407 disable_irq_nosync(irq);
31408 - atomic_inc(&data->interrupt_handled);
31409 + atomic_inc_unchecked(&data->interrupt_handled);
31410 /* Then schedule a reading work struct */
31411 if (data->state != SHT15_READING_NOTHING)
31412 schedule_work(&data->read_work);
31413 @@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
31414 * If not, then start the interrupt again - care here as could
31415 * have gone low in meantime so verify it hasn't!
31416 */
31417 - atomic_set(&data->interrupt_handled, 0);
31418 + atomic_set_unchecked(&data->interrupt_handled, 0);
31419 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31420 /* If still not occurred or another handler was scheduled */
31421 if (gpio_get_value(data->pdata->gpio_data)
31422 - || atomic_read(&data->interrupt_handled))
31423 + || atomic_read_unchecked(&data->interrupt_handled))
31424 return;
31425 }
31426
31427 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31428 index 378fcb5..5e91fa8 100644
31429 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
31430 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
31431 @@ -43,7 +43,7 @@
31432 extern struct i2c_adapter amd756_smbus;
31433
31434 static struct i2c_adapter *s4882_adapter;
31435 -static struct i2c_algorithm *s4882_algo;
31436 +static i2c_algorithm_no_const *s4882_algo;
31437
31438 /* Wrapper access functions for multiplexed SMBus */
31439 static DEFINE_MUTEX(amd756_lock);
31440 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31441 index 29015eb..af2d8e9 100644
31442 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31443 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
31444 @@ -41,7 +41,7 @@
31445 extern struct i2c_adapter *nforce2_smbus;
31446
31447 static struct i2c_adapter *s4985_adapter;
31448 -static struct i2c_algorithm *s4985_algo;
31449 +static i2c_algorithm_no_const *s4985_algo;
31450
31451 /* Wrapper access functions for multiplexed SMBus */
31452 static DEFINE_MUTEX(nforce2_lock);
31453 diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31454 index d7a4833..7fae376 100644
31455 --- a/drivers/i2c/i2c-mux.c
31456 +++ b/drivers/i2c/i2c-mux.c
31457 @@ -28,7 +28,7 @@
31458 /* multiplexer per channel data */
31459 struct i2c_mux_priv {
31460 struct i2c_adapter adap;
31461 - struct i2c_algorithm algo;
31462 + i2c_algorithm_no_const algo;
31463
31464 struct i2c_adapter *parent;
31465 void *mux_dev; /* the mux chip/device */
31466 diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31467 index 57d00ca..0145194 100644
31468 --- a/drivers/ide/aec62xx.c
31469 +++ b/drivers/ide/aec62xx.c
31470 @@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
31471 .cable_detect = atp86x_cable_detect,
31472 };
31473
31474 -static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31475 +static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31476 { /* 0: AEC6210 */
31477 .name = DRV_NAME,
31478 .init_chipset = init_chipset_aec62xx,
31479 diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31480 index 2c8016a..911a27c 100644
31481 --- a/drivers/ide/alim15x3.c
31482 +++ b/drivers/ide/alim15x3.c
31483 @@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
31484 .dma_sff_read_status = ide_dma_sff_read_status,
31485 };
31486
31487 -static const struct ide_port_info ali15x3_chipset __devinitdata = {
31488 +static const struct ide_port_info ali15x3_chipset __devinitconst = {
31489 .name = DRV_NAME,
31490 .init_chipset = init_chipset_ali15x3,
31491 .init_hwif = init_hwif_ali15x3,
31492 diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31493 index 3747b25..56fc995 100644
31494 --- a/drivers/ide/amd74xx.c
31495 +++ b/drivers/ide/amd74xx.c
31496 @@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
31497 .udma_mask = udma, \
31498 }
31499
31500 -static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31501 +static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31502 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31503 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31504 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
31505 diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31506 index 15f0ead..cb43480 100644
31507 --- a/drivers/ide/atiixp.c
31508 +++ b/drivers/ide/atiixp.c
31509 @@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
31510 .cable_detect = atiixp_cable_detect,
31511 };
31512
31513 -static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31514 +static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31515 { /* 0: IXP200/300/400/700 */
31516 .name = DRV_NAME,
31517 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
31518 diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31519 index 5f80312..d1fc438 100644
31520 --- a/drivers/ide/cmd64x.c
31521 +++ b/drivers/ide/cmd64x.c
31522 @@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
31523 .dma_sff_read_status = ide_dma_sff_read_status,
31524 };
31525
31526 -static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31527 +static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31528 { /* 0: CMD643 */
31529 .name = DRV_NAME,
31530 .init_chipset = init_chipset_cmd64x,
31531 diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31532 index 2c1e5f7..1444762 100644
31533 --- a/drivers/ide/cs5520.c
31534 +++ b/drivers/ide/cs5520.c
31535 @@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
31536 .set_dma_mode = cs5520_set_dma_mode,
31537 };
31538
31539 -static const struct ide_port_info cyrix_chipset __devinitdata = {
31540 +static const struct ide_port_info cyrix_chipset __devinitconst = {
31541 .name = DRV_NAME,
31542 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31543 .port_ops = &cs5520_port_ops,
31544 diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31545 index 4dc4eb9..49b40ad 100644
31546 --- a/drivers/ide/cs5530.c
31547 +++ b/drivers/ide/cs5530.c
31548 @@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
31549 .udma_filter = cs5530_udma_filter,
31550 };
31551
31552 -static const struct ide_port_info cs5530_chipset __devinitdata = {
31553 +static const struct ide_port_info cs5530_chipset __devinitconst = {
31554 .name = DRV_NAME,
31555 .init_chipset = init_chipset_cs5530,
31556 .init_hwif = init_hwif_cs5530,
31557 diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31558 index 5059faf..18d4c85 100644
31559 --- a/drivers/ide/cs5535.c
31560 +++ b/drivers/ide/cs5535.c
31561 @@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
31562 .cable_detect = cs5535_cable_detect,
31563 };
31564
31565 -static const struct ide_port_info cs5535_chipset __devinitdata = {
31566 +static const struct ide_port_info cs5535_chipset __devinitconst = {
31567 .name = DRV_NAME,
31568 .port_ops = &cs5535_port_ops,
31569 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
31570 diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
31571 index 847553f..3ffb49d 100644
31572 --- a/drivers/ide/cy82c693.c
31573 +++ b/drivers/ide/cy82c693.c
31574 @@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
31575 .set_dma_mode = cy82c693_set_dma_mode,
31576 };
31577
31578 -static const struct ide_port_info cy82c693_chipset __devinitdata = {
31579 +static const struct ide_port_info cy82c693_chipset __devinitconst = {
31580 .name = DRV_NAME,
31581 .init_iops = init_iops_cy82c693,
31582 .port_ops = &cy82c693_port_ops,
31583 diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31584 index 58c51cd..4aec3b8 100644
31585 --- a/drivers/ide/hpt366.c
31586 +++ b/drivers/ide/hpt366.c
31587 @@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
31588 }
31589 };
31590
31591 -static const struct hpt_info hpt36x __devinitdata = {
31592 +static const struct hpt_info hpt36x __devinitconst = {
31593 .chip_name = "HPT36x",
31594 .chip_type = HPT36x,
31595 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
31596 @@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
31597 .timings = &hpt36x_timings
31598 };
31599
31600 -static const struct hpt_info hpt370 __devinitdata = {
31601 +static const struct hpt_info hpt370 __devinitconst = {
31602 .chip_name = "HPT370",
31603 .chip_type = HPT370,
31604 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31605 @@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
31606 .timings = &hpt37x_timings
31607 };
31608
31609 -static const struct hpt_info hpt370a __devinitdata = {
31610 +static const struct hpt_info hpt370a __devinitconst = {
31611 .chip_name = "HPT370A",
31612 .chip_type = HPT370A,
31613 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
31614 @@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
31615 .timings = &hpt37x_timings
31616 };
31617
31618 -static const struct hpt_info hpt374 __devinitdata = {
31619 +static const struct hpt_info hpt374 __devinitconst = {
31620 .chip_name = "HPT374",
31621 .chip_type = HPT374,
31622 .udma_mask = ATA_UDMA5,
31623 @@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
31624 .timings = &hpt37x_timings
31625 };
31626
31627 -static const struct hpt_info hpt372 __devinitdata = {
31628 +static const struct hpt_info hpt372 __devinitconst = {
31629 .chip_name = "HPT372",
31630 .chip_type = HPT372,
31631 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31632 @@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
31633 .timings = &hpt37x_timings
31634 };
31635
31636 -static const struct hpt_info hpt372a __devinitdata = {
31637 +static const struct hpt_info hpt372a __devinitconst = {
31638 .chip_name = "HPT372A",
31639 .chip_type = HPT372A,
31640 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31641 @@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
31642 .timings = &hpt37x_timings
31643 };
31644
31645 -static const struct hpt_info hpt302 __devinitdata = {
31646 +static const struct hpt_info hpt302 __devinitconst = {
31647 .chip_name = "HPT302",
31648 .chip_type = HPT302,
31649 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31650 @@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
31651 .timings = &hpt37x_timings
31652 };
31653
31654 -static const struct hpt_info hpt371 __devinitdata = {
31655 +static const struct hpt_info hpt371 __devinitconst = {
31656 .chip_name = "HPT371",
31657 .chip_type = HPT371,
31658 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31659 @@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
31660 .timings = &hpt37x_timings
31661 };
31662
31663 -static const struct hpt_info hpt372n __devinitdata = {
31664 +static const struct hpt_info hpt372n __devinitconst = {
31665 .chip_name = "HPT372N",
31666 .chip_type = HPT372N,
31667 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31668 @@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
31669 .timings = &hpt37x_timings
31670 };
31671
31672 -static const struct hpt_info hpt302n __devinitdata = {
31673 +static const struct hpt_info hpt302n __devinitconst = {
31674 .chip_name = "HPT302N",
31675 .chip_type = HPT302N,
31676 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31677 @@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
31678 .timings = &hpt37x_timings
31679 };
31680
31681 -static const struct hpt_info hpt371n __devinitdata = {
31682 +static const struct hpt_info hpt371n __devinitconst = {
31683 .chip_name = "HPT371N",
31684 .chip_type = HPT371N,
31685 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
31686 @@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
31687 .dma_sff_read_status = ide_dma_sff_read_status,
31688 };
31689
31690 -static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31691 +static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31692 { /* 0: HPT36x */
31693 .name = DRV_NAME,
31694 .init_chipset = init_chipset_hpt366,
31695 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
31696 index 8126824..55a2798 100644
31697 --- a/drivers/ide/ide-cd.c
31698 +++ b/drivers/ide/ide-cd.c
31699 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
31700 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31701 if ((unsigned long)buf & alignment
31702 || blk_rq_bytes(rq) & q->dma_pad_mask
31703 - || object_is_on_stack(buf))
31704 + || object_starts_on_stack(buf))
31705 drive->dma = 0;
31706 }
31707 }
31708 diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
31709 index 7f56b73..dab5b67 100644
31710 --- a/drivers/ide/ide-pci-generic.c
31711 +++ b/drivers/ide/ide-pci-generic.c
31712 @@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
31713 .udma_mask = ATA_UDMA6, \
31714 }
31715
31716 -static const struct ide_port_info generic_chipsets[] __devinitdata = {
31717 +static const struct ide_port_info generic_chipsets[] __devinitconst = {
31718 /* 0: Unknown */
31719 DECLARE_GENERIC_PCI_DEV(0),
31720
31721 diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31722 index 560e66d..d5dd180 100644
31723 --- a/drivers/ide/it8172.c
31724 +++ b/drivers/ide/it8172.c
31725 @@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
31726 .set_dma_mode = it8172_set_dma_mode,
31727 };
31728
31729 -static const struct ide_port_info it8172_port_info __devinitdata = {
31730 +static const struct ide_port_info it8172_port_info __devinitconst = {
31731 .name = DRV_NAME,
31732 .port_ops = &it8172_port_ops,
31733 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
31734 diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31735 index 46816ba..1847aeb 100644
31736 --- a/drivers/ide/it8213.c
31737 +++ b/drivers/ide/it8213.c
31738 @@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
31739 .cable_detect = it8213_cable_detect,
31740 };
31741
31742 -static const struct ide_port_info it8213_chipset __devinitdata = {
31743 +static const struct ide_port_info it8213_chipset __devinitconst = {
31744 .name = DRV_NAME,
31745 .enablebits = { {0x41, 0x80, 0x80} },
31746 .port_ops = &it8213_port_ops,
31747 diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31748 index 2e3169f..c5611db 100644
31749 --- a/drivers/ide/it821x.c
31750 +++ b/drivers/ide/it821x.c
31751 @@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
31752 .cable_detect = it821x_cable_detect,
31753 };
31754
31755 -static const struct ide_port_info it821x_chipset __devinitdata = {
31756 +static const struct ide_port_info it821x_chipset __devinitconst = {
31757 .name = DRV_NAME,
31758 .init_chipset = init_chipset_it821x,
31759 .init_hwif = init_hwif_it821x,
31760 diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31761 index 74c2c4a..efddd7d 100644
31762 --- a/drivers/ide/jmicron.c
31763 +++ b/drivers/ide/jmicron.c
31764 @@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
31765 .cable_detect = jmicron_cable_detect,
31766 };
31767
31768 -static const struct ide_port_info jmicron_chipset __devinitdata = {
31769 +static const struct ide_port_info jmicron_chipset __devinitconst = {
31770 .name = DRV_NAME,
31771 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31772 .port_ops = &jmicron_port_ops,
31773 diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31774 index 95327a2..73f78d8 100644
31775 --- a/drivers/ide/ns87415.c
31776 +++ b/drivers/ide/ns87415.c
31777 @@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
31778 .dma_sff_read_status = superio_dma_sff_read_status,
31779 };
31780
31781 -static const struct ide_port_info ns87415_chipset __devinitdata = {
31782 +static const struct ide_port_info ns87415_chipset __devinitconst = {
31783 .name = DRV_NAME,
31784 .init_hwif = init_hwif_ns87415,
31785 .tp_ops = &ns87415_tp_ops,
31786 diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31787 index 1a53a4c..39edc66 100644
31788 --- a/drivers/ide/opti621.c
31789 +++ b/drivers/ide/opti621.c
31790 @@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
31791 .set_pio_mode = opti621_set_pio_mode,
31792 };
31793
31794 -static const struct ide_port_info opti621_chipset __devinitdata = {
31795 +static const struct ide_port_info opti621_chipset __devinitconst = {
31796 .name = DRV_NAME,
31797 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31798 .port_ops = &opti621_port_ops,
31799 diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31800 index 9546fe2..2e5ceb6 100644
31801 --- a/drivers/ide/pdc202xx_new.c
31802 +++ b/drivers/ide/pdc202xx_new.c
31803 @@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
31804 .udma_mask = udma, \
31805 }
31806
31807 -static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31808 +static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31809 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31810 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31811 };
31812 diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31813 index 3a35ec6..5634510 100644
31814 --- a/drivers/ide/pdc202xx_old.c
31815 +++ b/drivers/ide/pdc202xx_old.c
31816 @@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
31817 .max_sectors = sectors, \
31818 }
31819
31820 -static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31821 +static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31822 { /* 0: PDC20246 */
31823 .name = DRV_NAME,
31824 .init_chipset = init_chipset_pdc202xx,
31825 diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
31826 index 1892e81..fe0fd60 100644
31827 --- a/drivers/ide/piix.c
31828 +++ b/drivers/ide/piix.c
31829 @@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
31830 .udma_mask = udma, \
31831 }
31832
31833 -static const struct ide_port_info piix_pci_info[] __devinitdata = {
31834 +static const struct ide_port_info piix_pci_info[] __devinitconst = {
31835 /* 0: MPIIX */
31836 { /*
31837 * MPIIX actually has only a single IDE channel mapped to
31838 diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31839 index a6414a8..c04173e 100644
31840 --- a/drivers/ide/rz1000.c
31841 +++ b/drivers/ide/rz1000.c
31842 @@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
31843 }
31844 }
31845
31846 -static const struct ide_port_info rz1000_chipset __devinitdata = {
31847 +static const struct ide_port_info rz1000_chipset __devinitconst = {
31848 .name = DRV_NAME,
31849 .host_flags = IDE_HFLAG_NO_DMA,
31850 };
31851 diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31852 index 356b9b5..d4758eb 100644
31853 --- a/drivers/ide/sc1200.c
31854 +++ b/drivers/ide/sc1200.c
31855 @@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
31856 .dma_sff_read_status = ide_dma_sff_read_status,
31857 };
31858
31859 -static const struct ide_port_info sc1200_chipset __devinitdata = {
31860 +static const struct ide_port_info sc1200_chipset __devinitconst = {
31861 .name = DRV_NAME,
31862 .port_ops = &sc1200_port_ops,
31863 .dma_ops = &sc1200_dma_ops,
31864 diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31865 index b7f5b0c..9701038 100644
31866 --- a/drivers/ide/scc_pata.c
31867 +++ b/drivers/ide/scc_pata.c
31868 @@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
31869 .dma_sff_read_status = scc_dma_sff_read_status,
31870 };
31871
31872 -static const struct ide_port_info scc_chipset __devinitdata = {
31873 +static const struct ide_port_info scc_chipset __devinitconst = {
31874 .name = "sccIDE",
31875 .init_iops = init_iops_scc,
31876 .init_dma = scc_init_dma,
31877 diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31878 index 35fb8da..24d72ef 100644
31879 --- a/drivers/ide/serverworks.c
31880 +++ b/drivers/ide/serverworks.c
31881 @@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
31882 .cable_detect = svwks_cable_detect,
31883 };
31884
31885 -static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31886 +static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31887 { /* 0: OSB4 */
31888 .name = DRV_NAME,
31889 .init_chipset = init_chipset_svwks,
31890 diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31891 index ddeda44..46f7e30 100644
31892 --- a/drivers/ide/siimage.c
31893 +++ b/drivers/ide/siimage.c
31894 @@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
31895 .udma_mask = ATA_UDMA6, \
31896 }
31897
31898 -static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31899 +static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31900 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31901 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31902 };
31903 diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31904 index 4a00225..09e61b4 100644
31905 --- a/drivers/ide/sis5513.c
31906 +++ b/drivers/ide/sis5513.c
31907 @@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
31908 .cable_detect = sis_cable_detect,
31909 };
31910
31911 -static const struct ide_port_info sis5513_chipset __devinitdata = {
31912 +static const struct ide_port_info sis5513_chipset __devinitconst = {
31913 .name = DRV_NAME,
31914 .init_chipset = init_chipset_sis5513,
31915 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
31916 diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31917 index f21dc2a..d051cd2 100644
31918 --- a/drivers/ide/sl82c105.c
31919 +++ b/drivers/ide/sl82c105.c
31920 @@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
31921 .dma_sff_read_status = ide_dma_sff_read_status,
31922 };
31923
31924 -static const struct ide_port_info sl82c105_chipset __devinitdata = {
31925 +static const struct ide_port_info sl82c105_chipset __devinitconst = {
31926 .name = DRV_NAME,
31927 .init_chipset = init_chipset_sl82c105,
31928 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
31929 diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31930 index 864ffe0..863a5e9 100644
31931 --- a/drivers/ide/slc90e66.c
31932 +++ b/drivers/ide/slc90e66.c
31933 @@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
31934 .cable_detect = slc90e66_cable_detect,
31935 };
31936
31937 -static const struct ide_port_info slc90e66_chipset __devinitdata = {
31938 +static const struct ide_port_info slc90e66_chipset __devinitconst = {
31939 .name = DRV_NAME,
31940 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31941 .port_ops = &slc90e66_port_ops,
31942 diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
31943 index 4799d5c..1794678 100644
31944 --- a/drivers/ide/tc86c001.c
31945 +++ b/drivers/ide/tc86c001.c
31946 @@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
31947 .dma_sff_read_status = ide_dma_sff_read_status,
31948 };
31949
31950 -static const struct ide_port_info tc86c001_chipset __devinitdata = {
31951 +static const struct ide_port_info tc86c001_chipset __devinitconst = {
31952 .name = DRV_NAME,
31953 .init_hwif = init_hwif_tc86c001,
31954 .port_ops = &tc86c001_port_ops,
31955 diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
31956 index 281c914..55ce1b8 100644
31957 --- a/drivers/ide/triflex.c
31958 +++ b/drivers/ide/triflex.c
31959 @@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
31960 .set_dma_mode = triflex_set_mode,
31961 };
31962
31963 -static const struct ide_port_info triflex_device __devinitdata = {
31964 +static const struct ide_port_info triflex_device __devinitconst = {
31965 .name = DRV_NAME,
31966 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31967 .port_ops = &triflex_port_ops,
31968 diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31969 index 4b42ca0..e494a98 100644
31970 --- a/drivers/ide/trm290.c
31971 +++ b/drivers/ide/trm290.c
31972 @@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
31973 .dma_check = trm290_dma_check,
31974 };
31975
31976 -static const struct ide_port_info trm290_chipset __devinitdata = {
31977 +static const struct ide_port_info trm290_chipset __devinitconst = {
31978 .name = DRV_NAME,
31979 .init_hwif = init_hwif_trm290,
31980 .tp_ops = &trm290_tp_ops,
31981 diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
31982 index f46f49c..eb77678 100644
31983 --- a/drivers/ide/via82cxxx.c
31984 +++ b/drivers/ide/via82cxxx.c
31985 @@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
31986 .cable_detect = via82cxxx_cable_detect,
31987 };
31988
31989 -static const struct ide_port_info via82cxxx_chipset __devinitdata = {
31990 +static const struct ide_port_info via82cxxx_chipset __devinitconst = {
31991 .name = DRV_NAME,
31992 .init_chipset = init_chipset_via82cxxx,
31993 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
31994 diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
31995 index 73d4531..c90cd2d 100644
31996 --- a/drivers/ieee802154/fakehard.c
31997 +++ b/drivers/ieee802154/fakehard.c
31998 @@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
31999 phy->transmit_power = 0xbf;
32000
32001 dev->netdev_ops = &fake_ops;
32002 - dev->ml_priv = &fake_mlme;
32003 + dev->ml_priv = (void *)&fake_mlme;
32004
32005 priv = netdev_priv(dev);
32006 priv->phy = phy;
32007 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
32008 index c889aae..6cf5aa7 100644
32009 --- a/drivers/infiniband/core/cm.c
32010 +++ b/drivers/infiniband/core/cm.c
32011 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
32012
32013 struct cm_counter_group {
32014 struct kobject obj;
32015 - atomic_long_t counter[CM_ATTR_COUNT];
32016 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32017 };
32018
32019 struct cm_counter_attribute {
32020 @@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
32021 struct ib_mad_send_buf *msg = NULL;
32022 int ret;
32023
32024 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32025 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32026 counter[CM_REQ_COUNTER]);
32027
32028 /* Quick state check to discard duplicate REQs. */
32029 @@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
32030 if (!cm_id_priv)
32031 return;
32032
32033 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32034 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32035 counter[CM_REP_COUNTER]);
32036 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32037 if (ret)
32038 @@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
32039 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32040 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32041 spin_unlock_irq(&cm_id_priv->lock);
32042 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32043 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32044 counter[CM_RTU_COUNTER]);
32045 goto out;
32046 }
32047 @@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
32048 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32049 dreq_msg->local_comm_id);
32050 if (!cm_id_priv) {
32051 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32052 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32053 counter[CM_DREQ_COUNTER]);
32054 cm_issue_drep(work->port, work->mad_recv_wc);
32055 return -EINVAL;
32056 @@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
32057 case IB_CM_MRA_REP_RCVD:
32058 break;
32059 case IB_CM_TIMEWAIT:
32060 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32061 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32062 counter[CM_DREQ_COUNTER]);
32063 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32064 goto unlock;
32065 @@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
32066 cm_free_msg(msg);
32067 goto deref;
32068 case IB_CM_DREQ_RCVD:
32069 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32070 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32071 counter[CM_DREQ_COUNTER]);
32072 goto unlock;
32073 default:
32074 @@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
32075 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32076 cm_id_priv->msg, timeout)) {
32077 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32078 - atomic_long_inc(&work->port->
32079 + atomic_long_inc_unchecked(&work->port->
32080 counter_group[CM_RECV_DUPLICATES].
32081 counter[CM_MRA_COUNTER]);
32082 goto out;
32083 @@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
32084 break;
32085 case IB_CM_MRA_REQ_RCVD:
32086 case IB_CM_MRA_REP_RCVD:
32087 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32088 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32089 counter[CM_MRA_COUNTER]);
32090 /* fall through */
32091 default:
32092 @@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
32093 case IB_CM_LAP_IDLE:
32094 break;
32095 case IB_CM_MRA_LAP_SENT:
32096 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32097 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32098 counter[CM_LAP_COUNTER]);
32099 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32100 goto unlock;
32101 @@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
32102 cm_free_msg(msg);
32103 goto deref;
32104 case IB_CM_LAP_RCVD:
32105 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32106 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32107 counter[CM_LAP_COUNTER]);
32108 goto unlock;
32109 default:
32110 @@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
32111 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32112 if (cur_cm_id_priv) {
32113 spin_unlock_irq(&cm.lock);
32114 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32115 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32116 counter[CM_SIDR_REQ_COUNTER]);
32117 goto out; /* Duplicate message. */
32118 }
32119 @@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
32120 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32121 msg->retries = 1;
32122
32123 - atomic_long_add(1 + msg->retries,
32124 + atomic_long_add_unchecked(1 + msg->retries,
32125 &port->counter_group[CM_XMIT].counter[attr_index]);
32126 if (msg->retries)
32127 - atomic_long_add(msg->retries,
32128 + atomic_long_add_unchecked(msg->retries,
32129 &port->counter_group[CM_XMIT_RETRIES].
32130 counter[attr_index]);
32131
32132 @@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
32133 }
32134
32135 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32136 - atomic_long_inc(&port->counter_group[CM_RECV].
32137 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32138 counter[attr_id - CM_ATTR_ID_OFFSET]);
32139
32140 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
32141 @@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
32142 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32143
32144 return sprintf(buf, "%ld\n",
32145 - atomic_long_read(&group->counter[cm_attr->index]));
32146 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32147 }
32148
32149 static const struct sysfs_ops cm_counter_ops = {
32150 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
32151 index 176c8f9..2627b62 100644
32152 --- a/drivers/infiniband/core/fmr_pool.c
32153 +++ b/drivers/infiniband/core/fmr_pool.c
32154 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
32155
32156 struct task_struct *thread;
32157
32158 - atomic_t req_ser;
32159 - atomic_t flush_ser;
32160 + atomic_unchecked_t req_ser;
32161 + atomic_unchecked_t flush_ser;
32162
32163 wait_queue_head_t force_wait;
32164 };
32165 @@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32166 struct ib_fmr_pool *pool = pool_ptr;
32167
32168 do {
32169 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32170 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32171 ib_fmr_batch_release(pool);
32172
32173 - atomic_inc(&pool->flush_ser);
32174 + atomic_inc_unchecked(&pool->flush_ser);
32175 wake_up_interruptible(&pool->force_wait);
32176
32177 if (pool->flush_function)
32178 @@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
32179 }
32180
32181 set_current_state(TASK_INTERRUPTIBLE);
32182 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32183 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32184 !kthread_should_stop())
32185 schedule();
32186 __set_current_state(TASK_RUNNING);
32187 @@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
32188 pool->dirty_watermark = params->dirty_watermark;
32189 pool->dirty_len = 0;
32190 spin_lock_init(&pool->pool_lock);
32191 - atomic_set(&pool->req_ser, 0);
32192 - atomic_set(&pool->flush_ser, 0);
32193 + atomic_set_unchecked(&pool->req_ser, 0);
32194 + atomic_set_unchecked(&pool->flush_ser, 0);
32195 init_waitqueue_head(&pool->force_wait);
32196
32197 pool->thread = kthread_run(ib_fmr_cleanup_thread,
32198 @@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
32199 }
32200 spin_unlock_irq(&pool->pool_lock);
32201
32202 - serial = atomic_inc_return(&pool->req_ser);
32203 + serial = atomic_inc_return_unchecked(&pool->req_ser);
32204 wake_up_process(pool->thread);
32205
32206 if (wait_event_interruptible(pool->force_wait,
32207 - atomic_read(&pool->flush_ser) - serial >= 0))
32208 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32209 return -EINTR;
32210
32211 return 0;
32212 @@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
32213 } else {
32214 list_add_tail(&fmr->list, &pool->dirty_list);
32215 if (++pool->dirty_len >= pool->dirty_watermark) {
32216 - atomic_inc(&pool->req_ser);
32217 + atomic_inc_unchecked(&pool->req_ser);
32218 wake_up_process(pool->thread);
32219 }
32220 }
32221 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32222 index 40c8353..946b0e4 100644
32223 --- a/drivers/infiniband/hw/cxgb4/mem.c
32224 +++ b/drivers/infiniband/hw/cxgb4/mem.c
32225 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32226 int err;
32227 struct fw_ri_tpte tpt;
32228 u32 stag_idx;
32229 - static atomic_t key;
32230 + static atomic_unchecked_t key;
32231
32232 if (c4iw_fatal_error(rdev))
32233 return -EIO;
32234 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
32235 &rdev->resource.tpt_fifo_lock);
32236 if (!stag_idx)
32237 return -ENOMEM;
32238 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32239 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32240 }
32241 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32242 __func__, stag_state, type, pdid, stag_idx);
32243 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32244 index 79b3dbc..96e5fcc 100644
32245 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
32246 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32247 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32248 struct ib_atomic_eth *ateth;
32249 struct ipath_ack_entry *e;
32250 u64 vaddr;
32251 - atomic64_t *maddr;
32252 + atomic64_unchecked_t *maddr;
32253 u64 sdata;
32254 u32 rkey;
32255 u8 next;
32256 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
32257 IB_ACCESS_REMOTE_ATOMIC)))
32258 goto nack_acc_unlck;
32259 /* Perform atomic OP and save result. */
32260 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32261 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32262 sdata = be64_to_cpu(ateth->swap_data);
32263 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32264 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32265 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32266 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32267 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32268 be64_to_cpu(ateth->compare_data),
32269 sdata);
32270 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32271 index 1f95bba..9530f87 100644
32272 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32273 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32274 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
32275 unsigned long flags;
32276 struct ib_wc wc;
32277 u64 sdata;
32278 - atomic64_t *maddr;
32279 + atomic64_unchecked_t *maddr;
32280 enum ib_wc_status send_status;
32281
32282 /*
32283 @@ -382,11 +382,11 @@ again:
32284 IB_ACCESS_REMOTE_ATOMIC)))
32285 goto acc_err;
32286 /* Perform atomic OP and save result. */
32287 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32288 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32289 sdata = wqe->wr.wr.atomic.compare_add;
32290 *(u64 *) sqp->s_sge.sge.vaddr =
32291 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32292 - (u64) atomic64_add_return(sdata, maddr) - sdata :
32293 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32294 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32295 sdata, wqe->wr.wr.atomic.swap);
32296 goto send_comp;
32297 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
32298 index 7140199..da60063 100644
32299 --- a/drivers/infiniband/hw/nes/nes.c
32300 +++ b/drivers/infiniband/hw/nes/nes.c
32301 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
32302 LIST_HEAD(nes_adapter_list);
32303 static LIST_HEAD(nes_dev_list);
32304
32305 -atomic_t qps_destroyed;
32306 +atomic_unchecked_t qps_destroyed;
32307
32308 static unsigned int ee_flsh_adapter;
32309 static unsigned int sysfs_nonidx_addr;
32310 @@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
32311 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32312 struct nes_adapter *nesadapter = nesdev->nesadapter;
32313
32314 - atomic_inc(&qps_destroyed);
32315 + atomic_inc_unchecked(&qps_destroyed);
32316
32317 /* Free the control structures */
32318
32319 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
32320 index c438e46..ca30356 100644
32321 --- a/drivers/infiniband/hw/nes/nes.h
32322 +++ b/drivers/infiniband/hw/nes/nes.h
32323 @@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
32324 extern unsigned int wqm_quanta;
32325 extern struct list_head nes_adapter_list;
32326
32327 -extern atomic_t cm_connects;
32328 -extern atomic_t cm_accepts;
32329 -extern atomic_t cm_disconnects;
32330 -extern atomic_t cm_closes;
32331 -extern atomic_t cm_connecteds;
32332 -extern atomic_t cm_connect_reqs;
32333 -extern atomic_t cm_rejects;
32334 -extern atomic_t mod_qp_timouts;
32335 -extern atomic_t qps_created;
32336 -extern atomic_t qps_destroyed;
32337 -extern atomic_t sw_qps_destroyed;
32338 +extern atomic_unchecked_t cm_connects;
32339 +extern atomic_unchecked_t cm_accepts;
32340 +extern atomic_unchecked_t cm_disconnects;
32341 +extern atomic_unchecked_t cm_closes;
32342 +extern atomic_unchecked_t cm_connecteds;
32343 +extern atomic_unchecked_t cm_connect_reqs;
32344 +extern atomic_unchecked_t cm_rejects;
32345 +extern atomic_unchecked_t mod_qp_timouts;
32346 +extern atomic_unchecked_t qps_created;
32347 +extern atomic_unchecked_t qps_destroyed;
32348 +extern atomic_unchecked_t sw_qps_destroyed;
32349 extern u32 mh_detected;
32350 extern u32 mh_pauses_sent;
32351 extern u32 cm_packets_sent;
32352 @@ -197,16 +197,16 @@ extern u32 cm_packets_created;
32353 extern u32 cm_packets_received;
32354 extern u32 cm_packets_dropped;
32355 extern u32 cm_packets_retrans;
32356 -extern atomic_t cm_listens_created;
32357 -extern atomic_t cm_listens_destroyed;
32358 +extern atomic_unchecked_t cm_listens_created;
32359 +extern atomic_unchecked_t cm_listens_destroyed;
32360 extern u32 cm_backlog_drops;
32361 -extern atomic_t cm_loopbacks;
32362 -extern atomic_t cm_nodes_created;
32363 -extern atomic_t cm_nodes_destroyed;
32364 -extern atomic_t cm_accel_dropped_pkts;
32365 -extern atomic_t cm_resets_recvd;
32366 -extern atomic_t pau_qps_created;
32367 -extern atomic_t pau_qps_destroyed;
32368 +extern atomic_unchecked_t cm_loopbacks;
32369 +extern atomic_unchecked_t cm_nodes_created;
32370 +extern atomic_unchecked_t cm_nodes_destroyed;
32371 +extern atomic_unchecked_t cm_accel_dropped_pkts;
32372 +extern atomic_unchecked_t cm_resets_recvd;
32373 +extern atomic_unchecked_t pau_qps_created;
32374 +extern atomic_unchecked_t pau_qps_destroyed;
32375
32376 extern u32 int_mod_timer_init;
32377 extern u32 int_mod_cq_depth_256;
32378 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
32379 index 71edfbb..15b62ae 100644
32380 --- a/drivers/infiniband/hw/nes/nes_cm.c
32381 +++ b/drivers/infiniband/hw/nes/nes_cm.c
32382 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32383 u32 cm_packets_retrans;
32384 u32 cm_packets_created;
32385 u32 cm_packets_received;
32386 -atomic_t cm_listens_created;
32387 -atomic_t cm_listens_destroyed;
32388 +atomic_unchecked_t cm_listens_created;
32389 +atomic_unchecked_t cm_listens_destroyed;
32390 u32 cm_backlog_drops;
32391 -atomic_t cm_loopbacks;
32392 -atomic_t cm_nodes_created;
32393 -atomic_t cm_nodes_destroyed;
32394 -atomic_t cm_accel_dropped_pkts;
32395 -atomic_t cm_resets_recvd;
32396 +atomic_unchecked_t cm_loopbacks;
32397 +atomic_unchecked_t cm_nodes_created;
32398 +atomic_unchecked_t cm_nodes_destroyed;
32399 +atomic_unchecked_t cm_accel_dropped_pkts;
32400 +atomic_unchecked_t cm_resets_recvd;
32401
32402 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32403 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32404 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
32405
32406 static struct nes_cm_core *g_cm_core;
32407
32408 -atomic_t cm_connects;
32409 -atomic_t cm_accepts;
32410 -atomic_t cm_disconnects;
32411 -atomic_t cm_closes;
32412 -atomic_t cm_connecteds;
32413 -atomic_t cm_connect_reqs;
32414 -atomic_t cm_rejects;
32415 +atomic_unchecked_t cm_connects;
32416 +atomic_unchecked_t cm_accepts;
32417 +atomic_unchecked_t cm_disconnects;
32418 +atomic_unchecked_t cm_closes;
32419 +atomic_unchecked_t cm_connecteds;
32420 +atomic_unchecked_t cm_connect_reqs;
32421 +atomic_unchecked_t cm_rejects;
32422
32423 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32424 {
32425 @@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
32426 kfree(listener);
32427 listener = NULL;
32428 ret = 0;
32429 - atomic_inc(&cm_listens_destroyed);
32430 + atomic_inc_unchecked(&cm_listens_destroyed);
32431 } else {
32432 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32433 }
32434 @@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
32435 cm_node->rem_mac);
32436
32437 add_hte_node(cm_core, cm_node);
32438 - atomic_inc(&cm_nodes_created);
32439 + atomic_inc_unchecked(&cm_nodes_created);
32440
32441 return cm_node;
32442 }
32443 @@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
32444 }
32445
32446 atomic_dec(&cm_core->node_cnt);
32447 - atomic_inc(&cm_nodes_destroyed);
32448 + atomic_inc_unchecked(&cm_nodes_destroyed);
32449 nesqp = cm_node->nesqp;
32450 if (nesqp) {
32451 nesqp->cm_node = NULL;
32452 @@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
32453
32454 static void drop_packet(struct sk_buff *skb)
32455 {
32456 - atomic_inc(&cm_accel_dropped_pkts);
32457 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32458 dev_kfree_skb_any(skb);
32459 }
32460
32461 @@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
32462 {
32463
32464 int reset = 0; /* whether to send reset in case of err.. */
32465 - atomic_inc(&cm_resets_recvd);
32466 + atomic_inc_unchecked(&cm_resets_recvd);
32467 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32468 " refcnt=%d\n", cm_node, cm_node->state,
32469 atomic_read(&cm_node->ref_count));
32470 @@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
32471 rem_ref_cm_node(cm_node->cm_core, cm_node);
32472 return NULL;
32473 }
32474 - atomic_inc(&cm_loopbacks);
32475 + atomic_inc_unchecked(&cm_loopbacks);
32476 loopbackremotenode->loopbackpartner = cm_node;
32477 loopbackremotenode->tcp_cntxt.rcv_wscale =
32478 NES_CM_DEFAULT_RCV_WND_SCALE;
32479 @@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
32480 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32481 else {
32482 rem_ref_cm_node(cm_core, cm_node);
32483 - atomic_inc(&cm_accel_dropped_pkts);
32484 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
32485 dev_kfree_skb_any(skb);
32486 }
32487 break;
32488 @@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32489
32490 if ((cm_id) && (cm_id->event_handler)) {
32491 if (issue_disconn) {
32492 - atomic_inc(&cm_disconnects);
32493 + atomic_inc_unchecked(&cm_disconnects);
32494 cm_event.event = IW_CM_EVENT_DISCONNECT;
32495 cm_event.status = disconn_status;
32496 cm_event.local_addr = cm_id->local_addr;
32497 @@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
32498 }
32499
32500 if (issue_close) {
32501 - atomic_inc(&cm_closes);
32502 + atomic_inc_unchecked(&cm_closes);
32503 nes_disconnect(nesqp, 1);
32504
32505 cm_id->provider_data = nesqp;
32506 @@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32507
32508 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32509 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32510 - atomic_inc(&cm_accepts);
32511 + atomic_inc_unchecked(&cm_accepts);
32512
32513 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32514 netdev_refcnt_read(nesvnic->netdev));
32515 @@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
32516 struct nes_cm_core *cm_core;
32517 u8 *start_buff;
32518
32519 - atomic_inc(&cm_rejects);
32520 + atomic_inc_unchecked(&cm_rejects);
32521 cm_node = (struct nes_cm_node *)cm_id->provider_data;
32522 loopback = cm_node->loopbackpartner;
32523 cm_core = cm_node->cm_core;
32524 @@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
32525 ntohl(cm_id->local_addr.sin_addr.s_addr),
32526 ntohs(cm_id->local_addr.sin_port));
32527
32528 - atomic_inc(&cm_connects);
32529 + atomic_inc_unchecked(&cm_connects);
32530 nesqp->active_conn = 1;
32531
32532 /* cache the cm_id in the qp */
32533 @@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
32534 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32535 return err;
32536 }
32537 - atomic_inc(&cm_listens_created);
32538 + atomic_inc_unchecked(&cm_listens_created);
32539 }
32540
32541 cm_id->add_ref(cm_id);
32542 @@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
32543
32544 if (nesqp->destroyed)
32545 return;
32546 - atomic_inc(&cm_connecteds);
32547 + atomic_inc_unchecked(&cm_connecteds);
32548 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
32549 " local port 0x%04X. jiffies = %lu.\n",
32550 nesqp->hwqp.qp_id,
32551 @@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
32552
32553 cm_id->add_ref(cm_id);
32554 ret = cm_id->event_handler(cm_id, &cm_event);
32555 - atomic_inc(&cm_closes);
32556 + atomic_inc_unchecked(&cm_closes);
32557 cm_event.event = IW_CM_EVENT_CLOSE;
32558 cm_event.status = 0;
32559 cm_event.provider_data = cm_id->provider_data;
32560 @@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
32561 return;
32562 cm_id = cm_node->cm_id;
32563
32564 - atomic_inc(&cm_connect_reqs);
32565 + atomic_inc_unchecked(&cm_connect_reqs);
32566 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32567 cm_node, cm_id, jiffies);
32568
32569 @@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
32570 return;
32571 cm_id = cm_node->cm_id;
32572
32573 - atomic_inc(&cm_connect_reqs);
32574 + atomic_inc_unchecked(&cm_connect_reqs);
32575 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
32576 cm_node, cm_id, jiffies);
32577
32578 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
32579 index 3ba7be3..c81f6ff 100644
32580 --- a/drivers/infiniband/hw/nes/nes_mgt.c
32581 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
32582 @@ -40,8 +40,8 @@
32583 #include "nes.h"
32584 #include "nes_mgt.h"
32585
32586 -atomic_t pau_qps_created;
32587 -atomic_t pau_qps_destroyed;
32588 +atomic_unchecked_t pau_qps_created;
32589 +atomic_unchecked_t pau_qps_destroyed;
32590
32591 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32592 {
32593 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32594 {
32595 struct sk_buff *skb;
32596 unsigned long flags;
32597 - atomic_inc(&pau_qps_destroyed);
32598 + atomic_inc_unchecked(&pau_qps_destroyed);
32599
32600 /* Free packets that have not yet been forwarded */
32601 /* Lock is acquired by skb_dequeue when removing the skb */
32602 @@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32603 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32604 skb_queue_head_init(&nesqp->pau_list);
32605 spin_lock_init(&nesqp->pau_lock);
32606 - atomic_inc(&pau_qps_created);
32607 + atomic_inc_unchecked(&pau_qps_created);
32608 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32609 }
32610
32611 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
32612 index f3a3ecf..57d311d 100644
32613 --- a/drivers/infiniband/hw/nes/nes_nic.c
32614 +++ b/drivers/infiniband/hw/nes/nes_nic.c
32615 @@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
32616 target_stat_values[++index] = mh_detected;
32617 target_stat_values[++index] = mh_pauses_sent;
32618 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32619 - target_stat_values[++index] = atomic_read(&cm_connects);
32620 - target_stat_values[++index] = atomic_read(&cm_accepts);
32621 - target_stat_values[++index] = atomic_read(&cm_disconnects);
32622 - target_stat_values[++index] = atomic_read(&cm_connecteds);
32623 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32624 - target_stat_values[++index] = atomic_read(&cm_rejects);
32625 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32626 - target_stat_values[++index] = atomic_read(&qps_created);
32627 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32628 - target_stat_values[++index] = atomic_read(&qps_destroyed);
32629 - target_stat_values[++index] = atomic_read(&cm_closes);
32630 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32631 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32632 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32633 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32634 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32635 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32636 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32637 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32638 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32639 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32640 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32641 target_stat_values[++index] = cm_packets_sent;
32642 target_stat_values[++index] = cm_packets_bounced;
32643 target_stat_values[++index] = cm_packets_created;
32644 target_stat_values[++index] = cm_packets_received;
32645 target_stat_values[++index] = cm_packets_dropped;
32646 target_stat_values[++index] = cm_packets_retrans;
32647 - target_stat_values[++index] = atomic_read(&cm_listens_created);
32648 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32649 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32650 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32651 target_stat_values[++index] = cm_backlog_drops;
32652 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
32653 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
32654 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32655 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32656 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32657 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32658 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32659 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32660 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32661 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32662 target_stat_values[++index] = nesadapter->free_4kpbl;
32663 target_stat_values[++index] = nesadapter->free_256pbl;
32664 target_stat_values[++index] = int_mod_timer_init;
32665 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32666 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32667 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32668 - target_stat_values[++index] = atomic_read(&pau_qps_created);
32669 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32670 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32671 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32672 }
32673
32674 /**
32675 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
32676 index 8b8812d..a5e1133 100644
32677 --- a/drivers/infiniband/hw/nes/nes_verbs.c
32678 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
32679 @@ -46,9 +46,9 @@
32680
32681 #include <rdma/ib_umem.h>
32682
32683 -atomic_t mod_qp_timouts;
32684 -atomic_t qps_created;
32685 -atomic_t sw_qps_destroyed;
32686 +atomic_unchecked_t mod_qp_timouts;
32687 +atomic_unchecked_t qps_created;
32688 +atomic_unchecked_t sw_qps_destroyed;
32689
32690 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32691
32692 @@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
32693 if (init_attr->create_flags)
32694 return ERR_PTR(-EINVAL);
32695
32696 - atomic_inc(&qps_created);
32697 + atomic_inc_unchecked(&qps_created);
32698 switch (init_attr->qp_type) {
32699 case IB_QPT_RC:
32700 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
32701 @@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
32702 struct iw_cm_event cm_event;
32703 int ret = 0;
32704
32705 - atomic_inc(&sw_qps_destroyed);
32706 + atomic_inc_unchecked(&sw_qps_destroyed);
32707 nesqp->destroyed = 1;
32708
32709 /* Blow away the connection if it exists. */
32710 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
32711 index 6b811e3..f8acf88 100644
32712 --- a/drivers/infiniband/hw/qib/qib.h
32713 +++ b/drivers/infiniband/hw/qib/qib.h
32714 @@ -51,6 +51,7 @@
32715 #include <linux/completion.h>
32716 #include <linux/kref.h>
32717 #include <linux/sched.h>
32718 +#include <linux/slab.h>
32719
32720 #include "qib_common.h"
32721 #include "qib_verbs.h"
32722 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
32723 index da739d9..da1c7f4 100644
32724 --- a/drivers/input/gameport/gameport.c
32725 +++ b/drivers/input/gameport/gameport.c
32726 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
32727 */
32728 static void gameport_init_port(struct gameport *gameport)
32729 {
32730 - static atomic_t gameport_no = ATOMIC_INIT(0);
32731 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32732
32733 __module_get(THIS_MODULE);
32734
32735 mutex_init(&gameport->drv_mutex);
32736 device_initialize(&gameport->dev);
32737 dev_set_name(&gameport->dev, "gameport%lu",
32738 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
32739 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32740 gameport->dev.bus = &gameport_bus;
32741 gameport->dev.release = gameport_release_port;
32742 if (gameport->parent)
32743 diff --git a/drivers/input/input.c b/drivers/input/input.c
32744 index 8921c61..f5cd63d 100644
32745 --- a/drivers/input/input.c
32746 +++ b/drivers/input/input.c
32747 @@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
32748 */
32749 int input_register_device(struct input_dev *dev)
32750 {
32751 - static atomic_t input_no = ATOMIC_INIT(0);
32752 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32753 struct input_handler *handler;
32754 const char *path;
32755 int error;
32756 @@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
32757 dev->setkeycode = input_default_setkeycode;
32758
32759 dev_set_name(&dev->dev, "input%ld",
32760 - (unsigned long) atomic_inc_return(&input_no) - 1);
32761 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32762
32763 error = device_add(&dev->dev);
32764 if (error)
32765 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
32766 index b8d8611..7a4a04b 100644
32767 --- a/drivers/input/joystick/sidewinder.c
32768 +++ b/drivers/input/joystick/sidewinder.c
32769 @@ -30,6 +30,7 @@
32770 #include <linux/kernel.h>
32771 #include <linux/module.h>
32772 #include <linux/slab.h>
32773 +#include <linux/sched.h>
32774 #include <linux/init.h>
32775 #include <linux/input.h>
32776 #include <linux/gameport.h>
32777 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
32778 index 42f7b25..09fcf46 100644
32779 --- a/drivers/input/joystick/xpad.c
32780 +++ b/drivers/input/joystick/xpad.c
32781 @@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
32782
32783 static int xpad_led_probe(struct usb_xpad *xpad)
32784 {
32785 - static atomic_t led_seq = ATOMIC_INIT(0);
32786 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32787 long led_no;
32788 struct xpad_led *led;
32789 struct led_classdev *led_cdev;
32790 @@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
32791 if (!led)
32792 return -ENOMEM;
32793
32794 - led_no = (long)atomic_inc_return(&led_seq) - 1;
32795 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32796
32797 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32798 led->xpad = xpad;
32799 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32800 index 0110b5a..d3ad144 100644
32801 --- a/drivers/input/mousedev.c
32802 +++ b/drivers/input/mousedev.c
32803 @@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
32804
32805 spin_unlock_irq(&client->packet_lock);
32806
32807 - if (copy_to_user(buffer, data, count))
32808 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
32809 return -EFAULT;
32810
32811 return count;
32812 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
32813 index d0f7533..fb8215b 100644
32814 --- a/drivers/input/serio/serio.c
32815 +++ b/drivers/input/serio/serio.c
32816 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
32817 */
32818 static void serio_init_port(struct serio *serio)
32819 {
32820 - static atomic_t serio_no = ATOMIC_INIT(0);
32821 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32822
32823 __module_get(THIS_MODULE);
32824
32825 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
32826 mutex_init(&serio->drv_mutex);
32827 device_initialize(&serio->dev);
32828 dev_set_name(&serio->dev, "serio%ld",
32829 - (long)atomic_inc_return(&serio_no) - 1);
32830 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
32831 serio->dev.bus = &serio_bus;
32832 serio->dev.release = serio_release_port;
32833 serio->dev.groups = serio_device_attr_groups;
32834 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
32835 index b902794..fc7b85b 100644
32836 --- a/drivers/isdn/capi/capi.c
32837 +++ b/drivers/isdn/capi/capi.c
32838 @@ -83,8 +83,8 @@ struct capiminor {
32839
32840 struct capi20_appl *ap;
32841 u32 ncci;
32842 - atomic_t datahandle;
32843 - atomic_t msgid;
32844 + atomic_unchecked_t datahandle;
32845 + atomic_unchecked_t msgid;
32846
32847 struct tty_port port;
32848 int ttyinstop;
32849 @@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
32850 capimsg_setu16(s, 2, mp->ap->applid);
32851 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32852 capimsg_setu8 (s, 5, CAPI_RESP);
32853 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32854 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32855 capimsg_setu32(s, 8, mp->ncci);
32856 capimsg_setu16(s, 12, datahandle);
32857 }
32858 @@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
32859 mp->outbytes -= len;
32860 spin_unlock_bh(&mp->outlock);
32861
32862 - datahandle = atomic_inc_return(&mp->datahandle);
32863 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32864 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32865 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32866 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32867 capimsg_setu16(skb->data, 2, mp->ap->applid);
32868 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32869 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32870 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32871 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32872 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32873 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32874 capimsg_setu16(skb->data, 16, len); /* Data length */
32875 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
32876 index 821f7ac..28d4030 100644
32877 --- a/drivers/isdn/hardware/avm/b1.c
32878 +++ b/drivers/isdn/hardware/avm/b1.c
32879 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
32880 }
32881 if (left) {
32882 if (t4file->user) {
32883 - if (copy_from_user(buf, dp, left))
32884 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32885 return -EFAULT;
32886 } else {
32887 memcpy(buf, dp, left);
32888 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
32889 }
32890 if (left) {
32891 if (config->user) {
32892 - if (copy_from_user(buf, dp, left))
32893 + if (left > sizeof buf || copy_from_user(buf, dp, left))
32894 return -EFAULT;
32895 } else {
32896 memcpy(buf, dp, left);
32897 diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
32898 index dd6b53a..19d9ee6 100644
32899 --- a/drivers/isdn/hardware/eicon/divasync.h
32900 +++ b/drivers/isdn/hardware/eicon/divasync.h
32901 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32902 } diva_didd_add_adapter_t;
32903 typedef struct _diva_didd_remove_adapter {
32904 IDI_CALL p_request;
32905 -} diva_didd_remove_adapter_t;
32906 +} __no_const diva_didd_remove_adapter_t;
32907 typedef struct _diva_didd_read_adapter_array {
32908 void *buffer;
32909 dword length;
32910 diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
32911 index d303e65..28bcb7b 100644
32912 --- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32913 +++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
32914 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32915 typedef struct _diva_os_idi_adapter_interface {
32916 diva_init_card_proc_t cleanup_adapter_proc;
32917 diva_cmd_card_proc_t cmd_proc;
32918 -} diva_os_idi_adapter_interface_t;
32919 +} __no_const diva_os_idi_adapter_interface_t;
32920
32921 typedef struct _diva_os_xdi_adapter {
32922 struct list_head link;
32923 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
32924 index e74df7c..03a03ba 100644
32925 --- a/drivers/isdn/icn/icn.c
32926 +++ b/drivers/isdn/icn/icn.c
32927 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
32928 if (count > len)
32929 count = len;
32930 if (user) {
32931 - if (copy_from_user(msg, buf, count))
32932 + if (count > sizeof msg || copy_from_user(msg, buf, count))
32933 return -EFAULT;
32934 } else
32935 memcpy(msg, buf, count);
32936 diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32937 index 8bc4915..4cc6a2e 100644
32938 --- a/drivers/leds/leds-mc13783.c
32939 +++ b/drivers/leds/leds-mc13783.c
32940 @@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32941 return -EINVAL;
32942 }
32943
32944 - led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32945 + led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32946 if (led == NULL) {
32947 dev_err(&pdev->dev, "failed to alloc memory\n");
32948 return -ENOMEM;
32949 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
32950 index b5fdcb7..5b6c59f 100644
32951 --- a/drivers/lguest/core.c
32952 +++ b/drivers/lguest/core.c
32953 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
32954 * it's worked so far. The end address needs +1 because __get_vm_area
32955 * allocates an extra guard page, so we need space for that.
32956 */
32957 +
32958 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32959 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32960 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32961 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32962 +#else
32963 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32964 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32965 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32966 +#endif
32967 +
32968 if (!switcher_vma) {
32969 err = -ENOMEM;
32970 printk("lguest: could not map switcher pages high\n");
32971 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
32972 * Now the Switcher is mapped at the right address, we can't fail!
32973 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
32974 */
32975 - memcpy(switcher_vma->addr, start_switcher_text,
32976 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32977 end_switcher_text - start_switcher_text);
32978
32979 printk(KERN_INFO "lguest: mapped switcher at %p\n",
32980 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
32981 index 39809035..ce25c5e 100644
32982 --- a/drivers/lguest/x86/core.c
32983 +++ b/drivers/lguest/x86/core.c
32984 @@ -59,7 +59,7 @@ static struct {
32985 /* Offset from where switcher.S was compiled to where we've copied it */
32986 static unsigned long switcher_offset(void)
32987 {
32988 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
32989 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
32990 }
32991
32992 /* This cpu's struct lguest_pages. */
32993 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
32994 * These copies are pretty cheap, so we do them unconditionally: */
32995 /* Save the current Host top-level page directory.
32996 */
32997 +
32998 +#ifdef CONFIG_PAX_PER_CPU_PGD
32999 + pages->state.host_cr3 = read_cr3();
33000 +#else
33001 pages->state.host_cr3 = __pa(current->mm->pgd);
33002 +#endif
33003 +
33004 /*
33005 * Set up the Guest's page tables to see this CPU's pages (and no
33006 * other CPU's pages).
33007 @@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
33008 * compiled-in switcher code and the high-mapped copy we just made.
33009 */
33010 for (i = 0; i < IDT_ENTRIES; i++)
33011 - default_idt_entries[i] += switcher_offset();
33012 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33013
33014 /*
33015 * Set up the Switcher's per-cpu areas.
33016 @@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
33017 * it will be undisturbed when we switch. To change %cs and jump we
33018 * need this structure to feed to Intel's "lcall" instruction.
33019 */
33020 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33021 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33022 lguest_entry.segment = LGUEST_CS;
33023
33024 /*
33025 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33026 index 40634b0..4f5855e 100644
33027 --- a/drivers/lguest/x86/switcher_32.S
33028 +++ b/drivers/lguest/x86/switcher_32.S
33029 @@ -87,6 +87,7 @@
33030 #include <asm/page.h>
33031 #include <asm/segment.h>
33032 #include <asm/lguest.h>
33033 +#include <asm/processor-flags.h>
33034
33035 // We mark the start of the code to copy
33036 // It's placed in .text tho it's never run here
33037 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33038 // Changes type when we load it: damn Intel!
33039 // For after we switch over our page tables
33040 // That entry will be read-only: we'd crash.
33041 +
33042 +#ifdef CONFIG_PAX_KERNEXEC
33043 + mov %cr0, %edx
33044 + xor $X86_CR0_WP, %edx
33045 + mov %edx, %cr0
33046 +#endif
33047 +
33048 movl $(GDT_ENTRY_TSS*8), %edx
33049 ltr %dx
33050
33051 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33052 // Let's clear it again for our return.
33053 // The GDT descriptor of the Host
33054 // Points to the table after two "size" bytes
33055 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33056 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33057 // Clear "used" from type field (byte 5, bit 2)
33058 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33059 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33060 +
33061 +#ifdef CONFIG_PAX_KERNEXEC
33062 + mov %cr0, %eax
33063 + xor $X86_CR0_WP, %eax
33064 + mov %eax, %cr0
33065 +#endif
33066
33067 // Once our page table's switched, the Guest is live!
33068 // The Host fades as we run this final step.
33069 @@ -295,13 +309,12 @@ deliver_to_host:
33070 // I consulted gcc, and it gave
33071 // These instructions, which I gladly credit:
33072 leal (%edx,%ebx,8), %eax
33073 - movzwl (%eax),%edx
33074 - movl 4(%eax), %eax
33075 - xorw %ax, %ax
33076 - orl %eax, %edx
33077 + movl 4(%eax), %edx
33078 + movw (%eax), %dx
33079 // Now the address of the handler's in %edx
33080 // We call it now: its "iret" drops us home.
33081 - jmp *%edx
33082 + ljmp $__KERNEL_CS, $1f
33083 +1: jmp *%edx
33084
33085 // Every interrupt can come to us here
33086 // But we must truly tell each apart.
33087 diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
33088 index 20e5c2c..9e849a9 100644
33089 --- a/drivers/macintosh/macio_asic.c
33090 +++ b/drivers/macintosh/macio_asic.c
33091 @@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
33092 * MacIO is matched against any Apple ID, it's probe() function
33093 * will then decide wether it applies or not
33094 */
33095 -static const struct pci_device_id __devinitdata pci_ids [] = { {
33096 +static const struct pci_device_id __devinitconst pci_ids [] = { {
33097 .vendor = PCI_VENDOR_ID_APPLE,
33098 .device = PCI_ANY_ID,
33099 .subvendor = PCI_ANY_ID,
33100 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33101 index 17e2b47..bcbeec4 100644
33102 --- a/drivers/md/bitmap.c
33103 +++ b/drivers/md/bitmap.c
33104 @@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33105 chunk_kb ? "KB" : "B");
33106 if (bitmap->file) {
33107 seq_printf(seq, ", file: ");
33108 - seq_path(seq, &bitmap->file->f_path, " \t\n");
33109 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33110 }
33111
33112 seq_printf(seq, "\n");
33113 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
33114 index a1a3e6d..1918bfc 100644
33115 --- a/drivers/md/dm-ioctl.c
33116 +++ b/drivers/md/dm-ioctl.c
33117 @@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
33118 cmd == DM_LIST_VERSIONS_CMD)
33119 return 0;
33120
33121 - if ((cmd == DM_DEV_CREATE_CMD)) {
33122 + if (cmd == DM_DEV_CREATE_CMD) {
33123 if (!*param->name) {
33124 DMWARN("name not supplied when creating device");
33125 return -EINVAL;
33126 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
33127 index d039de8..0cf5b87 100644
33128 --- a/drivers/md/dm-raid1.c
33129 +++ b/drivers/md/dm-raid1.c
33130 @@ -40,7 +40,7 @@ enum dm_raid1_error {
33131
33132 struct mirror {
33133 struct mirror_set *ms;
33134 - atomic_t error_count;
33135 + atomic_unchecked_t error_count;
33136 unsigned long error_type;
33137 struct dm_dev *dev;
33138 sector_t offset;
33139 @@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
33140 struct mirror *m;
33141
33142 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33143 - if (!atomic_read(&m->error_count))
33144 + if (!atomic_read_unchecked(&m->error_count))
33145 return m;
33146
33147 return NULL;
33148 @@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
33149 * simple way to tell if a device has encountered
33150 * errors.
33151 */
33152 - atomic_inc(&m->error_count);
33153 + atomic_inc_unchecked(&m->error_count);
33154
33155 if (test_and_set_bit(error_type, &m->error_type))
33156 return;
33157 @@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
33158 struct mirror *m = get_default_mirror(ms);
33159
33160 do {
33161 - if (likely(!atomic_read(&m->error_count)))
33162 + if (likely(!atomic_read_unchecked(&m->error_count)))
33163 return m;
33164
33165 if (m-- == ms->mirror)
33166 @@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
33167 {
33168 struct mirror *default_mirror = get_default_mirror(m->ms);
33169
33170 - return !atomic_read(&default_mirror->error_count);
33171 + return !atomic_read_unchecked(&default_mirror->error_count);
33172 }
33173
33174 static int mirror_available(struct mirror_set *ms, struct bio *bio)
33175 @@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
33176 */
33177 if (likely(region_in_sync(ms, region, 1)))
33178 m = choose_mirror(ms, bio->bi_sector);
33179 - else if (m && atomic_read(&m->error_count))
33180 + else if (m && atomic_read_unchecked(&m->error_count))
33181 m = NULL;
33182
33183 if (likely(m))
33184 @@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
33185 }
33186
33187 ms->mirror[mirror].ms = ms;
33188 - atomic_set(&(ms->mirror[mirror].error_count), 0);
33189 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33190 ms->mirror[mirror].error_type = 0;
33191 ms->mirror[mirror].offset = offset;
33192
33193 @@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
33194 */
33195 static char device_status_char(struct mirror *m)
33196 {
33197 - if (!atomic_read(&(m->error_count)))
33198 + if (!atomic_read_unchecked(&(m->error_count)))
33199 return 'A';
33200
33201 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
33202 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
33203 index 35c94ff..20d4c17 100644
33204 --- a/drivers/md/dm-stripe.c
33205 +++ b/drivers/md/dm-stripe.c
33206 @@ -20,7 +20,7 @@ struct stripe {
33207 struct dm_dev *dev;
33208 sector_t physical_start;
33209
33210 - atomic_t error_count;
33211 + atomic_unchecked_t error_count;
33212 };
33213
33214 struct stripe_c {
33215 @@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
33216 kfree(sc);
33217 return r;
33218 }
33219 - atomic_set(&(sc->stripe[i].error_count), 0);
33220 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33221 }
33222
33223 ti->private = sc;
33224 @@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
33225 DMEMIT("%d ", sc->stripes);
33226 for (i = 0; i < sc->stripes; i++) {
33227 DMEMIT("%s ", sc->stripe[i].dev->name);
33228 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33229 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33230 'D' : 'A';
33231 }
33232 buffer[i] = '\0';
33233 @@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
33234 */
33235 for (i = 0; i < sc->stripes; i++)
33236 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33237 - atomic_inc(&(sc->stripe[i].error_count));
33238 - if (atomic_read(&(sc->stripe[i].error_count)) <
33239 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
33240 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33241 DM_IO_ERROR_THRESHOLD)
33242 schedule_work(&sc->trigger_event);
33243 }
33244 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
33245 index 2e227fb..44ead1f 100644
33246 --- a/drivers/md/dm-table.c
33247 +++ b/drivers/md/dm-table.c
33248 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
33249 if (!dev_size)
33250 return 0;
33251
33252 - if ((start >= dev_size) || (start + len > dev_size)) {
33253 + if ((start >= dev_size) || (len > dev_size - start)) {
33254 DMWARN("%s: %s too small for target: "
33255 "start=%llu, len=%llu, dev_size=%llu",
33256 dm_device_name(ti->table->md), bdevname(bdev, b),
33257 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
33258 index 737d388..811ad5a 100644
33259 --- a/drivers/md/dm-thin-metadata.c
33260 +++ b/drivers/md/dm-thin-metadata.c
33261 @@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33262
33263 pmd->info.tm = tm;
33264 pmd->info.levels = 2;
33265 - pmd->info.value_type.context = pmd->data_sm;
33266 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33267 pmd->info.value_type.size = sizeof(__le64);
33268 pmd->info.value_type.inc = data_block_inc;
33269 pmd->info.value_type.dec = data_block_dec;
33270 @@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33271
33272 pmd->bl_info.tm = tm;
33273 pmd->bl_info.levels = 1;
33274 - pmd->bl_info.value_type.context = pmd->data_sm;
33275 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33276 pmd->bl_info.value_type.size = sizeof(__le64);
33277 pmd->bl_info.value_type.inc = data_block_inc;
33278 pmd->bl_info.value_type.dec = data_block_dec;
33279 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
33280 index e24143c..ce2f21a1 100644
33281 --- a/drivers/md/dm.c
33282 +++ b/drivers/md/dm.c
33283 @@ -176,9 +176,9 @@ struct mapped_device {
33284 /*
33285 * Event handling.
33286 */
33287 - atomic_t event_nr;
33288 + atomic_unchecked_t event_nr;
33289 wait_queue_head_t eventq;
33290 - atomic_t uevent_seq;
33291 + atomic_unchecked_t uevent_seq;
33292 struct list_head uevent_list;
33293 spinlock_t uevent_lock; /* Protect access to uevent_list */
33294
33295 @@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
33296 rwlock_init(&md->map_lock);
33297 atomic_set(&md->holders, 1);
33298 atomic_set(&md->open_count, 0);
33299 - atomic_set(&md->event_nr, 0);
33300 - atomic_set(&md->uevent_seq, 0);
33301 + atomic_set_unchecked(&md->event_nr, 0);
33302 + atomic_set_unchecked(&md->uevent_seq, 0);
33303 INIT_LIST_HEAD(&md->uevent_list);
33304 spin_lock_init(&md->uevent_lock);
33305
33306 @@ -1980,7 +1980,7 @@ static void event_callback(void *context)
33307
33308 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33309
33310 - atomic_inc(&md->event_nr);
33311 + atomic_inc_unchecked(&md->event_nr);
33312 wake_up(&md->eventq);
33313 }
33314
33315 @@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
33316
33317 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33318 {
33319 - return atomic_add_return(1, &md->uevent_seq);
33320 + return atomic_add_return_unchecked(1, &md->uevent_seq);
33321 }
33322
33323 uint32_t dm_get_event_nr(struct mapped_device *md)
33324 {
33325 - return atomic_read(&md->event_nr);
33326 + return atomic_read_unchecked(&md->event_nr);
33327 }
33328
33329 int dm_wait_event(struct mapped_device *md, int event_nr)
33330 {
33331 return wait_event_interruptible(md->eventq,
33332 - (event_nr != atomic_read(&md->event_nr)));
33333 + (event_nr != atomic_read_unchecked(&md->event_nr)));
33334 }
33335
33336 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33337 diff --git a/drivers/md/md.c b/drivers/md/md.c
33338 index 2b30ffd..362b519 100644
33339 --- a/drivers/md/md.c
33340 +++ b/drivers/md/md.c
33341 @@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
33342 * start build, activate spare
33343 */
33344 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33345 -static atomic_t md_event_count;
33346 +static atomic_unchecked_t md_event_count;
33347 void md_new_event(struct mddev *mddev)
33348 {
33349 - atomic_inc(&md_event_count);
33350 + atomic_inc_unchecked(&md_event_count);
33351 wake_up(&md_event_waiters);
33352 }
33353 EXPORT_SYMBOL_GPL(md_new_event);
33354 @@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
33355 */
33356 static void md_new_event_inintr(struct mddev *mddev)
33357 {
33358 - atomic_inc(&md_event_count);
33359 + atomic_inc_unchecked(&md_event_count);
33360 wake_up(&md_event_waiters);
33361 }
33362
33363 @@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
33364
33365 rdev->preferred_minor = 0xffff;
33366 rdev->data_offset = le64_to_cpu(sb->data_offset);
33367 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33368 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33369
33370 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33371 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
33372 @@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
33373 else
33374 sb->resync_offset = cpu_to_le64(0);
33375
33376 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33377 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33378
33379 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33380 sb->size = cpu_to_le64(mddev->dev_sectors);
33381 @@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
33382 static ssize_t
33383 errors_show(struct md_rdev *rdev, char *page)
33384 {
33385 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33386 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33387 }
33388
33389 static ssize_t
33390 @@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
33391 char *e;
33392 unsigned long n = simple_strtoul(buf, &e, 10);
33393 if (*buf && (*e == 0 || *e == '\n')) {
33394 - atomic_set(&rdev->corrected_errors, n);
33395 + atomic_set_unchecked(&rdev->corrected_errors, n);
33396 return len;
33397 }
33398 return -EINVAL;
33399 @@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
33400 rdev->sb_loaded = 0;
33401 rdev->bb_page = NULL;
33402 atomic_set(&rdev->nr_pending, 0);
33403 - atomic_set(&rdev->read_errors, 0);
33404 - atomic_set(&rdev->corrected_errors, 0);
33405 + atomic_set_unchecked(&rdev->read_errors, 0);
33406 + atomic_set_unchecked(&rdev->corrected_errors, 0);
33407
33408 INIT_LIST_HEAD(&rdev->same_set);
33409 init_waitqueue_head(&rdev->blocked_wait);
33410 @@ -3744,8 +3744,8 @@ array_state_show(struct mddev *mddev, char *page)
33411 return sprintf(page, "%s\n", array_states[st]);
33412 }
33413
33414 -static int do_md_stop(struct mddev * mddev, int ro, int is_open);
33415 -static int md_set_readonly(struct mddev * mddev, int is_open);
33416 +static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
33417 +static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
33418 static int do_md_run(struct mddev * mddev);
33419 static int restart_array(struct mddev *mddev);
33420
33421 @@ -3761,14 +3761,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33422 /* stopping an active array */
33423 if (atomic_read(&mddev->openers) > 0)
33424 return -EBUSY;
33425 - err = do_md_stop(mddev, 0, 0);
33426 + err = do_md_stop(mddev, 0, NULL);
33427 break;
33428 case inactive:
33429 /* stopping an active array */
33430 if (mddev->pers) {
33431 if (atomic_read(&mddev->openers) > 0)
33432 return -EBUSY;
33433 - err = do_md_stop(mddev, 2, 0);
33434 + err = do_md_stop(mddev, 2, NULL);
33435 } else
33436 err = 0; /* already inactive */
33437 break;
33438 @@ -3776,7 +3776,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33439 break; /* not supported yet */
33440 case readonly:
33441 if (mddev->pers)
33442 - err = md_set_readonly(mddev, 0);
33443 + err = md_set_readonly(mddev, NULL);
33444 else {
33445 mddev->ro = 1;
33446 set_disk_ro(mddev->gendisk, 1);
33447 @@ -3786,7 +3786,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33448 case read_auto:
33449 if (mddev->pers) {
33450 if (mddev->ro == 0)
33451 - err = md_set_readonly(mddev, 0);
33452 + err = md_set_readonly(mddev, NULL);
33453 else if (mddev->ro == 1)
33454 err = restart_array(mddev);
33455 if (err == 0) {
33456 @@ -5124,15 +5124,17 @@ void md_stop(struct mddev *mddev)
33457 }
33458 EXPORT_SYMBOL_GPL(md_stop);
33459
33460 -static int md_set_readonly(struct mddev *mddev, int is_open)
33461 +static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
33462 {
33463 int err = 0;
33464 mutex_lock(&mddev->open_mutex);
33465 - if (atomic_read(&mddev->openers) > is_open) {
33466 + if (atomic_read(&mddev->openers) > !!bdev) {
33467 printk("md: %s still in use.\n",mdname(mddev));
33468 err = -EBUSY;
33469 goto out;
33470 }
33471 + if (bdev)
33472 + sync_blockdev(bdev);
33473 if (mddev->pers) {
33474 __md_stop_writes(mddev);
33475
33476 @@ -5154,18 +5156,26 @@ out:
33477 * 0 - completely stop and dis-assemble array
33478 * 2 - stop but do not disassemble array
33479 */
33480 -static int do_md_stop(struct mddev * mddev, int mode, int is_open)
33481 +static int do_md_stop(struct mddev * mddev, int mode,
33482 + struct block_device *bdev)
33483 {
33484 struct gendisk *disk = mddev->gendisk;
33485 struct md_rdev *rdev;
33486
33487 mutex_lock(&mddev->open_mutex);
33488 - if (atomic_read(&mddev->openers) > is_open ||
33489 + if (atomic_read(&mddev->openers) > !!bdev ||
33490 mddev->sysfs_active) {
33491 printk("md: %s still in use.\n",mdname(mddev));
33492 mutex_unlock(&mddev->open_mutex);
33493 return -EBUSY;
33494 }
33495 + if (bdev)
33496 + /* It is possible IO was issued on some other
33497 + * open file which was closed before we took ->open_mutex.
33498 + * As that was not the last close __blkdev_put will not
33499 + * have called sync_blockdev, so we must.
33500 + */
33501 + sync_blockdev(bdev);
33502
33503 if (mddev->pers) {
33504 if (mddev->ro)
33505 @@ -5239,7 +5249,7 @@ static void autorun_array(struct mddev *mddev)
33506 err = do_md_run(mddev);
33507 if (err) {
33508 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
33509 - do_md_stop(mddev, 0, 0);
33510 + do_md_stop(mddev, 0, NULL);
33511 }
33512 }
33513
33514 @@ -6237,11 +6247,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
33515 goto done_unlock;
33516
33517 case STOP_ARRAY:
33518 - err = do_md_stop(mddev, 0, 1);
33519 + err = do_md_stop(mddev, 0, bdev);
33520 goto done_unlock;
33521
33522 case STOP_ARRAY_RO:
33523 - err = md_set_readonly(mddev, 1);
33524 + err = md_set_readonly(mddev, bdev);
33525 goto done_unlock;
33526
33527 case BLKROSET:
33528 @@ -6738,7 +6748,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
33529
33530 spin_unlock(&pers_lock);
33531 seq_printf(seq, "\n");
33532 - seq->poll_event = atomic_read(&md_event_count);
33533 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33534 return 0;
33535 }
33536 if (v == (void*)2) {
33537 @@ -6841,7 +6851,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
33538 return error;
33539
33540 seq = file->private_data;
33541 - seq->poll_event = atomic_read(&md_event_count);
33542 + seq->poll_event = atomic_read_unchecked(&md_event_count);
33543 return error;
33544 }
33545
33546 @@ -6855,7 +6865,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
33547 /* always allow read */
33548 mask = POLLIN | POLLRDNORM;
33549
33550 - if (seq->poll_event != atomic_read(&md_event_count))
33551 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
33552 mask |= POLLERR | POLLPRI;
33553 return mask;
33554 }
33555 @@ -6899,7 +6909,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
33556 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33557 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33558 (int)part_stat_read(&disk->part0, sectors[1]) -
33559 - atomic_read(&disk->sync_io);
33560 + atomic_read_unchecked(&disk->sync_io);
33561 /* sync IO will cause sync_io to increase before the disk_stats
33562 * as sync_io is counted when a request starts, and
33563 * disk_stats is counted when it completes.
33564 diff --git a/drivers/md/md.h b/drivers/md/md.h
33565 index 1c2063c..9639970 100644
33566 --- a/drivers/md/md.h
33567 +++ b/drivers/md/md.h
33568 @@ -93,13 +93,13 @@ struct md_rdev {
33569 * only maintained for arrays that
33570 * support hot removal
33571 */
33572 - atomic_t read_errors; /* number of consecutive read errors that
33573 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
33574 * we have tried to ignore.
33575 */
33576 struct timespec last_read_error; /* monotonic time since our
33577 * last read error
33578 */
33579 - atomic_t corrected_errors; /* number of corrected read errors,
33580 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33581 * for reporting to userspace and storing
33582 * in superblock.
33583 */
33584 @@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
33585
33586 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33587 {
33588 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33589 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33590 }
33591
33592 struct md_personality
33593 diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
33594 index fc90c11..c8cd9a9 100644
33595 --- a/drivers/md/persistent-data/dm-space-map-checker.c
33596 +++ b/drivers/md/persistent-data/dm-space-map-checker.c
33597 @@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
33598 /*----------------------------------------------------------------*/
33599
33600 struct sm_checker {
33601 - struct dm_space_map sm;
33602 + dm_space_map_no_const sm;
33603
33604 struct count_array old_counts;
33605 struct count_array counts;
33606 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
33607 index 3d0ed53..35dc592 100644
33608 --- a/drivers/md/persistent-data/dm-space-map-disk.c
33609 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
33610 @@ -23,7 +23,7 @@
33611 * Space map interface.
33612 */
33613 struct sm_disk {
33614 - struct dm_space_map sm;
33615 + dm_space_map_no_const sm;
33616
33617 struct ll_disk ll;
33618 struct ll_disk old_ll;
33619 diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33620 index e89ae5e..062e4c2 100644
33621 --- a/drivers/md/persistent-data/dm-space-map-metadata.c
33622 +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33623 @@ -43,7 +43,7 @@ struct block_op {
33624 };
33625
33626 struct sm_metadata {
33627 - struct dm_space_map sm;
33628 + dm_space_map_no_const sm;
33629
33630 struct ll_disk ll;
33631 struct ll_disk old_ll;
33632 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33633 index 1cbfc6b..56e1dbb 100644
33634 --- a/drivers/md/persistent-data/dm-space-map.h
33635 +++ b/drivers/md/persistent-data/dm-space-map.h
33636 @@ -60,6 +60,7 @@ struct dm_space_map {
33637 int (*root_size)(struct dm_space_map *sm, size_t *result);
33638 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33639 };
33640 +typedef struct dm_space_map __no_const dm_space_map_no_const;
33641
33642 /*----------------------------------------------------------------*/
33643
33644 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
33645 index d1f74ab..d1b24fd 100644
33646 --- a/drivers/md/raid1.c
33647 +++ b/drivers/md/raid1.c
33648 @@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
33649 if (r1_sync_page_io(rdev, sect, s,
33650 bio->bi_io_vec[idx].bv_page,
33651 READ) != 0)
33652 - atomic_add(s, &rdev->corrected_errors);
33653 + atomic_add_unchecked(s, &rdev->corrected_errors);
33654 }
33655 sectors -= s;
33656 sect += s;
33657 @@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
33658 test_bit(In_sync, &rdev->flags)) {
33659 if (r1_sync_page_io(rdev, sect, s,
33660 conf->tmppage, READ)) {
33661 - atomic_add(s, &rdev->corrected_errors);
33662 + atomic_add_unchecked(s, &rdev->corrected_errors);
33663 printk(KERN_INFO
33664 "md/raid1:%s: read error corrected "
33665 "(%d sectors at %llu on %s)\n",
33666 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
33667 index a954c95..6e7a21c 100644
33668 --- a/drivers/md/raid10.c
33669 +++ b/drivers/md/raid10.c
33670 @@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
33671 /* The write handler will notice the lack of
33672 * R10BIO_Uptodate and record any errors etc
33673 */
33674 - atomic_add(r10_bio->sectors,
33675 + atomic_add_unchecked(r10_bio->sectors,
33676 &conf->mirrors[d].rdev->corrected_errors);
33677
33678 /* for reconstruct, we always reschedule after a read.
33679 @@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33680 {
33681 struct timespec cur_time_mon;
33682 unsigned long hours_since_last;
33683 - unsigned int read_errors = atomic_read(&rdev->read_errors);
33684 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33685
33686 ktime_get_ts(&cur_time_mon);
33687
33688 @@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
33689 * overflowing the shift of read_errors by hours_since_last.
33690 */
33691 if (hours_since_last >= 8 * sizeof(read_errors))
33692 - atomic_set(&rdev->read_errors, 0);
33693 + atomic_set_unchecked(&rdev->read_errors, 0);
33694 else
33695 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33696 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33697 }
33698
33699 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
33700 @@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33701 return;
33702
33703 check_decay_read_errors(mddev, rdev);
33704 - atomic_inc(&rdev->read_errors);
33705 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
33706 + atomic_inc_unchecked(&rdev->read_errors);
33707 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33708 char b[BDEVNAME_SIZE];
33709 bdevname(rdev->bdev, b);
33710
33711 @@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33712 "md/raid10:%s: %s: Raid device exceeded "
33713 "read_error threshold [cur %d:max %d]\n",
33714 mdname(mddev), b,
33715 - atomic_read(&rdev->read_errors), max_read_errors);
33716 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33717 printk(KERN_NOTICE
33718 "md/raid10:%s: %s: Failing raid device\n",
33719 mdname(mddev), b);
33720 @@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
33721 (unsigned long long)(
33722 sect + rdev->data_offset),
33723 bdevname(rdev->bdev, b));
33724 - atomic_add(s, &rdev->corrected_errors);
33725 + atomic_add_unchecked(s, &rdev->corrected_errors);
33726 }
33727
33728 rdev_dec_pending(rdev, mddev);
33729 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
33730 index 73a5800..2b0e3b1 100644
33731 --- a/drivers/md/raid5.c
33732 +++ b/drivers/md/raid5.c
33733 @@ -1694,18 +1694,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
33734 (unsigned long long)(sh->sector
33735 + rdev->data_offset),
33736 bdevname(rdev->bdev, b));
33737 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33738 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
33739 clear_bit(R5_ReadError, &sh->dev[i].flags);
33740 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33741 }
33742 - if (atomic_read(&rdev->read_errors))
33743 - atomic_set(&rdev->read_errors, 0);
33744 + if (atomic_read_unchecked(&rdev->read_errors))
33745 + atomic_set_unchecked(&rdev->read_errors, 0);
33746 } else {
33747 const char *bdn = bdevname(rdev->bdev, b);
33748 int retry = 0;
33749
33750 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33751 - atomic_inc(&rdev->read_errors);
33752 + atomic_inc_unchecked(&rdev->read_errors);
33753 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
33754 printk_ratelimited(
33755 KERN_WARNING
33756 @@ -1734,7 +1734,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
33757 (unsigned long long)(sh->sector
33758 + rdev->data_offset),
33759 bdn);
33760 - else if (atomic_read(&rdev->read_errors)
33761 + else if (atomic_read_unchecked(&rdev->read_errors)
33762 > conf->max_nr_stripes)
33763 printk(KERN_WARNING
33764 "md/raid:%s: Too many read errors, failing device %s.\n",
33765 diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
33766 index d88c4aa..17c80b1 100644
33767 --- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33768 +++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
33769 @@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
33770 .subvendor = _subvend, .subdevice = _subdev, \
33771 .driver_data = (unsigned long)&_driverdata }
33772
33773 -static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33774 +static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33775 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33776 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33777 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
33778 diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33779 index a7d876f..8c21b61 100644
33780 --- a/drivers/media/dvb/dvb-core/dvb_demux.h
33781 +++ b/drivers/media/dvb/dvb-core/dvb_demux.h
33782 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
33783 union {
33784 dmx_ts_cb ts;
33785 dmx_section_cb sec;
33786 - } cb;
33787 + } __no_const cb;
33788
33789 struct dvb_demux *demux;
33790 void *priv;
33791 diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
33792 index 39eab73..60033e7 100644
33793 --- a/drivers/media/dvb/dvb-core/dvbdev.c
33794 +++ b/drivers/media/dvb/dvb-core/dvbdev.c
33795 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
33796 const struct dvb_device *template, void *priv, int type)
33797 {
33798 struct dvb_device *dvbdev;
33799 - struct file_operations *dvbdevfops;
33800 + file_operations_no_const *dvbdevfops;
33801 struct device *clsdev;
33802 int minor;
33803 int id;
33804 diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
33805 index 3940bb0..fb3952a 100644
33806 --- a/drivers/media/dvb/dvb-usb/cxusb.c
33807 +++ b/drivers/media/dvb/dvb-usb/cxusb.c
33808 @@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33809
33810 struct dib0700_adapter_state {
33811 int (*set_param_save) (struct dvb_frontend *);
33812 -};
33813 +} __no_const;
33814
33815 static int dib7070_set_param_override(struct dvb_frontend *fe)
33816 {
33817 diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
33818 index 451c5a7..649f711 100644
33819 --- a/drivers/media/dvb/dvb-usb/dw2102.c
33820 +++ b/drivers/media/dvb/dvb-usb/dw2102.c
33821 @@ -95,7 +95,7 @@ struct su3000_state {
33822
33823 struct s6x0_state {
33824 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33825 -};
33826 +} __no_const;
33827
33828 /* debug */
33829 static int dvb_usb_dw2102_debug;
33830 diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
33831 index 404f63a..4796533 100644
33832 --- a/drivers/media/dvb/frontends/dib3000.h
33833 +++ b/drivers/media/dvb/frontends/dib3000.h
33834 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33835 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
33836 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33837 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
33838 -};
33839 +} __no_const;
33840
33841 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33842 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
33843 diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
33844 index 7539a5d..06531a6 100644
33845 --- a/drivers/media/dvb/ngene/ngene-cards.c
33846 +++ b/drivers/media/dvb/ngene/ngene-cards.c
33847 @@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
33848
33849 /****************************************************************************/
33850
33851 -static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33852 +static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33853 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33854 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33855 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
33856 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
33857 index 16a089f..1661b11 100644
33858 --- a/drivers/media/radio/radio-cadet.c
33859 +++ b/drivers/media/radio/radio-cadet.c
33860 @@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33861 unsigned char readbuf[RDS_BUFFER];
33862 int i = 0;
33863
33864 + if (count > RDS_BUFFER)
33865 + return -EFAULT;
33866 mutex_lock(&dev->lock);
33867 if (dev->rdsstat == 0) {
33868 dev->rdsstat = 1;
33869 @@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33870 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33871 mutex_unlock(&dev->lock);
33872
33873 - if (copy_to_user(data, readbuf, i))
33874 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33875 return -EFAULT;
33876 return i;
33877 }
33878 diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33879 index 9cde353..8c6a1c3 100644
33880 --- a/drivers/media/video/au0828/au0828.h
33881 +++ b/drivers/media/video/au0828/au0828.h
33882 @@ -191,7 +191,7 @@ struct au0828_dev {
33883
33884 /* I2C */
33885 struct i2c_adapter i2c_adap;
33886 - struct i2c_algorithm i2c_algo;
33887 + i2c_algorithm_no_const i2c_algo;
33888 struct i2c_client i2c_client;
33889 u32 i2c_rc;
33890
33891 diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
33892 index 7930ca5..235bf7d 100644
33893 --- a/drivers/media/video/cx25821/cx25821-core.c
33894 +++ b/drivers/media/video/cx25821/cx25821-core.c
33895 @@ -912,9 +912,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
33896 list_add_tail(&dev->devlist, &cx25821_devlist);
33897 mutex_unlock(&cx25821_devlist_mutex);
33898
33899 - strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
33900 - strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
33901 -
33902 if (dev->pci->device != 0x8210) {
33903 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
33904 __func__, dev->pci->device);
33905 diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
33906 index b9aa801..029f293 100644
33907 --- a/drivers/media/video/cx25821/cx25821.h
33908 +++ b/drivers/media/video/cx25821/cx25821.h
33909 @@ -187,7 +187,7 @@ enum port {
33910 };
33911
33912 struct cx25821_board {
33913 - char *name;
33914 + const char *name;
33915 enum port porta;
33916 enum port portb;
33917 enum port portc;
33918 diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
33919 index 04bf662..e0ac026 100644
33920 --- a/drivers/media/video/cx88/cx88-alsa.c
33921 +++ b/drivers/media/video/cx88/cx88-alsa.c
33922 @@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
33923 * Only boards with eeprom and byte 1 at eeprom=1 have it
33924 */
33925
33926 -static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33927 +static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33928 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33929 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33930 {0, }
33931 diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
33932 index 88cf9d9..bbc4b2c 100644
33933 --- a/drivers/media/video/omap/omap_vout.c
33934 +++ b/drivers/media/video/omap/omap_vout.c
33935 @@ -64,7 +64,6 @@ enum omap_vout_channels {
33936 OMAP_VIDEO2,
33937 };
33938
33939 -static struct videobuf_queue_ops video_vbq_ops;
33940 /* Variables configurable through module params*/
33941 static u32 video1_numbuffers = 3;
33942 static u32 video2_numbuffers = 3;
33943 @@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
33944 {
33945 struct videobuf_queue *q;
33946 struct omap_vout_device *vout = NULL;
33947 + static struct videobuf_queue_ops video_vbq_ops = {
33948 + .buf_setup = omap_vout_buffer_setup,
33949 + .buf_prepare = omap_vout_buffer_prepare,
33950 + .buf_release = omap_vout_buffer_release,
33951 + .buf_queue = omap_vout_buffer_queue,
33952 + };
33953
33954 vout = video_drvdata(file);
33955 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
33956 @@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
33957 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
33958
33959 q = &vout->vbq;
33960 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33961 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33962 - video_vbq_ops.buf_release = omap_vout_buffer_release;
33963 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33964 spin_lock_init(&vout->vbq_lock);
33965
33966 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
33967 diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33968 index 305e6aa..0143317 100644
33969 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33970 +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33971 @@ -196,7 +196,7 @@ struct pvr2_hdw {
33972
33973 /* I2C stuff */
33974 struct i2c_adapter i2c_adap;
33975 - struct i2c_algorithm i2c_algo;
33976 + i2c_algorithm_no_const i2c_algo;
33977 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33978 int i2c_cx25840_hack_state;
33979 int i2c_linked;
33980 diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
33981 index 02194c0..091733b 100644
33982 --- a/drivers/media/video/timblogiw.c
33983 +++ b/drivers/media/video/timblogiw.c
33984 @@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
33985
33986 /* Platform device functions */
33987
33988 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33989 +static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
33990 .vidioc_querycap = timblogiw_querycap,
33991 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
33992 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
33993 @@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
33994 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
33995 };
33996
33997 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
33998 +static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
33999 .owner = THIS_MODULE,
34000 .open = timblogiw_open,
34001 .release = timblogiw_close,
34002 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
34003 index a5c591f..db692a3 100644
34004 --- a/drivers/message/fusion/mptbase.c
34005 +++ b/drivers/message/fusion/mptbase.c
34006 @@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
34007 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34008 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
34009
34010 +#ifdef CONFIG_GRKERNSEC_HIDESYM
34011 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
34012 +#else
34013 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
34014 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34015 +#endif
34016 +
34017 /*
34018 * Rounding UP to nearest 4-kB boundary here...
34019 */
34020 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
34021 index 551262e..7551198 100644
34022 --- a/drivers/message/fusion/mptsas.c
34023 +++ b/drivers/message/fusion/mptsas.c
34024 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
34025 return 0;
34026 }
34027
34028 +static inline void
34029 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34030 +{
34031 + if (phy_info->port_details) {
34032 + phy_info->port_details->rphy = rphy;
34033 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34034 + ioc->name, rphy));
34035 + }
34036 +
34037 + if (rphy) {
34038 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34039 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34040 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34041 + ioc->name, rphy, rphy->dev.release));
34042 + }
34043 +}
34044 +
34045 /* no mutex */
34046 static void
34047 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
34048 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
34049 return NULL;
34050 }
34051
34052 -static inline void
34053 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34054 -{
34055 - if (phy_info->port_details) {
34056 - phy_info->port_details->rphy = rphy;
34057 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34058 - ioc->name, rphy));
34059 - }
34060 -
34061 - if (rphy) {
34062 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34063 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34064 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34065 - ioc->name, rphy, rphy->dev.release));
34066 - }
34067 -}
34068 -
34069 static inline struct sas_port *
34070 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34071 {
34072 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
34073 index 0c3ced7..1fe34ec 100644
34074 --- a/drivers/message/fusion/mptscsih.c
34075 +++ b/drivers/message/fusion/mptscsih.c
34076 @@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
34077
34078 h = shost_priv(SChost);
34079
34080 - if (h) {
34081 - if (h->info_kbuf == NULL)
34082 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34083 - return h->info_kbuf;
34084 - h->info_kbuf[0] = '\0';
34085 + if (!h)
34086 + return NULL;
34087
34088 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34089 - h->info_kbuf[size-1] = '\0';
34090 - }
34091 + if (h->info_kbuf == NULL)
34092 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34093 + return h->info_kbuf;
34094 + h->info_kbuf[0] = '\0';
34095 +
34096 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34097 + h->info_kbuf[size-1] = '\0';
34098
34099 return h->info_kbuf;
34100 }
34101 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
34102 index 6d115c7..58ff7fd 100644
34103 --- a/drivers/message/i2o/i2o_proc.c
34104 +++ b/drivers/message/i2o/i2o_proc.c
34105 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
34106 "Array Controller Device"
34107 };
34108
34109 -static char *chtostr(u8 * chars, int n)
34110 -{
34111 - char tmp[256];
34112 - tmp[0] = 0;
34113 - return strncat(tmp, (char *)chars, n);
34114 -}
34115 -
34116 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34117 char *group)
34118 {
34119 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
34120
34121 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34122 seq_printf(seq, "%-#8x", ddm_table.module_id);
34123 - seq_printf(seq, "%-29s",
34124 - chtostr(ddm_table.module_name_version, 28));
34125 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34126 seq_printf(seq, "%9d ", ddm_table.data_size);
34127 seq_printf(seq, "%8d", ddm_table.code_size);
34128
34129 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
34130
34131 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34132 seq_printf(seq, "%-#8x", dst->module_id);
34133 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34134 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34135 + seq_printf(seq, "%-.28s", dst->module_name_version);
34136 + seq_printf(seq, "%-.8s", dst->date);
34137 seq_printf(seq, "%8d ", dst->module_size);
34138 seq_printf(seq, "%8d ", dst->mpb_size);
34139 seq_printf(seq, "0x%04x", dst->module_flags);
34140 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
34141 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34142 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34143 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34144 - seq_printf(seq, "Vendor info : %s\n",
34145 - chtostr((u8 *) (work32 + 2), 16));
34146 - seq_printf(seq, "Product info : %s\n",
34147 - chtostr((u8 *) (work32 + 6), 16));
34148 - seq_printf(seq, "Description : %s\n",
34149 - chtostr((u8 *) (work32 + 10), 16));
34150 - seq_printf(seq, "Product rev. : %s\n",
34151 - chtostr((u8 *) (work32 + 14), 8));
34152 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34153 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34154 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34155 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34156
34157 seq_printf(seq, "Serial number : ");
34158 print_serial_number(seq, (u8 *) (work32 + 16),
34159 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
34160 }
34161
34162 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34163 - seq_printf(seq, "Module name : %s\n",
34164 - chtostr(result.module_name, 24));
34165 - seq_printf(seq, "Module revision : %s\n",
34166 - chtostr(result.module_rev, 8));
34167 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
34168 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34169
34170 seq_printf(seq, "Serial number : ");
34171 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
34172 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
34173 return 0;
34174 }
34175
34176 - seq_printf(seq, "Device name : %s\n",
34177 - chtostr(result.device_name, 64));
34178 - seq_printf(seq, "Service name : %s\n",
34179 - chtostr(result.service_name, 64));
34180 - seq_printf(seq, "Physical name : %s\n",
34181 - chtostr(result.physical_location, 64));
34182 - seq_printf(seq, "Instance number : %s\n",
34183 - chtostr(result.instance_number, 4));
34184 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
34185 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
34186 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34187 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34188
34189 return 0;
34190 }
34191 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34192 index a8c08f3..155fe3d 100644
34193 --- a/drivers/message/i2o/iop.c
34194 +++ b/drivers/message/i2o/iop.c
34195 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
34196
34197 spin_lock_irqsave(&c->context_list_lock, flags);
34198
34199 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34200 - atomic_inc(&c->context_list_counter);
34201 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34202 + atomic_inc_unchecked(&c->context_list_counter);
34203
34204 - entry->context = atomic_read(&c->context_list_counter);
34205 + entry->context = atomic_read_unchecked(&c->context_list_counter);
34206
34207 list_add(&entry->list, &c->context_list);
34208
34209 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
34210
34211 #if BITS_PER_LONG == 64
34212 spin_lock_init(&c->context_list_lock);
34213 - atomic_set(&c->context_list_counter, 0);
34214 + atomic_set_unchecked(&c->context_list_counter, 0);
34215 INIT_LIST_HEAD(&c->context_list);
34216 #endif
34217
34218 diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
34219 index 7ce65f4..e66e9bc 100644
34220 --- a/drivers/mfd/abx500-core.c
34221 +++ b/drivers/mfd/abx500-core.c
34222 @@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
34223
34224 struct abx500_device_entry {
34225 struct list_head list;
34226 - struct abx500_ops ops;
34227 + abx500_ops_no_const ops;
34228 struct device *dev;
34229 };
34230
34231 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
34232 index a9223ed..4127b13 100644
34233 --- a/drivers/mfd/janz-cmodio.c
34234 +++ b/drivers/mfd/janz-cmodio.c
34235 @@ -13,6 +13,7 @@
34236
34237 #include <linux/kernel.h>
34238 #include <linux/module.h>
34239 +#include <linux/slab.h>
34240 #include <linux/init.h>
34241 #include <linux/pci.h>
34242 #include <linux/interrupt.h>
34243 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
34244 index a981e2a..5ca0c8b 100644
34245 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
34246 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
34247 @@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
34248 * the lid is closed. This leads to interrupts as soon as a little move
34249 * is done.
34250 */
34251 - atomic_inc(&lis3->count);
34252 + atomic_inc_unchecked(&lis3->count);
34253
34254 wake_up_interruptible(&lis3->misc_wait);
34255 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
34256 @@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
34257 if (lis3->pm_dev)
34258 pm_runtime_get_sync(lis3->pm_dev);
34259
34260 - atomic_set(&lis3->count, 0);
34261 + atomic_set_unchecked(&lis3->count, 0);
34262 return 0;
34263 }
34264
34265 @@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
34266 add_wait_queue(&lis3->misc_wait, &wait);
34267 while (true) {
34268 set_current_state(TASK_INTERRUPTIBLE);
34269 - data = atomic_xchg(&lis3->count, 0);
34270 + data = atomic_xchg_unchecked(&lis3->count, 0);
34271 if (data)
34272 break;
34273
34274 @@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
34275 struct lis3lv02d, miscdev);
34276
34277 poll_wait(file, &lis3->misc_wait, wait);
34278 - if (atomic_read(&lis3->count))
34279 + if (atomic_read_unchecked(&lis3->count))
34280 return POLLIN | POLLRDNORM;
34281 return 0;
34282 }
34283 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
34284 index 2b1482a..5d33616 100644
34285 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
34286 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
34287 @@ -266,7 +266,7 @@ struct lis3lv02d {
34288 struct input_polled_dev *idev; /* input device */
34289 struct platform_device *pdev; /* platform device */
34290 struct regulator_bulk_data regulators[2];
34291 - atomic_t count; /* interrupt count after last read */
34292 + atomic_unchecked_t count; /* interrupt count after last read */
34293 union axis_conversion ac; /* hw -> logical axis */
34294 int mapped_btns[3];
34295
34296 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34297 index 2f30bad..c4c13d0 100644
34298 --- a/drivers/misc/sgi-gru/gruhandles.c
34299 +++ b/drivers/misc/sgi-gru/gruhandles.c
34300 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
34301 unsigned long nsec;
34302
34303 nsec = CLKS2NSEC(clks);
34304 - atomic_long_inc(&mcs_op_statistics[op].count);
34305 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
34306 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34307 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34308 if (mcs_op_statistics[op].max < nsec)
34309 mcs_op_statistics[op].max = nsec;
34310 }
34311 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
34312 index 950dbe9..eeef0f8 100644
34313 --- a/drivers/misc/sgi-gru/gruprocfs.c
34314 +++ b/drivers/misc/sgi-gru/gruprocfs.c
34315 @@ -32,9 +32,9 @@
34316
34317 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34318
34319 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34320 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34321 {
34322 - unsigned long val = atomic_long_read(v);
34323 + unsigned long val = atomic_long_read_unchecked(v);
34324
34325 seq_printf(s, "%16lu %s\n", val, id);
34326 }
34327 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
34328
34329 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34330 for (op = 0; op < mcsop_last; op++) {
34331 - count = atomic_long_read(&mcs_op_statistics[op].count);
34332 - total = atomic_long_read(&mcs_op_statistics[op].total);
34333 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34334 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34335 max = mcs_op_statistics[op].max;
34336 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34337 count ? total / count : 0, max);
34338 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34339 index 5c3ce24..4915ccb 100644
34340 --- a/drivers/misc/sgi-gru/grutables.h
34341 +++ b/drivers/misc/sgi-gru/grutables.h
34342 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34343 * GRU statistics.
34344 */
34345 struct gru_stats_s {
34346 - atomic_long_t vdata_alloc;
34347 - atomic_long_t vdata_free;
34348 - atomic_long_t gts_alloc;
34349 - atomic_long_t gts_free;
34350 - atomic_long_t gms_alloc;
34351 - atomic_long_t gms_free;
34352 - atomic_long_t gts_double_allocate;
34353 - atomic_long_t assign_context;
34354 - atomic_long_t assign_context_failed;
34355 - atomic_long_t free_context;
34356 - atomic_long_t load_user_context;
34357 - atomic_long_t load_kernel_context;
34358 - atomic_long_t lock_kernel_context;
34359 - atomic_long_t unlock_kernel_context;
34360 - atomic_long_t steal_user_context;
34361 - atomic_long_t steal_kernel_context;
34362 - atomic_long_t steal_context_failed;
34363 - atomic_long_t nopfn;
34364 - atomic_long_t asid_new;
34365 - atomic_long_t asid_next;
34366 - atomic_long_t asid_wrap;
34367 - atomic_long_t asid_reuse;
34368 - atomic_long_t intr;
34369 - atomic_long_t intr_cbr;
34370 - atomic_long_t intr_tfh;
34371 - atomic_long_t intr_spurious;
34372 - atomic_long_t intr_mm_lock_failed;
34373 - atomic_long_t call_os;
34374 - atomic_long_t call_os_wait_queue;
34375 - atomic_long_t user_flush_tlb;
34376 - atomic_long_t user_unload_context;
34377 - atomic_long_t user_exception;
34378 - atomic_long_t set_context_option;
34379 - atomic_long_t check_context_retarget_intr;
34380 - atomic_long_t check_context_unload;
34381 - atomic_long_t tlb_dropin;
34382 - atomic_long_t tlb_preload_page;
34383 - atomic_long_t tlb_dropin_fail_no_asid;
34384 - atomic_long_t tlb_dropin_fail_upm;
34385 - atomic_long_t tlb_dropin_fail_invalid;
34386 - atomic_long_t tlb_dropin_fail_range_active;
34387 - atomic_long_t tlb_dropin_fail_idle;
34388 - atomic_long_t tlb_dropin_fail_fmm;
34389 - atomic_long_t tlb_dropin_fail_no_exception;
34390 - atomic_long_t tfh_stale_on_fault;
34391 - atomic_long_t mmu_invalidate_range;
34392 - atomic_long_t mmu_invalidate_page;
34393 - atomic_long_t flush_tlb;
34394 - atomic_long_t flush_tlb_gru;
34395 - atomic_long_t flush_tlb_gru_tgh;
34396 - atomic_long_t flush_tlb_gru_zero_asid;
34397 + atomic_long_unchecked_t vdata_alloc;
34398 + atomic_long_unchecked_t vdata_free;
34399 + atomic_long_unchecked_t gts_alloc;
34400 + atomic_long_unchecked_t gts_free;
34401 + atomic_long_unchecked_t gms_alloc;
34402 + atomic_long_unchecked_t gms_free;
34403 + atomic_long_unchecked_t gts_double_allocate;
34404 + atomic_long_unchecked_t assign_context;
34405 + atomic_long_unchecked_t assign_context_failed;
34406 + atomic_long_unchecked_t free_context;
34407 + atomic_long_unchecked_t load_user_context;
34408 + atomic_long_unchecked_t load_kernel_context;
34409 + atomic_long_unchecked_t lock_kernel_context;
34410 + atomic_long_unchecked_t unlock_kernel_context;
34411 + atomic_long_unchecked_t steal_user_context;
34412 + atomic_long_unchecked_t steal_kernel_context;
34413 + atomic_long_unchecked_t steal_context_failed;
34414 + atomic_long_unchecked_t nopfn;
34415 + atomic_long_unchecked_t asid_new;
34416 + atomic_long_unchecked_t asid_next;
34417 + atomic_long_unchecked_t asid_wrap;
34418 + atomic_long_unchecked_t asid_reuse;
34419 + atomic_long_unchecked_t intr;
34420 + atomic_long_unchecked_t intr_cbr;
34421 + atomic_long_unchecked_t intr_tfh;
34422 + atomic_long_unchecked_t intr_spurious;
34423 + atomic_long_unchecked_t intr_mm_lock_failed;
34424 + atomic_long_unchecked_t call_os;
34425 + atomic_long_unchecked_t call_os_wait_queue;
34426 + atomic_long_unchecked_t user_flush_tlb;
34427 + atomic_long_unchecked_t user_unload_context;
34428 + atomic_long_unchecked_t user_exception;
34429 + atomic_long_unchecked_t set_context_option;
34430 + atomic_long_unchecked_t check_context_retarget_intr;
34431 + atomic_long_unchecked_t check_context_unload;
34432 + atomic_long_unchecked_t tlb_dropin;
34433 + atomic_long_unchecked_t tlb_preload_page;
34434 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34435 + atomic_long_unchecked_t tlb_dropin_fail_upm;
34436 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
34437 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
34438 + atomic_long_unchecked_t tlb_dropin_fail_idle;
34439 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
34440 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34441 + atomic_long_unchecked_t tfh_stale_on_fault;
34442 + atomic_long_unchecked_t mmu_invalidate_range;
34443 + atomic_long_unchecked_t mmu_invalidate_page;
34444 + atomic_long_unchecked_t flush_tlb;
34445 + atomic_long_unchecked_t flush_tlb_gru;
34446 + atomic_long_unchecked_t flush_tlb_gru_tgh;
34447 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
34448
34449 - atomic_long_t copy_gpa;
34450 - atomic_long_t read_gpa;
34451 + atomic_long_unchecked_t copy_gpa;
34452 + atomic_long_unchecked_t read_gpa;
34453
34454 - atomic_long_t mesq_receive;
34455 - atomic_long_t mesq_receive_none;
34456 - atomic_long_t mesq_send;
34457 - atomic_long_t mesq_send_failed;
34458 - atomic_long_t mesq_noop;
34459 - atomic_long_t mesq_send_unexpected_error;
34460 - atomic_long_t mesq_send_lb_overflow;
34461 - atomic_long_t mesq_send_qlimit_reached;
34462 - atomic_long_t mesq_send_amo_nacked;
34463 - atomic_long_t mesq_send_put_nacked;
34464 - atomic_long_t mesq_page_overflow;
34465 - atomic_long_t mesq_qf_locked;
34466 - atomic_long_t mesq_qf_noop_not_full;
34467 - atomic_long_t mesq_qf_switch_head_failed;
34468 - atomic_long_t mesq_qf_unexpected_error;
34469 - atomic_long_t mesq_noop_unexpected_error;
34470 - atomic_long_t mesq_noop_lb_overflow;
34471 - atomic_long_t mesq_noop_qlimit_reached;
34472 - atomic_long_t mesq_noop_amo_nacked;
34473 - atomic_long_t mesq_noop_put_nacked;
34474 - atomic_long_t mesq_noop_page_overflow;
34475 + atomic_long_unchecked_t mesq_receive;
34476 + atomic_long_unchecked_t mesq_receive_none;
34477 + atomic_long_unchecked_t mesq_send;
34478 + atomic_long_unchecked_t mesq_send_failed;
34479 + atomic_long_unchecked_t mesq_noop;
34480 + atomic_long_unchecked_t mesq_send_unexpected_error;
34481 + atomic_long_unchecked_t mesq_send_lb_overflow;
34482 + atomic_long_unchecked_t mesq_send_qlimit_reached;
34483 + atomic_long_unchecked_t mesq_send_amo_nacked;
34484 + atomic_long_unchecked_t mesq_send_put_nacked;
34485 + atomic_long_unchecked_t mesq_page_overflow;
34486 + atomic_long_unchecked_t mesq_qf_locked;
34487 + atomic_long_unchecked_t mesq_qf_noop_not_full;
34488 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
34489 + atomic_long_unchecked_t mesq_qf_unexpected_error;
34490 + atomic_long_unchecked_t mesq_noop_unexpected_error;
34491 + atomic_long_unchecked_t mesq_noop_lb_overflow;
34492 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
34493 + atomic_long_unchecked_t mesq_noop_amo_nacked;
34494 + atomic_long_unchecked_t mesq_noop_put_nacked;
34495 + atomic_long_unchecked_t mesq_noop_page_overflow;
34496
34497 };
34498
34499 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
34500 tghop_invalidate, mcsop_last};
34501
34502 struct mcs_op_statistic {
34503 - atomic_long_t count;
34504 - atomic_long_t total;
34505 + atomic_long_unchecked_t count;
34506 + atomic_long_unchecked_t total;
34507 unsigned long max;
34508 };
34509
34510 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
34511
34512 #define STAT(id) do { \
34513 if (gru_options & OPT_STATS) \
34514 - atomic_long_inc(&gru_stats.id); \
34515 + atomic_long_inc_unchecked(&gru_stats.id); \
34516 } while (0)
34517
34518 #ifdef CONFIG_SGI_GRU_DEBUG
34519 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
34520 index c862cd4..0d176fe 100644
34521 --- a/drivers/misc/sgi-xp/xp.h
34522 +++ b/drivers/misc/sgi-xp/xp.h
34523 @@ -288,7 +288,7 @@ struct xpc_interface {
34524 xpc_notify_func, void *);
34525 void (*received) (short, int, void *);
34526 enum xp_retval (*partid_to_nasids) (short, void *);
34527 -};
34528 +} __no_const;
34529
34530 extern struct xpc_interface xpc_interface;
34531
34532 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34533 index b94d5f7..7f494c5 100644
34534 --- a/drivers/misc/sgi-xp/xpc.h
34535 +++ b/drivers/misc/sgi-xp/xpc.h
34536 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
34537 void (*received_payload) (struct xpc_channel *, void *);
34538 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34539 };
34540 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34541
34542 /* struct xpc_partition act_state values (for XPC HB) */
34543
34544 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
34545 /* found in xpc_main.c */
34546 extern struct device *xpc_part;
34547 extern struct device *xpc_chan;
34548 -extern struct xpc_arch_operations xpc_arch_ops;
34549 +extern xpc_arch_operations_no_const xpc_arch_ops;
34550 extern int xpc_disengage_timelimit;
34551 extern int xpc_disengage_timedout;
34552 extern int xpc_activate_IRQ_rcvd;
34553 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34554 index 8d082b4..aa749ae 100644
34555 --- a/drivers/misc/sgi-xp/xpc_main.c
34556 +++ b/drivers/misc/sgi-xp/xpc_main.c
34557 @@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
34558 .notifier_call = xpc_system_die,
34559 };
34560
34561 -struct xpc_arch_operations xpc_arch_ops;
34562 +xpc_arch_operations_no_const xpc_arch_ops;
34563
34564 /*
34565 * Timer function to enforce the timelimit on the partition disengage.
34566 diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
34567 index 69ef0be..f3ef91e 100644
34568 --- a/drivers/mmc/host/sdhci-pci.c
34569 +++ b/drivers/mmc/host/sdhci-pci.c
34570 @@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
34571 .probe = via_probe,
34572 };
34573
34574 -static const struct pci_device_id pci_ids[] __devinitdata = {
34575 +static const struct pci_device_id pci_ids[] __devinitconst = {
34576 {
34577 .vendor = PCI_VENDOR_ID_RICOH,
34578 .device = PCI_DEVICE_ID_RICOH_R5C822,
34579 diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
34580 index a4eb8b5..8c0628f 100644
34581 --- a/drivers/mtd/devices/doc2000.c
34582 +++ b/drivers/mtd/devices/doc2000.c
34583 @@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
34584
34585 /* The ECC will not be calculated correctly if less than 512 is written */
34586 /* DBB-
34587 - if (len != 0x200 && eccbuf)
34588 + if (len != 0x200)
34589 printk(KERN_WARNING
34590 "ECC needs a full sector write (adr: %lx size %lx)\n",
34591 (long) to, (long) len);
34592 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
34593 index a9e57d6..c6d8731 100644
34594 --- a/drivers/mtd/nand/denali.c
34595 +++ b/drivers/mtd/nand/denali.c
34596 @@ -26,6 +26,7 @@
34597 #include <linux/pci.h>
34598 #include <linux/mtd/mtd.h>
34599 #include <linux/module.h>
34600 +#include <linux/slab.h>
34601
34602 #include "denali.h"
34603
34604 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
34605 index 51b9d6a..52af9a7 100644
34606 --- a/drivers/mtd/nftlmount.c
34607 +++ b/drivers/mtd/nftlmount.c
34608 @@ -24,6 +24,7 @@
34609 #include <asm/errno.h>
34610 #include <linux/delay.h>
34611 #include <linux/slab.h>
34612 +#include <linux/sched.h>
34613 #include <linux/mtd/mtd.h>
34614 #include <linux/mtd/nand.h>
34615 #include <linux/mtd/nftl.h>
34616 diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
34617 index 6762dc4..9956862 100644
34618 --- a/drivers/net/ethernet/atheros/atlx/atl2.c
34619 +++ b/drivers/net/ethernet/atheros/atlx/atl2.c
34620 @@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
34621 */
34622
34623 #define ATL2_PARAM(X, desc) \
34624 - static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34625 + static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34626 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34627 MODULE_PARM_DESC(X, desc);
34628 #else
34629 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34630 index 61a7670..7da6e34 100644
34631 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34632 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34633 @@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
34634
34635 int (*wait_comp)(struct bnx2x *bp,
34636 struct bnx2x_rx_mode_ramrod_params *p);
34637 -};
34638 +} __no_const;
34639
34640 /********************** Set multicast group ***********************************/
34641
34642 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
34643 index 93865f8..5448741 100644
34644 --- a/drivers/net/ethernet/broadcom/tg3.h
34645 +++ b/drivers/net/ethernet/broadcom/tg3.h
34646 @@ -140,6 +140,7 @@
34647 #define CHIPREV_ID_5750_A0 0x4000
34648 #define CHIPREV_ID_5750_A1 0x4001
34649 #define CHIPREV_ID_5750_A3 0x4003
34650 +#define CHIPREV_ID_5750_C1 0x4201
34651 #define CHIPREV_ID_5750_C2 0x4202
34652 #define CHIPREV_ID_5752_A0_HW 0x5000
34653 #define CHIPREV_ID_5752_A0 0x6000
34654 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34655 index c4e8643..0979484 100644
34656 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34657 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34658 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
34659 */
34660 struct l2t_skb_cb {
34661 arp_failure_handler_func arp_failure_handler;
34662 -};
34663 +} __no_const;
34664
34665 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34666
34667 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
34668 index 18b106c..2b38d36 100644
34669 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
34670 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
34671 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34672 for (i=0; i<ETH_ALEN; i++) {
34673 tmp.addr[i] = dev->dev_addr[i];
34674 }
34675 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34676 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34677 break;
34678
34679 case DE4X5_SET_HWADDR: /* Set the hardware address */
34680 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
34681 spin_lock_irqsave(&lp->lock, flags);
34682 memcpy(&statbuf, &lp->pktStats, ioc->len);
34683 spin_unlock_irqrestore(&lp->lock, flags);
34684 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
34685 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34686 return -EFAULT;
34687 break;
34688 }
34689 diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
34690 index ed7d1dc..d426748 100644
34691 --- a/drivers/net/ethernet/dec/tulip/eeprom.c
34692 +++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34693 @@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34694 {NULL}};
34695
34696
34697 -static const char *block_name[] __devinitdata = {
34698 +static const char *block_name[] __devinitconst = {
34699 "21140 non-MII",
34700 "21140 MII PHY",
34701 "21142 Serial PHY",
34702 diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
34703 index 2ac6fff..2d127d0 100644
34704 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34705 +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34706 @@ -236,7 +236,7 @@ struct pci_id_info {
34707 int drv_flags; /* Driver use, intended as capability flags. */
34708 };
34709
34710 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34711 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34712 { /* Sometime a Level-One switch card. */
34713 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34714 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34715 diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
34716 index d783f4f..97fa1b0 100644
34717 --- a/drivers/net/ethernet/dlink/sundance.c
34718 +++ b/drivers/net/ethernet/dlink/sundance.c
34719 @@ -218,7 +218,7 @@ enum {
34720 struct pci_id_info {
34721 const char *name;
34722 };
34723 -static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34724 +static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34725 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34726 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34727 {"D-Link DFE-580TX 4 port Server Adapter"},
34728 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
34729 index 1bbf6b3..430dcd0 100644
34730 --- a/drivers/net/ethernet/emulex/benet/be_main.c
34731 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
34732 @@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
34733
34734 if (wrapped)
34735 newacc += 65536;
34736 - ACCESS_ONCE(*acc) = newacc;
34737 + ACCESS_ONCE_RW(*acc) = newacc;
34738 }
34739
34740 void be_parse_stats(struct be_adapter *adapter)
34741 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
34742 index 16b0704..d2c07d7 100644
34743 --- a/drivers/net/ethernet/faraday/ftgmac100.c
34744 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
34745 @@ -31,6 +31,8 @@
34746 #include <linux/netdevice.h>
34747 #include <linux/phy.h>
34748 #include <linux/platform_device.h>
34749 +#include <linux/interrupt.h>
34750 +#include <linux/irqreturn.h>
34751 #include <net/ip.h>
34752
34753 #include "ftgmac100.h"
34754 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
34755 index 829b109..4ae5f6a 100644
34756 --- a/drivers/net/ethernet/faraday/ftmac100.c
34757 +++ b/drivers/net/ethernet/faraday/ftmac100.c
34758 @@ -31,6 +31,8 @@
34759 #include <linux/module.h>
34760 #include <linux/netdevice.h>
34761 #include <linux/platform_device.h>
34762 +#include <linux/interrupt.h>
34763 +#include <linux/irqreturn.h>
34764
34765 #include "ftmac100.h"
34766
34767 diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
34768 index 1637b98..c42f87b 100644
34769 --- a/drivers/net/ethernet/fealnx.c
34770 +++ b/drivers/net/ethernet/fealnx.c
34771 @@ -150,7 +150,7 @@ struct chip_info {
34772 int flags;
34773 };
34774
34775 -static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34776 +static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34777 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34778 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34779 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34780 diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34781 index b83897f..b2d970f 100644
34782 --- a/drivers/net/ethernet/intel/e1000e/e1000.h
34783 +++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34784 @@ -181,7 +181,7 @@ struct e1000_info;
34785 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34786 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34787 E1000_TXDCTL_COUNT_DESC | \
34788 - (5 << 16) | /* wthresh must be +1 more than desired */\
34789 + (1 << 16) | /* wthresh must be +1 more than desired */\
34790 (1 << 8) | /* hthresh */ \
34791 0x1f) /* pthresh */
34792
34793 diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
34794 index f82ecf5..7d59ecb 100644
34795 --- a/drivers/net/ethernet/intel/e1000e/hw.h
34796 +++ b/drivers/net/ethernet/intel/e1000e/hw.h
34797 @@ -784,6 +784,7 @@ struct e1000_mac_operations {
34798 void (*config_collision_dist)(struct e1000_hw *);
34799 s32 (*read_mac_addr)(struct e1000_hw *);
34800 };
34801 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34802
34803 /*
34804 * When to use various PHY register access functions:
34805 @@ -824,6 +825,7 @@ struct e1000_phy_operations {
34806 void (*power_up)(struct e1000_hw *);
34807 void (*power_down)(struct e1000_hw *);
34808 };
34809 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34810
34811 /* Function pointers for the NVM. */
34812 struct e1000_nvm_operations {
34813 @@ -836,9 +838,10 @@ struct e1000_nvm_operations {
34814 s32 (*validate)(struct e1000_hw *);
34815 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34816 };
34817 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34818
34819 struct e1000_mac_info {
34820 - struct e1000_mac_operations ops;
34821 + e1000_mac_operations_no_const ops;
34822 u8 addr[ETH_ALEN];
34823 u8 perm_addr[ETH_ALEN];
34824
34825 @@ -879,7 +882,7 @@ struct e1000_mac_info {
34826 };
34827
34828 struct e1000_phy_info {
34829 - struct e1000_phy_operations ops;
34830 + e1000_phy_operations_no_const ops;
34831
34832 enum e1000_phy_type type;
34833
34834 @@ -913,7 +916,7 @@ struct e1000_phy_info {
34835 };
34836
34837 struct e1000_nvm_info {
34838 - struct e1000_nvm_operations ops;
34839 + e1000_nvm_operations_no_const ops;
34840
34841 enum e1000_nvm_type type;
34842 enum e1000_nvm_override override;
34843 diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
34844 index f67cbd3..cef9e3d 100644
34845 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34846 +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
34847 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
34848 s32 (*read_mac_addr)(struct e1000_hw *);
34849 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
34850 };
34851 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34852
34853 struct e1000_phy_operations {
34854 s32 (*acquire)(struct e1000_hw *);
34855 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
34856 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34857 s32 (*write_reg)(struct e1000_hw *, u32, u16);
34858 };
34859 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
34860
34861 struct e1000_nvm_operations {
34862 s32 (*acquire)(struct e1000_hw *);
34863 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34864 s32 (*update)(struct e1000_hw *);
34865 s32 (*validate)(struct e1000_hw *);
34866 };
34867 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
34868
34869 struct e1000_info {
34870 s32 (*get_invariants)(struct e1000_hw *);
34871 @@ -350,7 +353,7 @@ struct e1000_info {
34872 extern const struct e1000_info e1000_82575_info;
34873
34874 struct e1000_mac_info {
34875 - struct e1000_mac_operations ops;
34876 + e1000_mac_operations_no_const ops;
34877
34878 u8 addr[6];
34879 u8 perm_addr[6];
34880 @@ -388,7 +391,7 @@ struct e1000_mac_info {
34881 };
34882
34883 struct e1000_phy_info {
34884 - struct e1000_phy_operations ops;
34885 + e1000_phy_operations_no_const ops;
34886
34887 enum e1000_phy_type type;
34888
34889 @@ -423,7 +426,7 @@ struct e1000_phy_info {
34890 };
34891
34892 struct e1000_nvm_info {
34893 - struct e1000_nvm_operations ops;
34894 + e1000_nvm_operations_no_const ops;
34895 enum e1000_nvm_type type;
34896 enum e1000_nvm_override override;
34897
34898 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34899 s32 (*check_for_ack)(struct e1000_hw *, u16);
34900 s32 (*check_for_rst)(struct e1000_hw *, u16);
34901 };
34902 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34903
34904 struct e1000_mbx_stats {
34905 u32 msgs_tx;
34906 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
34907 };
34908
34909 struct e1000_mbx_info {
34910 - struct e1000_mbx_operations ops;
34911 + e1000_mbx_operations_no_const ops;
34912 struct e1000_mbx_stats stats;
34913 u32 timeout;
34914 u32 usec_delay;
34915 diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
34916 index 57db3c6..aa825fc 100644
34917 --- a/drivers/net/ethernet/intel/igbvf/vf.h
34918 +++ b/drivers/net/ethernet/intel/igbvf/vf.h
34919 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
34920 s32 (*read_mac_addr)(struct e1000_hw *);
34921 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34922 };
34923 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34924
34925 struct e1000_mac_info {
34926 - struct e1000_mac_operations ops;
34927 + e1000_mac_operations_no_const ops;
34928 u8 addr[6];
34929 u8 perm_addr[6];
34930
34931 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34932 s32 (*check_for_ack)(struct e1000_hw *);
34933 s32 (*check_for_rst)(struct e1000_hw *);
34934 };
34935 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34936
34937 struct e1000_mbx_stats {
34938 u32 msgs_tx;
34939 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34940 };
34941
34942 struct e1000_mbx_info {
34943 - struct e1000_mbx_operations ops;
34944 + e1000_mbx_operations_no_const ops;
34945 struct e1000_mbx_stats stats;
34946 u32 timeout;
34947 u32 usec_delay;
34948 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34949 index 8636e83..ab9bbc3 100644
34950 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34951 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34952 @@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
34953 s32 (*update_checksum)(struct ixgbe_hw *);
34954 u16 (*calc_checksum)(struct ixgbe_hw *);
34955 };
34956 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
34957
34958 struct ixgbe_mac_operations {
34959 s32 (*init_hw)(struct ixgbe_hw *);
34960 @@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
34961 /* Manageability interface */
34962 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
34963 };
34964 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
34965
34966 struct ixgbe_phy_operations {
34967 s32 (*identify)(struct ixgbe_hw *);
34968 @@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
34969 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34970 s32 (*check_overtemp)(struct ixgbe_hw *);
34971 };
34972 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
34973
34974 struct ixgbe_eeprom_info {
34975 - struct ixgbe_eeprom_operations ops;
34976 + ixgbe_eeprom_operations_no_const ops;
34977 enum ixgbe_eeprom_type type;
34978 u32 semaphore_delay;
34979 u16 word_size;
34980 @@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
34981
34982 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
34983 struct ixgbe_mac_info {
34984 - struct ixgbe_mac_operations ops;
34985 + ixgbe_mac_operations_no_const ops;
34986 enum ixgbe_mac_type type;
34987 u8 addr[ETH_ALEN];
34988 u8 perm_addr[ETH_ALEN];
34989 @@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
34990 };
34991
34992 struct ixgbe_phy_info {
34993 - struct ixgbe_phy_operations ops;
34994 + ixgbe_phy_operations_no_const ops;
34995 struct mdio_if_info mdio;
34996 enum ixgbe_phy_type type;
34997 u32 id;
34998 @@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
34999 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35000 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
35001 };
35002 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35003
35004 struct ixgbe_mbx_stats {
35005 u32 msgs_tx;
35006 @@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
35007 };
35008
35009 struct ixgbe_mbx_info {
35010 - struct ixgbe_mbx_operations ops;
35011 + ixgbe_mbx_operations_no_const ops;
35012 struct ixgbe_mbx_stats stats;
35013 u32 timeout;
35014 u32 usec_delay;
35015 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35016 index 307611a..d8e4562 100644
35017 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35018 +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35019 @@ -969,8 +969,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
35020 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
35021 for (i = 0; i < q_vector->txr_count; i++) {
35022 tx_ring = &(adapter->tx_ring[r_idx]);
35023 - tx_ring->total_bytes = 0;
35024 - tx_ring->total_packets = 0;
35025 ixgbevf_clean_tx_irq(adapter, tx_ring);
35026 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
35027 r_idx + 1);
35028 @@ -994,16 +992,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
35029 struct ixgbe_hw *hw = &adapter->hw;
35030 struct ixgbevf_ring *rx_ring;
35031 int r_idx;
35032 - int i;
35033 -
35034 - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
35035 - for (i = 0; i < q_vector->rxr_count; i++) {
35036 - rx_ring = &(adapter->rx_ring[r_idx]);
35037 - rx_ring->total_bytes = 0;
35038 - rx_ring->total_packets = 0;
35039 - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
35040 - r_idx + 1);
35041 - }
35042
35043 if (!q_vector->rxr_count)
35044 return IRQ_HANDLED;
35045 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
35046 index 25c951d..cc7cf33 100644
35047 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35048 +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
35049 @@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35050 s32 (*clear_vfta)(struct ixgbe_hw *);
35051 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
35052 };
35053 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
35054
35055 enum ixgbe_mac_type {
35056 ixgbe_mac_unknown = 0,
35057 @@ -79,7 +80,7 @@ enum ixgbe_mac_type {
35058 };
35059
35060 struct ixgbe_mac_info {
35061 - struct ixgbe_mac_operations ops;
35062 + ixgbe_mac_operations_no_const ops;
35063 u8 addr[6];
35064 u8 perm_addr[6];
35065
35066 @@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35067 s32 (*check_for_ack)(struct ixgbe_hw *);
35068 s32 (*check_for_rst)(struct ixgbe_hw *);
35069 };
35070 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35071
35072 struct ixgbe_mbx_stats {
35073 u32 msgs_tx;
35074 @@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
35075 };
35076
35077 struct ixgbe_mbx_info {
35078 - struct ixgbe_mbx_operations ops;
35079 + ixgbe_mbx_operations_no_const ops;
35080 struct ixgbe_mbx_stats stats;
35081 u32 timeout;
35082 u32 udelay;
35083 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
35084 index 8bb05b4..074796f 100644
35085 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
35086 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
35087 @@ -41,6 +41,7 @@
35088 #include <linux/slab.h>
35089 #include <linux/io-mapping.h>
35090 #include <linux/delay.h>
35091 +#include <linux/sched.h>
35092
35093 #include <linux/mlx4/device.h>
35094 #include <linux/mlx4/doorbell.h>
35095 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35096 index 5046a64..71ca936 100644
35097 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35098 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35099 @@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35100 void (*link_down)(struct __vxge_hw_device *devh);
35101 void (*crit_err)(struct __vxge_hw_device *devh,
35102 enum vxge_hw_event type, u64 ext_data);
35103 -};
35104 +} __no_const;
35105
35106 /*
35107 * struct __vxge_hw_blockpool_entry - Block private data structure
35108 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35109 index 4a518a3..936b334 100644
35110 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35111 +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35112 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35113 struct vxge_hw_mempool_dma *dma_object,
35114 u32 index,
35115 u32 is_last);
35116 -};
35117 +} __no_const;
35118
35119 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35120 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35121 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
35122 index 161e045..0bb5b86 100644
35123 --- a/drivers/net/ethernet/realtek/r8169.c
35124 +++ b/drivers/net/ethernet/realtek/r8169.c
35125 @@ -708,17 +708,17 @@ struct rtl8169_private {
35126 struct mdio_ops {
35127 void (*write)(void __iomem *, int, int);
35128 int (*read)(void __iomem *, int);
35129 - } mdio_ops;
35130 + } __no_const mdio_ops;
35131
35132 struct pll_power_ops {
35133 void (*down)(struct rtl8169_private *);
35134 void (*up)(struct rtl8169_private *);
35135 - } pll_power_ops;
35136 + } __no_const pll_power_ops;
35137
35138 struct jumbo_ops {
35139 void (*enable)(struct rtl8169_private *);
35140 void (*disable)(struct rtl8169_private *);
35141 - } jumbo_ops;
35142 + } __no_const jumbo_ops;
35143
35144 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35145 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35146 diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
35147 index a9deda8..5507c31 100644
35148 --- a/drivers/net/ethernet/sis/sis190.c
35149 +++ b/drivers/net/ethernet/sis/sis190.c
35150 @@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
35151 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35152 struct net_device *dev)
35153 {
35154 - static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35155 + static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35156 struct sis190_private *tp = netdev_priv(dev);
35157 struct pci_dev *isa_bridge;
35158 u8 reg, tmp8;
35159 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35160 index c07cfe9..81cbf7e 100644
35161 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35162 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35163 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
35164
35165 writel(value, ioaddr + MMC_CNTRL);
35166
35167 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35168 - MMC_CNTRL, value);
35169 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35170 +// MMC_CNTRL, value);
35171 }
35172
35173 /* To mask all all interrupts.*/
35174 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35175 index 9bdfaba..3d8f8d4 100644
35176 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35177 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35178 @@ -1587,7 +1587,7 @@ static const struct file_operations stmmac_rings_status_fops = {
35179 .open = stmmac_sysfs_ring_open,
35180 .read = seq_read,
35181 .llseek = seq_lseek,
35182 - .release = seq_release,
35183 + .release = single_release,
35184 };
35185
35186 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
35187 @@ -1659,7 +1659,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
35188 .open = stmmac_sysfs_dma_cap_open,
35189 .read = seq_read,
35190 .llseek = seq_lseek,
35191 - .release = seq_release,
35192 + .release = single_release,
35193 };
35194
35195 static int stmmac_init_fs(struct net_device *dev)
35196 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
35197 index c358245..8c1de63 100644
35198 --- a/drivers/net/hyperv/hyperv_net.h
35199 +++ b/drivers/net/hyperv/hyperv_net.h
35200 @@ -98,7 +98,7 @@ struct rndis_device {
35201
35202 enum rndis_device_state state;
35203 bool link_state;
35204 - atomic_t new_req_id;
35205 + atomic_unchecked_t new_req_id;
35206
35207 spinlock_t request_lock;
35208 struct list_head req_list;
35209 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
35210 index d6be64b..5d97e3b 100644
35211 --- a/drivers/net/hyperv/rndis_filter.c
35212 +++ b/drivers/net/hyperv/rndis_filter.c
35213 @@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
35214 * template
35215 */
35216 set = &rndis_msg->msg.set_req;
35217 - set->req_id = atomic_inc_return(&dev->new_req_id);
35218 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35219
35220 /* Add to the request list */
35221 spin_lock_irqsave(&dev->request_lock, flags);
35222 @@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
35223
35224 /* Setup the rndis set */
35225 halt = &request->request_msg.msg.halt_req;
35226 - halt->req_id = atomic_inc_return(&dev->new_req_id);
35227 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35228
35229 /* Ignore return since this msg is optional. */
35230 rndis_filter_send_request(dev, request);
35231 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
35232 index 21d7151..8034208 100644
35233 --- a/drivers/net/ppp/ppp_generic.c
35234 +++ b/drivers/net/ppp/ppp_generic.c
35235 @@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35236 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35237 struct ppp_stats stats;
35238 struct ppp_comp_stats cstats;
35239 - char *vers;
35240
35241 switch (cmd) {
35242 case SIOCGPPPSTATS:
35243 @@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
35244 break;
35245
35246 case SIOCGPPPVER:
35247 - vers = PPP_VERSION;
35248 - if (copy_to_user(addr, vers, strlen(vers) + 1))
35249 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35250 break;
35251 err = 0;
35252 break;
35253 diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
35254 index b715e6b..6d2490f 100644
35255 --- a/drivers/net/tokenring/abyss.c
35256 +++ b/drivers/net/tokenring/abyss.c
35257 @@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
35258
35259 static int __init abyss_init (void)
35260 {
35261 - abyss_netdev_ops = tms380tr_netdev_ops;
35262 + pax_open_kernel();
35263 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35264
35265 - abyss_netdev_ops.ndo_open = abyss_open;
35266 - abyss_netdev_ops.ndo_stop = abyss_close;
35267 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35268 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35269 + pax_close_kernel();
35270
35271 return pci_register_driver(&abyss_driver);
35272 }
35273 diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
35274 index 28adcdf..ae82f35 100644
35275 --- a/drivers/net/tokenring/madgemc.c
35276 +++ b/drivers/net/tokenring/madgemc.c
35277 @@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
35278
35279 static int __init madgemc_init (void)
35280 {
35281 - madgemc_netdev_ops = tms380tr_netdev_ops;
35282 - madgemc_netdev_ops.ndo_open = madgemc_open;
35283 - madgemc_netdev_ops.ndo_stop = madgemc_close;
35284 + pax_open_kernel();
35285 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35286 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35287 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35288 + pax_close_kernel();
35289
35290 return mca_register_driver (&madgemc_driver);
35291 }
35292 diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
35293 index 62d90e4..9d84237 100644
35294 --- a/drivers/net/tokenring/proteon.c
35295 +++ b/drivers/net/tokenring/proteon.c
35296 @@ -352,9 +352,11 @@ static int __init proteon_init(void)
35297 struct platform_device *pdev;
35298 int i, num = 0, err = 0;
35299
35300 - proteon_netdev_ops = tms380tr_netdev_ops;
35301 - proteon_netdev_ops.ndo_open = proteon_open;
35302 - proteon_netdev_ops.ndo_stop = tms380tr_close;
35303 + pax_open_kernel();
35304 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35305 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35306 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35307 + pax_close_kernel();
35308
35309 err = platform_driver_register(&proteon_driver);
35310 if (err)
35311 diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
35312 index ee11e93..c8f19c7 100644
35313 --- a/drivers/net/tokenring/skisa.c
35314 +++ b/drivers/net/tokenring/skisa.c
35315 @@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
35316 struct platform_device *pdev;
35317 int i, num = 0, err = 0;
35318
35319 - sk_isa_netdev_ops = tms380tr_netdev_ops;
35320 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
35321 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35322 + pax_open_kernel();
35323 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35324 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35325 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35326 + pax_close_kernel();
35327
35328 err = platform_driver_register(&sk_isa_driver);
35329 if (err)
35330 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
35331 index 2d2a688..35f2372 100644
35332 --- a/drivers/net/usb/hso.c
35333 +++ b/drivers/net/usb/hso.c
35334 @@ -71,7 +71,7 @@
35335 #include <asm/byteorder.h>
35336 #include <linux/serial_core.h>
35337 #include <linux/serial.h>
35338 -
35339 +#include <asm/local.h>
35340
35341 #define MOD_AUTHOR "Option Wireless"
35342 #define MOD_DESCRIPTION "USB High Speed Option driver"
35343 @@ -257,7 +257,7 @@ struct hso_serial {
35344
35345 /* from usb_serial_port */
35346 struct tty_struct *tty;
35347 - int open_count;
35348 + local_t open_count;
35349 spinlock_t serial_lock;
35350
35351 int (*write_data) (struct hso_serial *serial);
35352 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
35353 struct urb *urb;
35354
35355 urb = serial->rx_urb[0];
35356 - if (serial->open_count > 0) {
35357 + if (local_read(&serial->open_count) > 0) {
35358 count = put_rxbuf_data(urb, serial);
35359 if (count == -1)
35360 return;
35361 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
35362 DUMP1(urb->transfer_buffer, urb->actual_length);
35363
35364 /* Anyone listening? */
35365 - if (serial->open_count == 0)
35366 + if (local_read(&serial->open_count) == 0)
35367 return;
35368
35369 if (status == 0) {
35370 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35371 spin_unlock_irq(&serial->serial_lock);
35372
35373 /* check for port already opened, if not set the termios */
35374 - serial->open_count++;
35375 - if (serial->open_count == 1) {
35376 + if (local_inc_return(&serial->open_count) == 1) {
35377 serial->rx_state = RX_IDLE;
35378 /* Force default termio settings */
35379 _hso_serial_set_termios(tty, NULL);
35380 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
35381 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35382 if (result) {
35383 hso_stop_serial_device(serial->parent);
35384 - serial->open_count--;
35385 + local_dec(&serial->open_count);
35386 kref_put(&serial->parent->ref, hso_serial_ref_free);
35387 }
35388 } else {
35389 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
35390
35391 /* reset the rts and dtr */
35392 /* do the actual close */
35393 - serial->open_count--;
35394 + local_dec(&serial->open_count);
35395
35396 - if (serial->open_count <= 0) {
35397 - serial->open_count = 0;
35398 + if (local_read(&serial->open_count) <= 0) {
35399 + local_set(&serial->open_count, 0);
35400 spin_lock_irq(&serial->serial_lock);
35401 if (serial->tty == tty) {
35402 serial->tty->driver_data = NULL;
35403 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
35404
35405 /* the actual setup */
35406 spin_lock_irqsave(&serial->serial_lock, flags);
35407 - if (serial->open_count)
35408 + if (local_read(&serial->open_count))
35409 _hso_serial_set_termios(tty, old);
35410 else
35411 tty->termios = old;
35412 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
35413 D1("Pending read interrupt on port %d\n", i);
35414 spin_lock(&serial->serial_lock);
35415 if (serial->rx_state == RX_IDLE &&
35416 - serial->open_count > 0) {
35417 + local_read(&serial->open_count) > 0) {
35418 /* Setup and send a ctrl req read on
35419 * port i */
35420 if (!serial->rx_urb_filled[0]) {
35421 @@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
35422 /* Start all serial ports */
35423 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35424 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35425 - if (dev2ser(serial_table[i])->open_count) {
35426 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
35427 result =
35428 hso_start_serial_device(serial_table[i], GFP_NOIO);
35429 hso_kick_transmit(dev2ser(serial_table[i]));
35430 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
35431 index 420d69b..74f90a2 100644
35432 --- a/drivers/net/wireless/ath/ath.h
35433 +++ b/drivers/net/wireless/ath/ath.h
35434 @@ -119,6 +119,7 @@ struct ath_ops {
35435 void (*write_flush) (void *);
35436 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35437 };
35438 +typedef struct ath_ops __no_const ath_ops_no_const;
35439
35440 struct ath_common;
35441 struct ath_bus_ops;
35442 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35443 index aa2abaf..5f5152d 100644
35444 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35445 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35446 @@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35447 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35448 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35449
35450 - ACCESS_ONCE(ads->ds_link) = i->link;
35451 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35452 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
35453 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35454
35455 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35456 ctl6 = SM(i->keytype, AR_EncrType);
35457 @@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35458
35459 if ((i->is_first || i->is_last) &&
35460 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35461 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35462 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35463 | set11nTries(i->rates, 1)
35464 | set11nTries(i->rates, 2)
35465 | set11nTries(i->rates, 3)
35466 | (i->dur_update ? AR_DurUpdateEna : 0)
35467 | SM(0, AR_BurstDur);
35468
35469 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35470 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35471 | set11nRate(i->rates, 1)
35472 | set11nRate(i->rates, 2)
35473 | set11nRate(i->rates, 3);
35474 } else {
35475 - ACCESS_ONCE(ads->ds_ctl2) = 0;
35476 - ACCESS_ONCE(ads->ds_ctl3) = 0;
35477 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35478 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35479 }
35480
35481 if (!i->is_first) {
35482 - ACCESS_ONCE(ads->ds_ctl0) = 0;
35483 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35484 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35485 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35486 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35487 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35488 return;
35489 }
35490
35491 @@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35492 break;
35493 }
35494
35495 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35496 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35497 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35498 | SM(i->txpower, AR_XmitPower)
35499 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35500 @@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35501 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35502 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
35503
35504 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35505 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35506 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35507 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35508
35509 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35510 return;
35511
35512 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35513 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35514 | set11nPktDurRTSCTS(i->rates, 1);
35515
35516 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35517 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35518 | set11nPktDurRTSCTS(i->rates, 3);
35519
35520 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35521 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35522 | set11nRateFlags(i->rates, 1)
35523 | set11nRateFlags(i->rates, 2)
35524 | set11nRateFlags(i->rates, 3)
35525 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35526 index a66a13b..0ef399e 100644
35527 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35528 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35529 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35530 (i->qcu << AR_TxQcuNum_S) | desc_len;
35531
35532 checksum += val;
35533 - ACCESS_ONCE(ads->info) = val;
35534 + ACCESS_ONCE_RW(ads->info) = val;
35535
35536 checksum += i->link;
35537 - ACCESS_ONCE(ads->link) = i->link;
35538 + ACCESS_ONCE_RW(ads->link) = i->link;
35539
35540 checksum += i->buf_addr[0];
35541 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35542 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35543 checksum += i->buf_addr[1];
35544 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35545 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35546 checksum += i->buf_addr[2];
35547 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35548 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35549 checksum += i->buf_addr[3];
35550 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35551 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35552
35553 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35554 - ACCESS_ONCE(ads->ctl3) = val;
35555 + ACCESS_ONCE_RW(ads->ctl3) = val;
35556 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35557 - ACCESS_ONCE(ads->ctl5) = val;
35558 + ACCESS_ONCE_RW(ads->ctl5) = val;
35559 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35560 - ACCESS_ONCE(ads->ctl7) = val;
35561 + ACCESS_ONCE_RW(ads->ctl7) = val;
35562 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35563 - ACCESS_ONCE(ads->ctl9) = val;
35564 + ACCESS_ONCE_RW(ads->ctl9) = val;
35565
35566 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35567 - ACCESS_ONCE(ads->ctl10) = checksum;
35568 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
35569
35570 if (i->is_first || i->is_last) {
35571 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35572 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35573 | set11nTries(i->rates, 1)
35574 | set11nTries(i->rates, 2)
35575 | set11nTries(i->rates, 3)
35576 | (i->dur_update ? AR_DurUpdateEna : 0)
35577 | SM(0, AR_BurstDur);
35578
35579 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35580 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35581 | set11nRate(i->rates, 1)
35582 | set11nRate(i->rates, 2)
35583 | set11nRate(i->rates, 3);
35584 } else {
35585 - ACCESS_ONCE(ads->ctl13) = 0;
35586 - ACCESS_ONCE(ads->ctl14) = 0;
35587 + ACCESS_ONCE_RW(ads->ctl13) = 0;
35588 + ACCESS_ONCE_RW(ads->ctl14) = 0;
35589 }
35590
35591 ads->ctl20 = 0;
35592 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35593
35594 ctl17 = SM(i->keytype, AR_EncrType);
35595 if (!i->is_first) {
35596 - ACCESS_ONCE(ads->ctl11) = 0;
35597 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35598 - ACCESS_ONCE(ads->ctl15) = 0;
35599 - ACCESS_ONCE(ads->ctl16) = 0;
35600 - ACCESS_ONCE(ads->ctl17) = ctl17;
35601 - ACCESS_ONCE(ads->ctl18) = 0;
35602 - ACCESS_ONCE(ads->ctl19) = 0;
35603 + ACCESS_ONCE_RW(ads->ctl11) = 0;
35604 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35605 + ACCESS_ONCE_RW(ads->ctl15) = 0;
35606 + ACCESS_ONCE_RW(ads->ctl16) = 0;
35607 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35608 + ACCESS_ONCE_RW(ads->ctl18) = 0;
35609 + ACCESS_ONCE_RW(ads->ctl19) = 0;
35610 return;
35611 }
35612
35613 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35614 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35615 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35616 | SM(i->txpower, AR_XmitPower)
35617 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35618 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35619 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35620 ctl12 |= SM(val, AR_PAPRDChainMask);
35621
35622 - ACCESS_ONCE(ads->ctl12) = ctl12;
35623 - ACCESS_ONCE(ads->ctl17) = ctl17;
35624 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35625 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35626
35627 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35628 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35629 | set11nPktDurRTSCTS(i->rates, 1);
35630
35631 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35632 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35633 | set11nPktDurRTSCTS(i->rates, 3);
35634
35635 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35636 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35637 | set11nRateFlags(i->rates, 1)
35638 | set11nRateFlags(i->rates, 2)
35639 | set11nRateFlags(i->rates, 3)
35640 | SM(i->rtscts_rate, AR_RTSCTSRate);
35641
35642 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35643 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35644 }
35645
35646 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
35647 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
35648 index e88f182..4e57f5d 100644
35649 --- a/drivers/net/wireless/ath/ath9k/hw.h
35650 +++ b/drivers/net/wireless/ath/ath9k/hw.h
35651 @@ -614,7 +614,7 @@ struct ath_hw_private_ops {
35652
35653 /* ANI */
35654 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35655 -};
35656 +} __no_const;
35657
35658 /**
35659 * struct ath_hw_ops - callbacks used by hardware code and driver code
35660 @@ -644,7 +644,7 @@ struct ath_hw_ops {
35661 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35662 struct ath_hw_antcomb_conf *antconf);
35663
35664 -};
35665 +} __no_const;
35666
35667 struct ath_nf_limits {
35668 s16 max;
35669 @@ -664,7 +664,7 @@ enum ath_cal_list {
35670 #define AH_FASTCC 0x4
35671
35672 struct ath_hw {
35673 - struct ath_ops reg_ops;
35674 + ath_ops_no_const reg_ops;
35675
35676 struct ieee80211_hw *hw;
35677 struct ath_common common;
35678 diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35679 index af00e2c..ab04d34 100644
35680 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35681 +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35682 @@ -545,7 +545,7 @@ struct phy_func_ptr {
35683 void (*carrsuppr)(struct brcms_phy *);
35684 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35685 void (*detach)(struct brcms_phy *);
35686 -};
35687 +} __no_const;
35688
35689 struct brcms_phy {
35690 struct brcms_phy_pub pubpi_ro;
35691 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
35692 index faec404..a5277f1 100644
35693 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
35694 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
35695 @@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
35696 */
35697 if (il3945_mod_params.disable_hw_scan) {
35698 D_INFO("Disabling hw_scan\n");
35699 - il3945_mac_ops.hw_scan = NULL;
35700 + pax_open_kernel();
35701 + *(void **)&il3945_mac_ops.hw_scan = NULL;
35702 + pax_close_kernel();
35703 }
35704
35705 D_INFO("*** LOAD DRIVER ***\n");
35706 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
35707 index b7ce6a6..5649756 100644
35708 --- a/drivers/net/wireless/mac80211_hwsim.c
35709 +++ b/drivers/net/wireless/mac80211_hwsim.c
35710 @@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
35711 return -EINVAL;
35712
35713 if (fake_hw_scan) {
35714 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35715 - mac80211_hwsim_ops.sw_scan_start = NULL;
35716 - mac80211_hwsim_ops.sw_scan_complete = NULL;
35717 + pax_open_kernel();
35718 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35719 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35720 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35721 + pax_close_kernel();
35722 }
35723
35724 spin_lock_init(&hwsim_radio_lock);
35725 diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
35726 index 35225e9..95e6bf9 100644
35727 --- a/drivers/net/wireless/mwifiex/main.h
35728 +++ b/drivers/net/wireless/mwifiex/main.h
35729 @@ -537,7 +537,7 @@ struct mwifiex_if_ops {
35730 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
35731 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35732 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
35733 -};
35734 +} __no_const;
35735
35736 struct mwifiex_adapter {
35737 u8 iface_type;
35738 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
35739 index d66e298..55b0a89 100644
35740 --- a/drivers/net/wireless/rndis_wlan.c
35741 +++ b/drivers/net/wireless/rndis_wlan.c
35742 @@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
35743
35744 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35745
35746 - if (rts_threshold < 0 || rts_threshold > 2347)
35747 + if (rts_threshold > 2347)
35748 rts_threshold = 2347;
35749
35750 tmp = cpu_to_le32(rts_threshold);
35751 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
35752 index c264dfa..08ee30e 100644
35753 --- a/drivers/net/wireless/rt2x00/rt2x00.h
35754 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
35755 @@ -396,7 +396,7 @@ struct rt2x00_intf {
35756 * for hardware which doesn't support hardware
35757 * sequence counting.
35758 */
35759 - atomic_t seqno;
35760 + atomic_unchecked_t seqno;
35761 };
35762
35763 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
35764 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
35765 index 50f92d5..f3afc41 100644
35766 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
35767 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
35768 @@ -229,9 +229,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
35769 * sequence counter given by mac80211.
35770 */
35771 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
35772 - seqno = atomic_add_return(0x10, &intf->seqno);
35773 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
35774 else
35775 - seqno = atomic_read(&intf->seqno);
35776 + seqno = atomic_read_unchecked(&intf->seqno);
35777
35778 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
35779 hdr->seq_ctrl |= cpu_to_le16(seqno);
35780 diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
35781 index 9d8f581..0f6589e 100644
35782 --- a/drivers/net/wireless/wl1251/wl1251.h
35783 +++ b/drivers/net/wireless/wl1251/wl1251.h
35784 @@ -266,7 +266,7 @@ struct wl1251_if_operations {
35785 void (*reset)(struct wl1251 *wl);
35786 void (*enable_irq)(struct wl1251 *wl);
35787 void (*disable_irq)(struct wl1251 *wl);
35788 -};
35789 +} __no_const;
35790
35791 struct wl1251 {
35792 struct ieee80211_hw *hw;
35793 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35794 index f34b5b2..b5abb9f 100644
35795 --- a/drivers/oprofile/buffer_sync.c
35796 +++ b/drivers/oprofile/buffer_sync.c
35797 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
35798 if (cookie == NO_COOKIE)
35799 offset = pc;
35800 if (cookie == INVALID_COOKIE) {
35801 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35802 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35803 offset = pc;
35804 }
35805 if (cookie != last_cookie) {
35806 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
35807 /* add userspace sample */
35808
35809 if (!mm) {
35810 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
35811 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35812 return 0;
35813 }
35814
35815 cookie = lookup_dcookie(mm, s->eip, &offset);
35816
35817 if (cookie == INVALID_COOKIE) {
35818 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35819 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35820 return 0;
35821 }
35822
35823 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
35824 /* ignore backtraces if failed to add a sample */
35825 if (state == sb_bt_start) {
35826 state = sb_bt_ignore;
35827 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35828 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35829 }
35830 }
35831 release_mm(mm);
35832 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
35833 index c0cc4e7..44d4e54 100644
35834 --- a/drivers/oprofile/event_buffer.c
35835 +++ b/drivers/oprofile/event_buffer.c
35836 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
35837 }
35838
35839 if (buffer_pos == buffer_size) {
35840 - atomic_inc(&oprofile_stats.event_lost_overflow);
35841 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35842 return;
35843 }
35844
35845 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
35846 index ed2c3ec..deda85a 100644
35847 --- a/drivers/oprofile/oprof.c
35848 +++ b/drivers/oprofile/oprof.c
35849 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
35850 if (oprofile_ops.switch_events())
35851 return;
35852
35853 - atomic_inc(&oprofile_stats.multiplex_counter);
35854 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35855 start_switch_worker();
35856 }
35857
35858 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35859 index 917d28e..d62d981 100644
35860 --- a/drivers/oprofile/oprofile_stats.c
35861 +++ b/drivers/oprofile/oprofile_stats.c
35862 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
35863 cpu_buf->sample_invalid_eip = 0;
35864 }
35865
35866 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35867 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35868 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
35869 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
35870 - atomic_set(&oprofile_stats.multiplex_counter, 0);
35871 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35872 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35873 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35874 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
35875 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
35876 }
35877
35878
35879 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35880 index 38b6fc0..b5cbfce 100644
35881 --- a/drivers/oprofile/oprofile_stats.h
35882 +++ b/drivers/oprofile/oprofile_stats.h
35883 @@ -13,11 +13,11 @@
35884 #include <linux/atomic.h>
35885
35886 struct oprofile_stat_struct {
35887 - atomic_t sample_lost_no_mm;
35888 - atomic_t sample_lost_no_mapping;
35889 - atomic_t bt_lost_no_mapping;
35890 - atomic_t event_lost_overflow;
35891 - atomic_t multiplex_counter;
35892 + atomic_unchecked_t sample_lost_no_mm;
35893 + atomic_unchecked_t sample_lost_no_mapping;
35894 + atomic_unchecked_t bt_lost_no_mapping;
35895 + atomic_unchecked_t event_lost_overflow;
35896 + atomic_unchecked_t multiplex_counter;
35897 };
35898
35899 extern struct oprofile_stat_struct oprofile_stats;
35900 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
35901 index 849357c..b83c1e0 100644
35902 --- a/drivers/oprofile/oprofilefs.c
35903 +++ b/drivers/oprofile/oprofilefs.c
35904 @@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
35905
35906
35907 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35908 - char const *name, atomic_t *val)
35909 + char const *name, atomic_unchecked_t *val)
35910 {
35911 return __oprofilefs_create_file(sb, root, name,
35912 &atomic_ro_fops, 0444, val);
35913 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35914 index 3f56bc0..707d642 100644
35915 --- a/drivers/parport/procfs.c
35916 +++ b/drivers/parport/procfs.c
35917 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
35918
35919 *ppos += len;
35920
35921 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
35922 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
35923 }
35924
35925 #ifdef CONFIG_PARPORT_1284
35926 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
35927
35928 *ppos += len;
35929
35930 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
35931 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
35932 }
35933 #endif /* IEEE1284.3 support. */
35934
35935 diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35936 index 9fff878..ad0ad53 100644
35937 --- a/drivers/pci/hotplug/cpci_hotplug.h
35938 +++ b/drivers/pci/hotplug/cpci_hotplug.h
35939 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35940 int (*hardware_test) (struct slot* slot, u32 value);
35941 u8 (*get_power) (struct slot* slot);
35942 int (*set_power) (struct slot* slot, int value);
35943 -};
35944 +} __no_const;
35945
35946 struct cpci_hp_controller {
35947 unsigned int irq;
35948 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35949 index 76ba8a1..20ca857 100644
35950 --- a/drivers/pci/hotplug/cpqphp_nvram.c
35951 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
35952 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
35953
35954 void compaq_nvram_init (void __iomem *rom_start)
35955 {
35956 +
35957 +#ifndef CONFIG_PAX_KERNEXEC
35958 if (rom_start) {
35959 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35960 }
35961 +#endif
35962 +
35963 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35964
35965 /* initialize our int15 lock */
35966 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
35967 index b500840..d7159d3 100644
35968 --- a/drivers/pci/pcie/aspm.c
35969 +++ b/drivers/pci/pcie/aspm.c
35970 @@ -27,9 +27,9 @@
35971 #define MODULE_PARAM_PREFIX "pcie_aspm."
35972
35973 /* Note: those are not register definitions */
35974 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35975 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35976 -#define ASPM_STATE_L1 (4) /* L1 state */
35977 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35978 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35979 +#define ASPM_STATE_L1 (4U) /* L1 state */
35980 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35981 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
35982
35983 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
35984 index 5e1ca3c..08082fe 100644
35985 --- a/drivers/pci/probe.c
35986 +++ b/drivers/pci/probe.c
35987 @@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
35988 u16 orig_cmd;
35989 struct pci_bus_region region;
35990
35991 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
35992 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
35993
35994 if (!dev->mmio_always_on) {
35995 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
35996 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
35997 index 27911b5..5b6db88 100644
35998 --- a/drivers/pci/proc.c
35999 +++ b/drivers/pci/proc.c
36000 @@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
36001 static int __init pci_proc_init(void)
36002 {
36003 struct pci_dev *dev = NULL;
36004 +
36005 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
36006 +#ifdef CONFIG_GRKERNSEC_PROC_USER
36007 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36008 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36009 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36010 +#endif
36011 +#else
36012 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36013 +#endif
36014 proc_create("devices", 0, proc_bus_pci_dir,
36015 &proc_bus_pci_dev_operations);
36016 proc_initialized = 1;
36017 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
36018 index d68c000..f6094ca 100644
36019 --- a/drivers/platform/x86/thinkpad_acpi.c
36020 +++ b/drivers/platform/x86/thinkpad_acpi.c
36021 @@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
36022 return 0;
36023 }
36024
36025 -void static hotkey_mask_warn_incomplete_mask(void)
36026 +static void hotkey_mask_warn_incomplete_mask(void)
36027 {
36028 /* log only what the user can fix... */
36029 const u32 wantedmask = hotkey_driver_mask &
36030 @@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36031 }
36032 }
36033
36034 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36035 - struct tp_nvram_state *newn,
36036 - const u32 event_mask)
36037 -{
36038 -
36039 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36040 do { \
36041 if ((event_mask & (1 << __scancode)) && \
36042 @@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36043 tpacpi_hotkey_send_key(__scancode); \
36044 } while (0)
36045
36046 - void issue_volchange(const unsigned int oldvol,
36047 - const unsigned int newvol)
36048 - {
36049 - unsigned int i = oldvol;
36050 +static void issue_volchange(const unsigned int oldvol,
36051 + const unsigned int newvol,
36052 + const u32 event_mask)
36053 +{
36054 + unsigned int i = oldvol;
36055
36056 - while (i > newvol) {
36057 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36058 - i--;
36059 - }
36060 - while (i < newvol) {
36061 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36062 - i++;
36063 - }
36064 + while (i > newvol) {
36065 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36066 + i--;
36067 }
36068 + while (i < newvol) {
36069 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36070 + i++;
36071 + }
36072 +}
36073
36074 - void issue_brightnesschange(const unsigned int oldbrt,
36075 - const unsigned int newbrt)
36076 - {
36077 - unsigned int i = oldbrt;
36078 +static void issue_brightnesschange(const unsigned int oldbrt,
36079 + const unsigned int newbrt,
36080 + const u32 event_mask)
36081 +{
36082 + unsigned int i = oldbrt;
36083
36084 - while (i > newbrt) {
36085 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36086 - i--;
36087 - }
36088 - while (i < newbrt) {
36089 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36090 - i++;
36091 - }
36092 + while (i > newbrt) {
36093 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36094 + i--;
36095 + }
36096 + while (i < newbrt) {
36097 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36098 + i++;
36099 }
36100 +}
36101
36102 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36103 + struct tp_nvram_state *newn,
36104 + const u32 event_mask)
36105 +{
36106 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36107 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36108 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36109 @@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36110 oldn->volume_level != newn->volume_level) {
36111 /* recently muted, or repeated mute keypress, or
36112 * multiple presses ending in mute */
36113 - issue_volchange(oldn->volume_level, newn->volume_level);
36114 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36115 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36116 }
36117 } else {
36118 @@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36119 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36120 }
36121 if (oldn->volume_level != newn->volume_level) {
36122 - issue_volchange(oldn->volume_level, newn->volume_level);
36123 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36124 } else if (oldn->volume_toggle != newn->volume_toggle) {
36125 /* repeated vol up/down keypress at end of scale ? */
36126 if (newn->volume_level == 0)
36127 @@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36128 /* handle brightness */
36129 if (oldn->brightness_level != newn->brightness_level) {
36130 issue_brightnesschange(oldn->brightness_level,
36131 - newn->brightness_level);
36132 + newn->brightness_level,
36133 + event_mask);
36134 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36135 /* repeated key presses that didn't change state */
36136 if (newn->brightness_level == 0)
36137 @@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36138 && !tp_features.bright_unkfw)
36139 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36140 }
36141 +}
36142
36143 #undef TPACPI_COMPARE_KEY
36144 #undef TPACPI_MAY_SEND_KEY
36145 -}
36146
36147 /*
36148 * Polling driver
36149 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
36150 index 769d265..a3a05ca 100644
36151 --- a/drivers/pnp/pnpbios/bioscalls.c
36152 +++ b/drivers/pnp/pnpbios/bioscalls.c
36153 @@ -58,7 +58,7 @@ do { \
36154 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36155 } while(0)
36156
36157 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36158 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36159 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36160
36161 /*
36162 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36163
36164 cpu = get_cpu();
36165 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36166 +
36167 + pax_open_kernel();
36168 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36169 + pax_close_kernel();
36170
36171 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36172 spin_lock_irqsave(&pnp_bios_lock, flags);
36173 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
36174 :"memory");
36175 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36176
36177 + pax_open_kernel();
36178 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36179 + pax_close_kernel();
36180 +
36181 put_cpu();
36182
36183 /* If we get here and this is set then the PnP BIOS faulted on us. */
36184 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
36185 return status;
36186 }
36187
36188 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
36189 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36190 {
36191 int i;
36192
36193 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36194 pnp_bios_callpoint.offset = header->fields.pm16offset;
36195 pnp_bios_callpoint.segment = PNP_CS16;
36196
36197 + pax_open_kernel();
36198 +
36199 for_each_possible_cpu(i) {
36200 struct desc_struct *gdt = get_cpu_gdt_table(i);
36201 if (!gdt)
36202 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
36203 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36204 (unsigned long)__va(header->fields.pm16dseg));
36205 }
36206 +
36207 + pax_close_kernel();
36208 }
36209 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36210 index b0ecacb..7c9da2e 100644
36211 --- a/drivers/pnp/resource.c
36212 +++ b/drivers/pnp/resource.c
36213 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
36214 return 1;
36215
36216 /* check if the resource is valid */
36217 - if (*irq < 0 || *irq > 15)
36218 + if (*irq > 15)
36219 return 0;
36220
36221 /* check if the resource is reserved */
36222 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
36223 return 1;
36224
36225 /* check if the resource is valid */
36226 - if (*dma < 0 || *dma == 4 || *dma > 7)
36227 + if (*dma == 4 || *dma > 7)
36228 return 0;
36229
36230 /* check if the resource is reserved */
36231 diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
36232 index 222ccd8..6275fa5 100644
36233 --- a/drivers/power/bq27x00_battery.c
36234 +++ b/drivers/power/bq27x00_battery.c
36235 @@ -72,7 +72,7 @@
36236 struct bq27x00_device_info;
36237 struct bq27x00_access_methods {
36238 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36239 -};
36240 +} __no_const;
36241
36242 enum bq27x00_chip { BQ27000, BQ27500 };
36243
36244 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
36245 index 4c5b053..104263e 100644
36246 --- a/drivers/regulator/max8660.c
36247 +++ b/drivers/regulator/max8660.c
36248 @@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
36249 max8660->shadow_regs[MAX8660_OVER1] = 5;
36250 } else {
36251 /* Otherwise devices can be toggled via software */
36252 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
36253 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
36254 + pax_open_kernel();
36255 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36256 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36257 + pax_close_kernel();
36258 }
36259
36260 /*
36261 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
36262 index 845aa22..99ec402 100644
36263 --- a/drivers/regulator/mc13892-regulator.c
36264 +++ b/drivers/regulator/mc13892-regulator.c
36265 @@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
36266 }
36267 mc13xxx_unlock(mc13892);
36268
36269 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36270 + pax_open_kernel();
36271 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36272 = mc13892_vcam_set_mode;
36273 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36274 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36275 = mc13892_vcam_get_mode;
36276 + pax_close_kernel();
36277
36278 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36279 ARRAY_SIZE(mc13892_regulators));
36280 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36281 index cace6d3..f623fda 100644
36282 --- a/drivers/rtc/rtc-dev.c
36283 +++ b/drivers/rtc/rtc-dev.c
36284 @@ -14,6 +14,7 @@
36285 #include <linux/module.h>
36286 #include <linux/rtc.h>
36287 #include <linux/sched.h>
36288 +#include <linux/grsecurity.h>
36289 #include "rtc-core.h"
36290
36291 static dev_t rtc_devt;
36292 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
36293 if (copy_from_user(&tm, uarg, sizeof(tm)))
36294 return -EFAULT;
36295
36296 + gr_log_timechange();
36297 +
36298 return rtc_set_time(rtc, &tm);
36299
36300 case RTC_PIE_ON:
36301 diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
36302 index 3fcf627..f334910 100644
36303 --- a/drivers/scsi/aacraid/aacraid.h
36304 +++ b/drivers/scsi/aacraid/aacraid.h
36305 @@ -492,7 +492,7 @@ struct adapter_ops
36306 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36307 /* Administrative operations */
36308 int (*adapter_comm)(struct aac_dev * dev, int comm);
36309 -};
36310 +} __no_const;
36311
36312 /*
36313 * Define which interrupt handler needs to be installed
36314 diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
36315 index 0d279c44..3d25a97 100644
36316 --- a/drivers/scsi/aacraid/linit.c
36317 +++ b/drivers/scsi/aacraid/linit.c
36318 @@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
36319 #elif defined(__devinitconst)
36320 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36321 #else
36322 -static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36323 +static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36324 #endif
36325 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36326 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
36327 diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
36328 index ff80552..1c4120c 100644
36329 --- a/drivers/scsi/aic94xx/aic94xx_init.c
36330 +++ b/drivers/scsi/aic94xx/aic94xx_init.c
36331 @@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
36332 .lldd_ata_set_dmamode = asd_set_dmamode,
36333 };
36334
36335 -static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36336 +static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36337 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36338 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36339 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
36340 diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
36341 index 4ad7e36..d004679 100644
36342 --- a/drivers/scsi/bfa/bfa.h
36343 +++ b/drivers/scsi/bfa/bfa.h
36344 @@ -196,7 +196,7 @@ struct bfa_hwif_s {
36345 u32 *end);
36346 int cpe_vec_q0;
36347 int rme_vec_q0;
36348 -};
36349 +} __no_const;
36350 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36351
36352 struct bfa_faa_cbfn_s {
36353 diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
36354 index f0f80e2..8ec946b 100644
36355 --- a/drivers/scsi/bfa/bfa_fcpim.c
36356 +++ b/drivers/scsi/bfa/bfa_fcpim.c
36357 @@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
36358
36359 bfa_iotag_attach(fcp);
36360
36361 - fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36362 + fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36363 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36364 (fcp->num_itns * sizeof(struct bfa_itn_s));
36365 memset(fcp->itn_arr, 0,
36366 @@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36367 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36368 {
36369 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36370 - struct bfa_itn_s *itn;
36371 + bfa_itn_s_no_const *itn;
36372
36373 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36374 itn->isr = isr;
36375 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
36376 index 36f26da..38a34a8 100644
36377 --- a/drivers/scsi/bfa/bfa_fcpim.h
36378 +++ b/drivers/scsi/bfa/bfa_fcpim.h
36379 @@ -37,6 +37,7 @@ struct bfa_iotag_s {
36380 struct bfa_itn_s {
36381 bfa_isr_func_t isr;
36382 };
36383 +typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36384
36385 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36386 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
36387 @@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
36388 struct list_head iotag_tio_free_q; /* free IO resources */
36389 struct list_head iotag_unused_q; /* unused IO resources*/
36390 struct bfa_iotag_s *iotag_arr;
36391 - struct bfa_itn_s *itn_arr;
36392 + bfa_itn_s_no_const *itn_arr;
36393 int num_ioim_reqs;
36394 int num_fwtio_reqs;
36395 int num_itns;
36396 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
36397 index 1a99d4b..e85d64b 100644
36398 --- a/drivers/scsi/bfa/bfa_ioc.h
36399 +++ b/drivers/scsi/bfa/bfa_ioc.h
36400 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
36401 bfa_ioc_disable_cbfn_t disable_cbfn;
36402 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36403 bfa_ioc_reset_cbfn_t reset_cbfn;
36404 -};
36405 +} __no_const;
36406
36407 /*
36408 * IOC event notification mechanism.
36409 @@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
36410 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36411 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
36412 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
36413 -};
36414 +} __no_const;
36415
36416 /*
36417 * Queue element to wait for room in request queue. FIFO order is
36418 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
36419 index a3a056a..b9bbc2f 100644
36420 --- a/drivers/scsi/hosts.c
36421 +++ b/drivers/scsi/hosts.c
36422 @@ -42,7 +42,7 @@
36423 #include "scsi_logging.h"
36424
36425
36426 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
36427 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36428
36429
36430 static void scsi_host_cls_release(struct device *dev)
36431 @@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
36432 * subtract one because we increment first then return, but we need to
36433 * know what the next host number was before increment
36434 */
36435 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36436 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36437 shost->dma_channel = 0xff;
36438
36439 /* These three are default values which can be overridden */
36440 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
36441 index 500e20d..ebd3059 100644
36442 --- a/drivers/scsi/hpsa.c
36443 +++ b/drivers/scsi/hpsa.c
36444 @@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
36445 u32 a;
36446
36447 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36448 - return h->access.command_completed(h);
36449 + return h->access->command_completed(h);
36450
36451 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36452 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36453 @@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
36454 while (!list_empty(&h->reqQ)) {
36455 c = list_entry(h->reqQ.next, struct CommandList, list);
36456 /* can't do anything if fifo is full */
36457 - if ((h->access.fifo_full(h))) {
36458 + if ((h->access->fifo_full(h))) {
36459 dev_warn(&h->pdev->dev, "fifo full\n");
36460 break;
36461 }
36462 @@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
36463 h->Qdepth--;
36464
36465 /* Tell the controller execute command */
36466 - h->access.submit_command(h, c);
36467 + h->access->submit_command(h, c);
36468
36469 /* Put job onto the completed Q */
36470 addQ(&h->cmpQ, c);
36471 @@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
36472
36473 static inline unsigned long get_next_completion(struct ctlr_info *h)
36474 {
36475 - return h->access.command_completed(h);
36476 + return h->access->command_completed(h);
36477 }
36478
36479 static inline bool interrupt_pending(struct ctlr_info *h)
36480 {
36481 - return h->access.intr_pending(h);
36482 + return h->access->intr_pending(h);
36483 }
36484
36485 static inline long interrupt_not_for_us(struct ctlr_info *h)
36486 {
36487 - return (h->access.intr_pending(h) == 0) ||
36488 + return (h->access->intr_pending(h) == 0) ||
36489 (h->interrupts_enabled == 0);
36490 }
36491
36492 @@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
36493 if (prod_index < 0)
36494 return -ENODEV;
36495 h->product_name = products[prod_index].product_name;
36496 - h->access = *(products[prod_index].access);
36497 + h->access = products[prod_index].access;
36498
36499 if (hpsa_board_disabled(h->pdev)) {
36500 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36501 @@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
36502
36503 assert_spin_locked(&lockup_detector_lock);
36504 remove_ctlr_from_lockup_detector_list(h);
36505 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36506 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36507 spin_lock_irqsave(&h->lock, flags);
36508 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36509 spin_unlock_irqrestore(&h->lock, flags);
36510 @@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
36511 }
36512
36513 /* make sure the board interrupts are off */
36514 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36515 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36516
36517 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36518 goto clean2;
36519 @@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
36520 * fake ones to scoop up any residual completions.
36521 */
36522 spin_lock_irqsave(&h->lock, flags);
36523 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36524 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36525 spin_unlock_irqrestore(&h->lock, flags);
36526 free_irq(h->intr[h->intr_mode], h);
36527 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
36528 @@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
36529 dev_info(&h->pdev->dev, "Board READY.\n");
36530 dev_info(&h->pdev->dev,
36531 "Waiting for stale completions to drain.\n");
36532 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36533 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36534 msleep(10000);
36535 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36536 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36537
36538 rc = controller_reset_failed(h->cfgtable);
36539 if (rc)
36540 @@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
36541 }
36542
36543 /* Turn the interrupts on so we can service requests */
36544 - h->access.set_intr_mask(h, HPSA_INTR_ON);
36545 + h->access->set_intr_mask(h, HPSA_INTR_ON);
36546
36547 hpsa_hba_inquiry(h);
36548 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
36549 @@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
36550 * To write all data in the battery backed cache to disks
36551 */
36552 hpsa_flush_cache(h);
36553 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
36554 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
36555 free_irq(h->intr[h->intr_mode], h);
36556 #ifdef CONFIG_PCI_MSI
36557 if (h->msix_vector)
36558 @@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
36559 return;
36560 }
36561 /* Change the access methods to the performant access methods */
36562 - h->access = SA5_performant_access;
36563 + h->access = &SA5_performant_access;
36564 h->transMethod = CFGTBL_Trans_Performant;
36565 }
36566
36567 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
36568 index 7b28d54..952f23a 100644
36569 --- a/drivers/scsi/hpsa.h
36570 +++ b/drivers/scsi/hpsa.h
36571 @@ -72,7 +72,7 @@ struct ctlr_info {
36572 unsigned int msix_vector;
36573 unsigned int msi_vector;
36574 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36575 - struct access_method access;
36576 + struct access_method *access;
36577
36578 /* queue and queue Info */
36579 struct list_head reqQ;
36580 diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36581 index f2df059..a3a9930 100644
36582 --- a/drivers/scsi/ips.h
36583 +++ b/drivers/scsi/ips.h
36584 @@ -1027,7 +1027,7 @@ typedef struct {
36585 int (*intr)(struct ips_ha *);
36586 void (*enableint)(struct ips_ha *);
36587 uint32_t (*statupd)(struct ips_ha *);
36588 -} ips_hw_func_t;
36589 +} __no_const ips_hw_func_t;
36590
36591 typedef struct ips_ha {
36592 uint8_t ha_id[IPS_MAX_CHANNELS+1];
36593 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
36594 index aceffad..c35c08d 100644
36595 --- a/drivers/scsi/libfc/fc_exch.c
36596 +++ b/drivers/scsi/libfc/fc_exch.c
36597 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
36598 * all together if not used XXX
36599 */
36600 struct {
36601 - atomic_t no_free_exch;
36602 - atomic_t no_free_exch_xid;
36603 - atomic_t xid_not_found;
36604 - atomic_t xid_busy;
36605 - atomic_t seq_not_found;
36606 - atomic_t non_bls_resp;
36607 + atomic_unchecked_t no_free_exch;
36608 + atomic_unchecked_t no_free_exch_xid;
36609 + atomic_unchecked_t xid_not_found;
36610 + atomic_unchecked_t xid_busy;
36611 + atomic_unchecked_t seq_not_found;
36612 + atomic_unchecked_t non_bls_resp;
36613 } stats;
36614 };
36615
36616 @@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
36617 /* allocate memory for exchange */
36618 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36619 if (!ep) {
36620 - atomic_inc(&mp->stats.no_free_exch);
36621 + atomic_inc_unchecked(&mp->stats.no_free_exch);
36622 goto out;
36623 }
36624 memset(ep, 0, sizeof(*ep));
36625 @@ -780,7 +780,7 @@ out:
36626 return ep;
36627 err:
36628 spin_unlock_bh(&pool->lock);
36629 - atomic_inc(&mp->stats.no_free_exch_xid);
36630 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36631 mempool_free(ep, mp->ep_pool);
36632 return NULL;
36633 }
36634 @@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36635 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36636 ep = fc_exch_find(mp, xid);
36637 if (!ep) {
36638 - atomic_inc(&mp->stats.xid_not_found);
36639 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36640 reject = FC_RJT_OX_ID;
36641 goto out;
36642 }
36643 @@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36644 ep = fc_exch_find(mp, xid);
36645 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36646 if (ep) {
36647 - atomic_inc(&mp->stats.xid_busy);
36648 + atomic_inc_unchecked(&mp->stats.xid_busy);
36649 reject = FC_RJT_RX_ID;
36650 goto rel;
36651 }
36652 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36653 }
36654 xid = ep->xid; /* get our XID */
36655 } else if (!ep) {
36656 - atomic_inc(&mp->stats.xid_not_found);
36657 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36658 reject = FC_RJT_RX_ID; /* XID not found */
36659 goto out;
36660 }
36661 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
36662 } else {
36663 sp = &ep->seq;
36664 if (sp->id != fh->fh_seq_id) {
36665 - atomic_inc(&mp->stats.seq_not_found);
36666 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36667 if (f_ctl & FC_FC_END_SEQ) {
36668 /*
36669 * Update sequence_id based on incoming last
36670 @@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36671
36672 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36673 if (!ep) {
36674 - atomic_inc(&mp->stats.xid_not_found);
36675 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36676 goto out;
36677 }
36678 if (ep->esb_stat & ESB_ST_COMPLETE) {
36679 - atomic_inc(&mp->stats.xid_not_found);
36680 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36681 goto rel;
36682 }
36683 if (ep->rxid == FC_XID_UNKNOWN)
36684 ep->rxid = ntohs(fh->fh_rx_id);
36685 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36686 - atomic_inc(&mp->stats.xid_not_found);
36687 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36688 goto rel;
36689 }
36690 if (ep->did != ntoh24(fh->fh_s_id) &&
36691 ep->did != FC_FID_FLOGI) {
36692 - atomic_inc(&mp->stats.xid_not_found);
36693 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36694 goto rel;
36695 }
36696 sof = fr_sof(fp);
36697 @@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36698 sp->ssb_stat |= SSB_ST_RESP;
36699 sp->id = fh->fh_seq_id;
36700 } else if (sp->id != fh->fh_seq_id) {
36701 - atomic_inc(&mp->stats.seq_not_found);
36702 + atomic_inc_unchecked(&mp->stats.seq_not_found);
36703 goto rel;
36704 }
36705
36706 @@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
36707 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
36708
36709 if (!sp)
36710 - atomic_inc(&mp->stats.xid_not_found);
36711 + atomic_inc_unchecked(&mp->stats.xid_not_found);
36712 else
36713 - atomic_inc(&mp->stats.non_bls_resp);
36714 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
36715
36716 fc_frame_free(fp);
36717 }
36718 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
36719 index d109cc3..09f4e7d 100644
36720 --- a/drivers/scsi/libsas/sas_ata.c
36721 +++ b/drivers/scsi/libsas/sas_ata.c
36722 @@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
36723 .postreset = ata_std_postreset,
36724 .error_handler = ata_std_error_handler,
36725 .post_internal_cmd = sas_ata_post_internal,
36726 - .qc_defer = ata_std_qc_defer,
36727 + .qc_defer = ata_std_qc_defer,
36728 .qc_prep = ata_noop_qc_prep,
36729 .qc_issue = sas_ata_qc_issue,
36730 .qc_fill_rtf = sas_ata_qc_fill_rtf,
36731 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
36732 index 3a1ffdd..8eb7c71 100644
36733 --- a/drivers/scsi/lpfc/lpfc.h
36734 +++ b/drivers/scsi/lpfc/lpfc.h
36735 @@ -413,7 +413,7 @@ struct lpfc_vport {
36736 struct dentry *debug_nodelist;
36737 struct dentry *vport_debugfs_root;
36738 struct lpfc_debugfs_trc *disc_trc;
36739 - atomic_t disc_trc_cnt;
36740 + atomic_unchecked_t disc_trc_cnt;
36741 #endif
36742 uint8_t stat_data_enabled;
36743 uint8_t stat_data_blocked;
36744 @@ -826,8 +826,8 @@ struct lpfc_hba {
36745 struct timer_list fabric_block_timer;
36746 unsigned long bit_flags;
36747 #define FABRIC_COMANDS_BLOCKED 0
36748 - atomic_t num_rsrc_err;
36749 - atomic_t num_cmd_success;
36750 + atomic_unchecked_t num_rsrc_err;
36751 + atomic_unchecked_t num_cmd_success;
36752 unsigned long last_rsrc_error_time;
36753 unsigned long last_ramp_down_time;
36754 unsigned long last_ramp_up_time;
36755 @@ -863,7 +863,7 @@ struct lpfc_hba {
36756
36757 struct dentry *debug_slow_ring_trc;
36758 struct lpfc_debugfs_trc *slow_ring_trc;
36759 - atomic_t slow_ring_trc_cnt;
36760 + atomic_unchecked_t slow_ring_trc_cnt;
36761 /* iDiag debugfs sub-directory */
36762 struct dentry *idiag_root;
36763 struct dentry *idiag_pci_cfg;
36764 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
36765 index af04b0d..8f1a97e 100644
36766 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
36767 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
36768 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
36769
36770 #include <linux/debugfs.h>
36771
36772 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36773 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36774 static unsigned long lpfc_debugfs_start_time = 0L;
36775
36776 /* iDiag */
36777 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
36778 lpfc_debugfs_enable = 0;
36779
36780 len = 0;
36781 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36782 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36783 (lpfc_debugfs_max_disc_trc - 1);
36784 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36785 dtp = vport->disc_trc + i;
36786 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
36787 lpfc_debugfs_enable = 0;
36788
36789 len = 0;
36790 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36791 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36792 (lpfc_debugfs_max_slow_ring_trc - 1);
36793 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36794 dtp = phba->slow_ring_trc + i;
36795 @@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
36796 !vport || !vport->disc_trc)
36797 return;
36798
36799 - index = atomic_inc_return(&vport->disc_trc_cnt) &
36800 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36801 (lpfc_debugfs_max_disc_trc - 1);
36802 dtp = vport->disc_trc + index;
36803 dtp->fmt = fmt;
36804 dtp->data1 = data1;
36805 dtp->data2 = data2;
36806 dtp->data3 = data3;
36807 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36808 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36809 dtp->jif = jiffies;
36810 #endif
36811 return;
36812 @@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
36813 !phba || !phba->slow_ring_trc)
36814 return;
36815
36816 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36817 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36818 (lpfc_debugfs_max_slow_ring_trc - 1);
36819 dtp = phba->slow_ring_trc + index;
36820 dtp->fmt = fmt;
36821 dtp->data1 = data1;
36822 dtp->data2 = data2;
36823 dtp->data3 = data3;
36824 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36825 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36826 dtp->jif = jiffies;
36827 #endif
36828 return;
36829 @@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36830 "slow_ring buffer\n");
36831 goto debug_failed;
36832 }
36833 - atomic_set(&phba->slow_ring_trc_cnt, 0);
36834 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36835 memset(phba->slow_ring_trc, 0,
36836 (sizeof(struct lpfc_debugfs_trc) *
36837 lpfc_debugfs_max_slow_ring_trc));
36838 @@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
36839 "buffer\n");
36840 goto debug_failed;
36841 }
36842 - atomic_set(&vport->disc_trc_cnt, 0);
36843 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36844
36845 snprintf(name, sizeof(name), "discovery_trace");
36846 vport->debug_disc_trc =
36847 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
36848 index 9598fdc..7e9f3d9 100644
36849 --- a/drivers/scsi/lpfc/lpfc_init.c
36850 +++ b/drivers/scsi/lpfc/lpfc_init.c
36851 @@ -10266,8 +10266,10 @@ lpfc_init(void)
36852 "misc_register returned with status %d", error);
36853
36854 if (lpfc_enable_npiv) {
36855 - lpfc_transport_functions.vport_create = lpfc_vport_create;
36856 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36857 + pax_open_kernel();
36858 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36859 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36860 + pax_close_kernel();
36861 }
36862 lpfc_transport_template =
36863 fc_attach_transport(&lpfc_transport_functions);
36864 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
36865 index 88f3a83..686d3fa 100644
36866 --- a/drivers/scsi/lpfc/lpfc_scsi.c
36867 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
36868 @@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
36869 uint32_t evt_posted;
36870
36871 spin_lock_irqsave(&phba->hbalock, flags);
36872 - atomic_inc(&phba->num_rsrc_err);
36873 + atomic_inc_unchecked(&phba->num_rsrc_err);
36874 phba->last_rsrc_error_time = jiffies;
36875
36876 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
36877 @@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
36878 unsigned long flags;
36879 struct lpfc_hba *phba = vport->phba;
36880 uint32_t evt_posted;
36881 - atomic_inc(&phba->num_cmd_success);
36882 + atomic_inc_unchecked(&phba->num_cmd_success);
36883
36884 if (vport->cfg_lun_queue_depth <= queue_depth)
36885 return;
36886 @@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36887 unsigned long num_rsrc_err, num_cmd_success;
36888 int i;
36889
36890 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36891 - num_cmd_success = atomic_read(&phba->num_cmd_success);
36892 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36893 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36894
36895 vports = lpfc_create_vport_work_array(phba);
36896 if (vports != NULL)
36897 @@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
36898 }
36899 }
36900 lpfc_destroy_vport_work_array(phba, vports);
36901 - atomic_set(&phba->num_rsrc_err, 0);
36902 - atomic_set(&phba->num_cmd_success, 0);
36903 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36904 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36905 }
36906
36907 /**
36908 @@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
36909 }
36910 }
36911 lpfc_destroy_vport_work_array(phba, vports);
36912 - atomic_set(&phba->num_rsrc_err, 0);
36913 - atomic_set(&phba->num_cmd_success, 0);
36914 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
36915 + atomic_set_unchecked(&phba->num_cmd_success, 0);
36916 }
36917
36918 /**
36919 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
36920 index ea8a0b4..812a124 100644
36921 --- a/drivers/scsi/pmcraid.c
36922 +++ b/drivers/scsi/pmcraid.c
36923 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
36924 res->scsi_dev = scsi_dev;
36925 scsi_dev->hostdata = res;
36926 res->change_detected = 0;
36927 - atomic_set(&res->read_failures, 0);
36928 - atomic_set(&res->write_failures, 0);
36929 + atomic_set_unchecked(&res->read_failures, 0);
36930 + atomic_set_unchecked(&res->write_failures, 0);
36931 rc = 0;
36932 }
36933 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
36934 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
36935
36936 /* If this was a SCSI read/write command keep count of errors */
36937 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36938 - atomic_inc(&res->read_failures);
36939 + atomic_inc_unchecked(&res->read_failures);
36940 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36941 - atomic_inc(&res->write_failures);
36942 + atomic_inc_unchecked(&res->write_failures);
36943
36944 if (!RES_IS_GSCSI(res->cfg_entry) &&
36945 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
36946 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
36947 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36948 * hrrq_id assigned here in queuecommand
36949 */
36950 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36951 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36952 pinstance->num_hrrq;
36953 cmd->cmd_done = pmcraid_io_done;
36954
36955 @@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
36956 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36957 * hrrq_id assigned here in queuecommand
36958 */
36959 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36960 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36961 pinstance->num_hrrq;
36962
36963 if (request_size) {
36964 @@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
36965
36966 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36967 /* add resources only after host is added into system */
36968 - if (!atomic_read(&pinstance->expose_resources))
36969 + if (!atomic_read_unchecked(&pinstance->expose_resources))
36970 return;
36971
36972 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
36973 @@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
36974 init_waitqueue_head(&pinstance->reset_wait_q);
36975
36976 atomic_set(&pinstance->outstanding_cmds, 0);
36977 - atomic_set(&pinstance->last_message_id, 0);
36978 - atomic_set(&pinstance->expose_resources, 0);
36979 + atomic_set_unchecked(&pinstance->last_message_id, 0);
36980 + atomic_set_unchecked(&pinstance->expose_resources, 0);
36981
36982 INIT_LIST_HEAD(&pinstance->free_res_q);
36983 INIT_LIST_HEAD(&pinstance->used_res_q);
36984 @@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
36985 /* Schedule worker thread to handle CCN and take care of adding and
36986 * removing devices to OS
36987 */
36988 - atomic_set(&pinstance->expose_resources, 1);
36989 + atomic_set_unchecked(&pinstance->expose_resources, 1);
36990 schedule_work(&pinstance->worker_q);
36991 return rc;
36992
36993 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
36994 index e1d150f..6c6df44 100644
36995 --- a/drivers/scsi/pmcraid.h
36996 +++ b/drivers/scsi/pmcraid.h
36997 @@ -748,7 +748,7 @@ struct pmcraid_instance {
36998 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
36999
37000 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37001 - atomic_t last_message_id;
37002 + atomic_unchecked_t last_message_id;
37003
37004 /* configuration table */
37005 struct pmcraid_config_table *cfg_table;
37006 @@ -777,7 +777,7 @@ struct pmcraid_instance {
37007 atomic_t outstanding_cmds;
37008
37009 /* should add/delete resources to mid-layer now ?*/
37010 - atomic_t expose_resources;
37011 + atomic_unchecked_t expose_resources;
37012
37013
37014
37015 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
37016 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37017 };
37018 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37019 - atomic_t read_failures; /* count of failed READ commands */
37020 - atomic_t write_failures; /* count of failed WRITE commands */
37021 + atomic_unchecked_t read_failures; /* count of failed READ commands */
37022 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37023
37024 /* To indicate add/delete/modify during CCN */
37025 u8 change_detected;
37026 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
37027 index a244303..6015eb7 100644
37028 --- a/drivers/scsi/qla2xxx/qla_def.h
37029 +++ b/drivers/scsi/qla2xxx/qla_def.h
37030 @@ -2264,7 +2264,7 @@ struct isp_operations {
37031 int (*start_scsi) (srb_t *);
37032 int (*abort_isp) (struct scsi_qla_host *);
37033 int (*iospace_config)(struct qla_hw_data*);
37034 -};
37035 +} __no_const;
37036
37037 /* MSI-X Support *************************************************************/
37038
37039 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
37040 index 7f2492e..5113877 100644
37041 --- a/drivers/scsi/qla4xxx/ql4_def.h
37042 +++ b/drivers/scsi/qla4xxx/ql4_def.h
37043 @@ -268,7 +268,7 @@ struct ddb_entry {
37044 * (4000 only) */
37045 atomic_t relogin_timer; /* Max Time to wait for
37046 * relogin to complete */
37047 - atomic_t relogin_retry_count; /* Num of times relogin has been
37048 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37049 * retried */
37050 uint32_t default_time2wait; /* Default Min time between
37051 * relogins (+aens) */
37052 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
37053 index ee47820..a83b1f4 100644
37054 --- a/drivers/scsi/qla4xxx/ql4_os.c
37055 +++ b/drivers/scsi/qla4xxx/ql4_os.c
37056 @@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
37057 */
37058 if (!iscsi_is_session_online(cls_sess)) {
37059 /* Reset retry relogin timer */
37060 - atomic_inc(&ddb_entry->relogin_retry_count);
37061 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37062 DEBUG2(ql4_printk(KERN_INFO, ha,
37063 "%s: index[%d] relogin timed out-retrying"
37064 " relogin (%d), retry (%d)\n", __func__,
37065 ddb_entry->fw_ddb_index,
37066 - atomic_read(&ddb_entry->relogin_retry_count),
37067 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37068 ddb_entry->default_time2wait + 4));
37069 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37070 atomic_set(&ddb_entry->retry_relogin_timer,
37071 @@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
37072
37073 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37074 atomic_set(&ddb_entry->relogin_timer, 0);
37075 - atomic_set(&ddb_entry->relogin_retry_count, 0);
37076 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37077 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
37078 ddb_entry->default_relogin_timeout =
37079 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
37080 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
37081 index 07322ec..91ccc23 100644
37082 --- a/drivers/scsi/scsi.c
37083 +++ b/drivers/scsi/scsi.c
37084 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
37085 unsigned long timeout;
37086 int rtn = 0;
37087
37088 - atomic_inc(&cmd->device->iorequest_cnt);
37089 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37090
37091 /* check if the device is still usable */
37092 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37093 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
37094 index 4037fd5..a19fcc7 100644
37095 --- a/drivers/scsi/scsi_lib.c
37096 +++ b/drivers/scsi/scsi_lib.c
37097 @@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
37098 shost = sdev->host;
37099 scsi_init_cmd_errh(cmd);
37100 cmd->result = DID_NO_CONNECT << 16;
37101 - atomic_inc(&cmd->device->iorequest_cnt);
37102 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37103
37104 /*
37105 * SCSI request completion path will do scsi_device_unbusy(),
37106 @@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
37107
37108 INIT_LIST_HEAD(&cmd->eh_entry);
37109
37110 - atomic_inc(&cmd->device->iodone_cnt);
37111 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
37112 if (cmd->result)
37113 - atomic_inc(&cmd->device->ioerr_cnt);
37114 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37115
37116 disposition = scsi_decide_disposition(cmd);
37117 if (disposition != SUCCESS &&
37118 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
37119 index 04c2a27..9d8bd66 100644
37120 --- a/drivers/scsi/scsi_sysfs.c
37121 +++ b/drivers/scsi/scsi_sysfs.c
37122 @@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
37123 char *buf) \
37124 { \
37125 struct scsi_device *sdev = to_scsi_device(dev); \
37126 - unsigned long long count = atomic_read(&sdev->field); \
37127 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
37128 return snprintf(buf, 20, "0x%llx\n", count); \
37129 } \
37130 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37131 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37132 index 84a1fdf..693b0d6 100644
37133 --- a/drivers/scsi/scsi_tgt_lib.c
37134 +++ b/drivers/scsi/scsi_tgt_lib.c
37135 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
37136 int err;
37137
37138 dprintk("%lx %u\n", uaddr, len);
37139 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37140 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37141 if (err) {
37142 /*
37143 * TODO: need to fixup sg_tablesize, max_segment_size,
37144 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
37145 index 80fbe2a..efa223b 100644
37146 --- a/drivers/scsi/scsi_transport_fc.c
37147 +++ b/drivers/scsi/scsi_transport_fc.c
37148 @@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
37149 * Netlink Infrastructure
37150 */
37151
37152 -static atomic_t fc_event_seq;
37153 +static atomic_unchecked_t fc_event_seq;
37154
37155 /**
37156 * fc_get_event_number - Obtain the next sequential FC event number
37157 @@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
37158 u32
37159 fc_get_event_number(void)
37160 {
37161 - return atomic_add_return(1, &fc_event_seq);
37162 + return atomic_add_return_unchecked(1, &fc_event_seq);
37163 }
37164 EXPORT_SYMBOL(fc_get_event_number);
37165
37166 @@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
37167 {
37168 int error;
37169
37170 - atomic_set(&fc_event_seq, 0);
37171 + atomic_set_unchecked(&fc_event_seq, 0);
37172
37173 error = transport_class_register(&fc_host_class);
37174 if (error)
37175 @@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
37176 char *cp;
37177
37178 *val = simple_strtoul(buf, &cp, 0);
37179 - if ((*cp && (*cp != '\n')) || (*val < 0))
37180 + if (*cp && (*cp != '\n'))
37181 return -EINVAL;
37182 /*
37183 * Check for overflow; dev_loss_tmo is u32
37184 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
37185 index 1cf640e..78e9014 100644
37186 --- a/drivers/scsi/scsi_transport_iscsi.c
37187 +++ b/drivers/scsi/scsi_transport_iscsi.c
37188 @@ -79,7 +79,7 @@ struct iscsi_internal {
37189 struct transport_container session_cont;
37190 };
37191
37192 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37193 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37194 static struct workqueue_struct *iscsi_eh_timer_workq;
37195
37196 static DEFINE_IDA(iscsi_sess_ida);
37197 @@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
37198 int err;
37199
37200 ihost = shost->shost_data;
37201 - session->sid = atomic_add_return(1, &iscsi_session_nr);
37202 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37203
37204 if (target_id == ISCSI_MAX_TARGET) {
37205 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
37206 @@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
37207 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37208 ISCSI_TRANSPORT_VERSION);
37209
37210 - atomic_set(&iscsi_session_nr, 0);
37211 + atomic_set_unchecked(&iscsi_session_nr, 0);
37212
37213 err = class_register(&iscsi_transport_class);
37214 if (err)
37215 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37216 index 21a045e..ec89e03 100644
37217 --- a/drivers/scsi/scsi_transport_srp.c
37218 +++ b/drivers/scsi/scsi_transport_srp.c
37219 @@ -33,7 +33,7 @@
37220 #include "scsi_transport_srp_internal.h"
37221
37222 struct srp_host_attrs {
37223 - atomic_t next_port_id;
37224 + atomic_unchecked_t next_port_id;
37225 };
37226 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37227
37228 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
37229 struct Scsi_Host *shost = dev_to_shost(dev);
37230 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37231
37232 - atomic_set(&srp_host->next_port_id, 0);
37233 + atomic_set_unchecked(&srp_host->next_port_id, 0);
37234 return 0;
37235 }
37236
37237 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
37238 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37239 rport->roles = ids->roles;
37240
37241 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37242 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37243 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37244
37245 transport_setup_device(&rport->dev);
37246 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
37247 index eacd46b..e3f4d62 100644
37248 --- a/drivers/scsi/sg.c
37249 +++ b/drivers/scsi/sg.c
37250 @@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
37251 sdp->disk->disk_name,
37252 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37253 NULL,
37254 - (char *)arg);
37255 + (char __user *)arg);
37256 case BLKTRACESTART:
37257 return blk_trace_startstop(sdp->device->request_queue, 1);
37258 case BLKTRACESTOP:
37259 @@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
37260 const struct file_operations * fops;
37261 };
37262
37263 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37264 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37265 {"allow_dio", &adio_fops},
37266 {"debug", &debug_fops},
37267 {"def_reserved_size", &dressz_fops},
37268 @@ -2332,7 +2332,7 @@ sg_proc_init(void)
37269 if (!sg_proc_sgp)
37270 return 1;
37271 for (k = 0; k < num_leaves; ++k) {
37272 - struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37273 + const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37274 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37275 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37276 }
37277 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
37278 index 3d8f662..070f1a5 100644
37279 --- a/drivers/spi/spi.c
37280 +++ b/drivers/spi/spi.c
37281 @@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
37282 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37283
37284 /* portable code must never pass more than 32 bytes */
37285 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37286 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37287
37288 static u8 *buf;
37289
37290 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
37291 index d91751f..a3a9e36 100644
37292 --- a/drivers/staging/octeon/ethernet-rx.c
37293 +++ b/drivers/staging/octeon/ethernet-rx.c
37294 @@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37295 /* Increment RX stats for virtual ports */
37296 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37297 #ifdef CONFIG_64BIT
37298 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37299 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37300 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37301 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37302 #else
37303 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37304 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37305 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37306 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37307 #endif
37308 }
37309 netif_receive_skb(skb);
37310 @@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
37311 dev->name);
37312 */
37313 #ifdef CONFIG_64BIT
37314 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37315 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37316 #else
37317 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37318 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37319 #endif
37320 dev_kfree_skb_irq(skb);
37321 }
37322 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
37323 index 60cba81..71eb239 100644
37324 --- a/drivers/staging/octeon/ethernet.c
37325 +++ b/drivers/staging/octeon/ethernet.c
37326 @@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
37327 * since the RX tasklet also increments it.
37328 */
37329 #ifdef CONFIG_64BIT
37330 - atomic64_add(rx_status.dropped_packets,
37331 - (atomic64_t *)&priv->stats.rx_dropped);
37332 + atomic64_add_unchecked(rx_status.dropped_packets,
37333 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37334 #else
37335 - atomic_add(rx_status.dropped_packets,
37336 - (atomic_t *)&priv->stats.rx_dropped);
37337 + atomic_add_unchecked(rx_status.dropped_packets,
37338 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
37339 #endif
37340 }
37341
37342 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
37343 index d3d8727..f9327bb8 100644
37344 --- a/drivers/staging/rtl8712/rtl871x_io.h
37345 +++ b/drivers/staging/rtl8712/rtl871x_io.h
37346 @@ -108,7 +108,7 @@ struct _io_ops {
37347 u8 *pmem);
37348 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37349 u8 *pmem);
37350 -};
37351 +} __no_const;
37352
37353 struct io_req {
37354 struct list_head list;
37355 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37356 index c7b5e8b..783d6cb 100644
37357 --- a/drivers/staging/sbe-2t3e3/netdev.c
37358 +++ b/drivers/staging/sbe-2t3e3/netdev.c
37359 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37360 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37361
37362 if (rlen)
37363 - if (copy_to_user(data, &resp, rlen))
37364 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37365 return -EFAULT;
37366
37367 return 0;
37368 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37369 index 42cdafe..2769103 100644
37370 --- a/drivers/staging/speakup/speakup_soft.c
37371 +++ b/drivers/staging/speakup/speakup_soft.c
37372 @@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37373 break;
37374 } else if (!initialized) {
37375 if (*init) {
37376 - ch = *init;
37377 init++;
37378 } else {
37379 initialized = 1;
37380 }
37381 + ch = *init;
37382 } else {
37383 ch = synth_buffer_getc();
37384 }
37385 diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
37386 index c7b888c..c94be93 100644
37387 --- a/drivers/staging/usbip/usbip_common.h
37388 +++ b/drivers/staging/usbip/usbip_common.h
37389 @@ -289,7 +289,7 @@ struct usbip_device {
37390 void (*shutdown)(struct usbip_device *);
37391 void (*reset)(struct usbip_device *);
37392 void (*unusable)(struct usbip_device *);
37393 - } eh_ops;
37394 + } __no_const eh_ops;
37395 };
37396
37397 /* usbip_common.c */
37398 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
37399 index 88b3298..3783eee 100644
37400 --- a/drivers/staging/usbip/vhci.h
37401 +++ b/drivers/staging/usbip/vhci.h
37402 @@ -88,7 +88,7 @@ struct vhci_hcd {
37403 unsigned resuming:1;
37404 unsigned long re_timeout;
37405
37406 - atomic_t seqnum;
37407 + atomic_unchecked_t seqnum;
37408
37409 /*
37410 * NOTE:
37411 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
37412 index dca9bf1..80735c9 100644
37413 --- a/drivers/staging/usbip/vhci_hcd.c
37414 +++ b/drivers/staging/usbip/vhci_hcd.c
37415 @@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
37416 return;
37417 }
37418
37419 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37420 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37421 if (priv->seqnum == 0xffff)
37422 dev_info(&urb->dev->dev, "seqnum max\n");
37423
37424 @@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
37425 return -ENOMEM;
37426 }
37427
37428 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37429 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37430 if (unlink->seqnum == 0xffff)
37431 pr_info("seqnum max\n");
37432
37433 @@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
37434 vdev->rhport = rhport;
37435 }
37436
37437 - atomic_set(&vhci->seqnum, 0);
37438 + atomic_set_unchecked(&vhci->seqnum, 0);
37439 spin_lock_init(&vhci->lock);
37440
37441 hcd->power_budget = 0; /* no limit */
37442 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
37443 index f5fba732..210a16c 100644
37444 --- a/drivers/staging/usbip/vhci_rx.c
37445 +++ b/drivers/staging/usbip/vhci_rx.c
37446 @@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
37447 if (!urb) {
37448 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37449 pr_info("max seqnum %d\n",
37450 - atomic_read(&the_controller->seqnum));
37451 + atomic_read_unchecked(&the_controller->seqnum));
37452 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37453 return;
37454 }
37455 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37456 index 7735027..30eed13 100644
37457 --- a/drivers/staging/vt6655/hostap.c
37458 +++ b/drivers/staging/vt6655/hostap.c
37459 @@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
37460 *
37461 */
37462
37463 +static net_device_ops_no_const apdev_netdev_ops;
37464 +
37465 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37466 {
37467 PSDevice apdev_priv;
37468 struct net_device *dev = pDevice->dev;
37469 int ret;
37470 - const struct net_device_ops apdev_netdev_ops = {
37471 - .ndo_start_xmit = pDevice->tx_80211,
37472 - };
37473
37474 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37475
37476 @@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37477 *apdev_priv = *pDevice;
37478 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37479
37480 + /* only half broken now */
37481 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37482 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37483
37484 pDevice->apdev->type = ARPHRD_IEEE80211;
37485 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37486 index 51b5adf..098e320 100644
37487 --- a/drivers/staging/vt6656/hostap.c
37488 +++ b/drivers/staging/vt6656/hostap.c
37489 @@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
37490 *
37491 */
37492
37493 +static net_device_ops_no_const apdev_netdev_ops;
37494 +
37495 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37496 {
37497 PSDevice apdev_priv;
37498 struct net_device *dev = pDevice->dev;
37499 int ret;
37500 - const struct net_device_ops apdev_netdev_ops = {
37501 - .ndo_start_xmit = pDevice->tx_80211,
37502 - };
37503
37504 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
37505
37506 @@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
37507 *apdev_priv = *pDevice;
37508 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37509
37510 + /* only half broken now */
37511 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37512 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37513
37514 pDevice->apdev->type = ARPHRD_IEEE80211;
37515 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37516 index 7843dfd..3db105f 100644
37517 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
37518 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37519 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
37520
37521 struct usbctlx_completor {
37522 int (*complete) (struct usbctlx_completor *);
37523 -};
37524 +} __no_const;
37525
37526 static int
37527 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
37528 diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37529 index 1ca66ea..76f1343 100644
37530 --- a/drivers/staging/zcache/tmem.c
37531 +++ b/drivers/staging/zcache/tmem.c
37532 @@ -39,7 +39,7 @@
37533 * A tmem host implementation must use this function to register callbacks
37534 * for memory allocation.
37535 */
37536 -static struct tmem_hostops tmem_hostops;
37537 +static tmem_hostops_no_const tmem_hostops;
37538
37539 static void tmem_objnode_tree_init(void);
37540
37541 @@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
37542 * A tmem host implementation must use this function to register
37543 * callbacks for a page-accessible memory (PAM) implementation
37544 */
37545 -static struct tmem_pamops tmem_pamops;
37546 +static tmem_pamops_no_const tmem_pamops;
37547
37548 void tmem_register_pamops(struct tmem_pamops *m)
37549 {
37550 diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
37551 index 0d4aa82..f7832d4 100644
37552 --- a/drivers/staging/zcache/tmem.h
37553 +++ b/drivers/staging/zcache/tmem.h
37554 @@ -180,6 +180,7 @@ struct tmem_pamops {
37555 void (*new_obj)(struct tmem_obj *);
37556 int (*replace_in_obj)(void *, struct tmem_obj *);
37557 };
37558 +typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37559 extern void tmem_register_pamops(struct tmem_pamops *m);
37560
37561 /* memory allocation methods provided by the host implementation */
37562 @@ -189,6 +190,7 @@ struct tmem_hostops {
37563 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37564 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37565 };
37566 +typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37567 extern void tmem_register_hostops(struct tmem_hostops *m);
37568
37569 /* core tmem accessor functions */
37570 diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37571 index 30a6770..fa323f8 100644
37572 --- a/drivers/target/target_core_cdb.c
37573 +++ b/drivers/target/target_core_cdb.c
37574 @@ -1107,7 +1107,7 @@ int target_emulate_write_same(struct se_task *task)
37575 if (num_blocks != 0)
37576 range = num_blocks;
37577 else
37578 - range = (dev->transport->get_blocks(dev) - lba);
37579 + range = (dev->transport->get_blocks(dev) - lba) + 1;
37580
37581 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
37582 (unsigned long long)lba, (unsigned long long)range);
37583 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37584 index c3148b1..89d10e6 100644
37585 --- a/drivers/target/target_core_pr.c
37586 +++ b/drivers/target/target_core_pr.c
37587 @@ -2038,7 +2038,7 @@ static int __core_scsi3_write_aptpl_to_file(
37588 if (IS_ERR(file) || !file || !file->f_dentry) {
37589 pr_err("filp_open(%s) for APTPL metadata"
37590 " failed\n", path);
37591 - return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
37592 + return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
37593 }
37594
37595 iov[0].iov_base = &buf[0];
37596 @@ -3826,7 +3826,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37597 " SPC-2 reservation is held, returning"
37598 " RESERVATION_CONFLICT\n");
37599 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
37600 - ret = EINVAL;
37601 + ret = -EINVAL;
37602 goto out;
37603 }
37604
37605 @@ -3836,7 +3836,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37606 */
37607 if (!cmd->se_sess) {
37608 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
37609 - return -EINVAL;
37610 + ret = -EINVAL;
37611 + goto out;
37612 }
37613
37614 if (cmd->data_length < 24) {
37615 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
37616 index f015839..b15dfc4 100644
37617 --- a/drivers/target/target_core_tmr.c
37618 +++ b/drivers/target/target_core_tmr.c
37619 @@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
37620 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37621 cmd->t_task_list_num,
37622 atomic_read(&cmd->t_task_cdbs_left),
37623 - atomic_read(&cmd->t_task_cdbs_sent),
37624 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37625 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37626 (cmd->transport_state & CMD_T_STOP) != 0,
37627 (cmd->transport_state & CMD_T_SENT) != 0);
37628 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
37629 index 443704f..92d3517 100644
37630 --- a/drivers/target/target_core_transport.c
37631 +++ b/drivers/target/target_core_transport.c
37632 @@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
37633 spin_lock_init(&dev->se_port_lock);
37634 spin_lock_init(&dev->se_tmr_lock);
37635 spin_lock_init(&dev->qf_cmd_lock);
37636 - atomic_set(&dev->dev_ordered_id, 0);
37637 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
37638
37639 se_dev_set_default_attribs(dev, dev_limits);
37640
37641 @@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
37642 * Used to determine when ORDERED commands should go from
37643 * Dormant to Active status.
37644 */
37645 - cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37646 + cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
37647 smp_mb__after_atomic_inc();
37648 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
37649 cmd->se_ordered_id, cmd->sam_task_attr,
37650 @@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37651 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37652 cmd->t_task_list_num,
37653 atomic_read(&cmd->t_task_cdbs_left),
37654 - atomic_read(&cmd->t_task_cdbs_sent),
37655 + atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37656 atomic_read(&cmd->t_task_cdbs_ex_left),
37657 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37658 (cmd->transport_state & CMD_T_STOP) != 0,
37659 @@ -2216,9 +2216,9 @@ check_depth:
37660 cmd = task->task_se_cmd;
37661 spin_lock_irqsave(&cmd->t_state_lock, flags);
37662 task->task_flags |= (TF_ACTIVE | TF_SENT);
37663 - atomic_inc(&cmd->t_task_cdbs_sent);
37664 + atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
37665
37666 - if (atomic_read(&cmd->t_task_cdbs_sent) ==
37667 + if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37668 cmd->t_task_list_num)
37669 cmd->transport_state |= CMD_T_SENT;
37670
37671 diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
37672 index a375f25..da90f64 100644
37673 --- a/drivers/target/tcm_fc/tfc_cmd.c
37674 +++ b/drivers/target/tcm_fc/tfc_cmd.c
37675 @@ -240,6 +240,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
37676 {
37677 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
37678
37679 + if (cmd->aborted)
37680 + return ~0;
37681 return fc_seq_exch(cmd->seq)->rxid;
37682 }
37683
37684 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
37685 index 3436436..772237b 100644
37686 --- a/drivers/tty/hvc/hvcs.c
37687 +++ b/drivers/tty/hvc/hvcs.c
37688 @@ -83,6 +83,7 @@
37689 #include <asm/hvcserver.h>
37690 #include <asm/uaccess.h>
37691 #include <asm/vio.h>
37692 +#include <asm/local.h>
37693
37694 /*
37695 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37696 @@ -270,7 +271,7 @@ struct hvcs_struct {
37697 unsigned int index;
37698
37699 struct tty_struct *tty;
37700 - int open_count;
37701 + local_t open_count;
37702
37703 /*
37704 * Used to tell the driver kernel_thread what operations need to take
37705 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
37706
37707 spin_lock_irqsave(&hvcsd->lock, flags);
37708
37709 - if (hvcsd->open_count > 0) {
37710 + if (local_read(&hvcsd->open_count) > 0) {
37711 spin_unlock_irqrestore(&hvcsd->lock, flags);
37712 printk(KERN_INFO "HVCS: vterm state unchanged. "
37713 "The hvcs device node is still in use.\n");
37714 @@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
37715 if ((retval = hvcs_partner_connect(hvcsd)))
37716 goto error_release;
37717
37718 - hvcsd->open_count = 1;
37719 + local_set(&hvcsd->open_count, 1);
37720 hvcsd->tty = tty;
37721 tty->driver_data = hvcsd;
37722
37723 @@ -1172,7 +1173,7 @@ fast_open:
37724
37725 spin_lock_irqsave(&hvcsd->lock, flags);
37726 kref_get(&hvcsd->kref);
37727 - hvcsd->open_count++;
37728 + local_inc(&hvcsd->open_count);
37729 hvcsd->todo_mask |= HVCS_SCHED_READ;
37730 spin_unlock_irqrestore(&hvcsd->lock, flags);
37731
37732 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37733 hvcsd = tty->driver_data;
37734
37735 spin_lock_irqsave(&hvcsd->lock, flags);
37736 - if (--hvcsd->open_count == 0) {
37737 + if (local_dec_and_test(&hvcsd->open_count)) {
37738
37739 vio_disable_interrupts(hvcsd->vdev);
37740
37741 @@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
37742 free_irq(irq, hvcsd);
37743 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37744 return;
37745 - } else if (hvcsd->open_count < 0) {
37746 + } else if (local_read(&hvcsd->open_count) < 0) {
37747 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37748 " is missmanaged.\n",
37749 - hvcsd->vdev->unit_address, hvcsd->open_count);
37750 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37751 }
37752
37753 spin_unlock_irqrestore(&hvcsd->lock, flags);
37754 @@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37755
37756 spin_lock_irqsave(&hvcsd->lock, flags);
37757 /* Preserve this so that we know how many kref refs to put */
37758 - temp_open_count = hvcsd->open_count;
37759 + temp_open_count = local_read(&hvcsd->open_count);
37760
37761 /*
37762 * Don't kref put inside the spinlock because the destruction
37763 @@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
37764 hvcsd->tty->driver_data = NULL;
37765 hvcsd->tty = NULL;
37766
37767 - hvcsd->open_count = 0;
37768 + local_set(&hvcsd->open_count, 0);
37769
37770 /* This will drop any buffered data on the floor which is OK in a hangup
37771 * scenario. */
37772 @@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
37773 * the middle of a write operation? This is a crummy place to do this
37774 * but we want to keep it all in the spinlock.
37775 */
37776 - if (hvcsd->open_count <= 0) {
37777 + if (local_read(&hvcsd->open_count) <= 0) {
37778 spin_unlock_irqrestore(&hvcsd->lock, flags);
37779 return -ENODEV;
37780 }
37781 @@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
37782 {
37783 struct hvcs_struct *hvcsd = tty->driver_data;
37784
37785 - if (!hvcsd || hvcsd->open_count <= 0)
37786 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37787 return 0;
37788
37789 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
37790 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
37791 index 4daf962..b4a2281 100644
37792 --- a/drivers/tty/ipwireless/tty.c
37793 +++ b/drivers/tty/ipwireless/tty.c
37794 @@ -29,6 +29,7 @@
37795 #include <linux/tty_driver.h>
37796 #include <linux/tty_flip.h>
37797 #include <linux/uaccess.h>
37798 +#include <asm/local.h>
37799
37800 #include "tty.h"
37801 #include "network.h"
37802 @@ -51,7 +52,7 @@ struct ipw_tty {
37803 int tty_type;
37804 struct ipw_network *network;
37805 struct tty_struct *linux_tty;
37806 - int open_count;
37807 + local_t open_count;
37808 unsigned int control_lines;
37809 struct mutex ipw_tty_mutex;
37810 int tx_bytes_queued;
37811 @@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37812 mutex_unlock(&tty->ipw_tty_mutex);
37813 return -ENODEV;
37814 }
37815 - if (tty->open_count == 0)
37816 + if (local_read(&tty->open_count) == 0)
37817 tty->tx_bytes_queued = 0;
37818
37819 - tty->open_count++;
37820 + local_inc(&tty->open_count);
37821
37822 tty->linux_tty = linux_tty;
37823 linux_tty->driver_data = tty;
37824 @@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
37825
37826 static void do_ipw_close(struct ipw_tty *tty)
37827 {
37828 - tty->open_count--;
37829 -
37830 - if (tty->open_count == 0) {
37831 + if (local_dec_return(&tty->open_count) == 0) {
37832 struct tty_struct *linux_tty = tty->linux_tty;
37833
37834 if (linux_tty != NULL) {
37835 @@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
37836 return;
37837
37838 mutex_lock(&tty->ipw_tty_mutex);
37839 - if (tty->open_count == 0) {
37840 + if (local_read(&tty->open_count) == 0) {
37841 mutex_unlock(&tty->ipw_tty_mutex);
37842 return;
37843 }
37844 @@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
37845 return;
37846 }
37847
37848 - if (!tty->open_count) {
37849 + if (!local_read(&tty->open_count)) {
37850 mutex_unlock(&tty->ipw_tty_mutex);
37851 return;
37852 }
37853 @@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
37854 return -ENODEV;
37855
37856 mutex_lock(&tty->ipw_tty_mutex);
37857 - if (!tty->open_count) {
37858 + if (!local_read(&tty->open_count)) {
37859 mutex_unlock(&tty->ipw_tty_mutex);
37860 return -EINVAL;
37861 }
37862 @@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
37863 if (!tty)
37864 return -ENODEV;
37865
37866 - if (!tty->open_count)
37867 + if (!local_read(&tty->open_count))
37868 return -EINVAL;
37869
37870 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
37871 @@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
37872 if (!tty)
37873 return 0;
37874
37875 - if (!tty->open_count)
37876 + if (!local_read(&tty->open_count))
37877 return 0;
37878
37879 return tty->tx_bytes_queued;
37880 @@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
37881 if (!tty)
37882 return -ENODEV;
37883
37884 - if (!tty->open_count)
37885 + if (!local_read(&tty->open_count))
37886 return -EINVAL;
37887
37888 return get_control_lines(tty);
37889 @@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
37890 if (!tty)
37891 return -ENODEV;
37892
37893 - if (!tty->open_count)
37894 + if (!local_read(&tty->open_count))
37895 return -EINVAL;
37896
37897 return set_control_lines(tty, set, clear);
37898 @@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
37899 if (!tty)
37900 return -ENODEV;
37901
37902 - if (!tty->open_count)
37903 + if (!local_read(&tty->open_count))
37904 return -EINVAL;
37905
37906 /* FIXME: Exactly how is the tty object locked here .. */
37907 @@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
37908 against a parallel ioctl etc */
37909 mutex_lock(&ttyj->ipw_tty_mutex);
37910 }
37911 - while (ttyj->open_count)
37912 + while (local_read(&ttyj->open_count))
37913 do_ipw_close(ttyj);
37914 ipwireless_disassociate_network_ttys(network,
37915 ttyj->channel_idx);
37916 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
37917 index c43b683..0a88f1c 100644
37918 --- a/drivers/tty/n_gsm.c
37919 +++ b/drivers/tty/n_gsm.c
37920 @@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
37921 kref_init(&dlci->ref);
37922 mutex_init(&dlci->mutex);
37923 dlci->fifo = &dlci->_fifo;
37924 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37925 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37926 kfree(dlci);
37927 return NULL;
37928 }
37929 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
37930 index 94b6eda..15f7cec 100644
37931 --- a/drivers/tty/n_tty.c
37932 +++ b/drivers/tty/n_tty.c
37933 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
37934 {
37935 *ops = tty_ldisc_N_TTY;
37936 ops->owner = NULL;
37937 - ops->refcount = ops->flags = 0;
37938 + atomic_set(&ops->refcount, 0);
37939 + ops->flags = 0;
37940 }
37941 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
37942 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
37943 index eeae7fa..177a743 100644
37944 --- a/drivers/tty/pty.c
37945 +++ b/drivers/tty/pty.c
37946 @@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37947 panic("Couldn't register Unix98 pts driver");
37948
37949 /* Now create the /dev/ptmx special device */
37950 + pax_open_kernel();
37951 tty_default_fops(&ptmx_fops);
37952 - ptmx_fops.open = ptmx_open;
37953 + *(void **)&ptmx_fops.open = ptmx_open;
37954 + pax_close_kernel();
37955
37956 cdev_init(&ptmx_cdev, &ptmx_fops);
37957 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
37958 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
37959 index 2b42a01..32a2ed3 100644
37960 --- a/drivers/tty/serial/kgdboc.c
37961 +++ b/drivers/tty/serial/kgdboc.c
37962 @@ -24,8 +24,9 @@
37963 #define MAX_CONFIG_LEN 40
37964
37965 static struct kgdb_io kgdboc_io_ops;
37966 +static struct kgdb_io kgdboc_io_ops_console;
37967
37968 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37969 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37970 static int configured = -1;
37971
37972 static char config[MAX_CONFIG_LEN];
37973 @@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
37974 kgdboc_unregister_kbd();
37975 if (configured == 1)
37976 kgdb_unregister_io_module(&kgdboc_io_ops);
37977 + else if (configured == 2)
37978 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
37979 }
37980
37981 static int configure_kgdboc(void)
37982 @@ -157,13 +160,13 @@ static int configure_kgdboc(void)
37983 int err;
37984 char *cptr = config;
37985 struct console *cons;
37986 + int is_console = 0;
37987
37988 err = kgdboc_option_setup(config);
37989 if (err || !strlen(config) || isspace(config[0]))
37990 goto noconfig;
37991
37992 err = -ENODEV;
37993 - kgdboc_io_ops.is_console = 0;
37994 kgdb_tty_driver = NULL;
37995
37996 kgdboc_use_kms = 0;
37997 @@ -184,7 +187,7 @@ static int configure_kgdboc(void)
37998 int idx;
37999 if (cons->device && cons->device(cons, &idx) == p &&
38000 idx == tty_line) {
38001 - kgdboc_io_ops.is_console = 1;
38002 + is_console = 1;
38003 break;
38004 }
38005 cons = cons->next;
38006 @@ -194,12 +197,16 @@ static int configure_kgdboc(void)
38007 kgdb_tty_line = tty_line;
38008
38009 do_register:
38010 - err = kgdb_register_io_module(&kgdboc_io_ops);
38011 + if (is_console) {
38012 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
38013 + configured = 2;
38014 + } else {
38015 + err = kgdb_register_io_module(&kgdboc_io_ops);
38016 + configured = 1;
38017 + }
38018 if (err)
38019 goto noconfig;
38020
38021 - configured = 1;
38022 -
38023 return 0;
38024
38025 noconfig:
38026 @@ -213,7 +220,7 @@ noconfig:
38027 static int __init init_kgdboc(void)
38028 {
38029 /* Already configured? */
38030 - if (configured == 1)
38031 + if (configured >= 1)
38032 return 0;
38033
38034 return configure_kgdboc();
38035 @@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
38036 if (config[len - 1] == '\n')
38037 config[len - 1] = '\0';
38038
38039 - if (configured == 1)
38040 + if (configured >= 1)
38041 cleanup_kgdboc();
38042
38043 /* Go and configure with the new params. */
38044 @@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
38045 .post_exception = kgdboc_post_exp_handler,
38046 };
38047
38048 +static struct kgdb_io kgdboc_io_ops_console = {
38049 + .name = "kgdboc",
38050 + .read_char = kgdboc_get_char,
38051 + .write_char = kgdboc_put_char,
38052 + .pre_exception = kgdboc_pre_exp_handler,
38053 + .post_exception = kgdboc_post_exp_handler,
38054 + .is_console = 1
38055 +};
38056 +
38057 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38058 /* This is only available if kgdboc is a built in for early debugging */
38059 static int __init kgdboc_early_init(char *opt)
38060 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
38061 index 05728894..b9d44c6 100644
38062 --- a/drivers/tty/sysrq.c
38063 +++ b/drivers/tty/sysrq.c
38064 @@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
38065 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38066 size_t count, loff_t *ppos)
38067 {
38068 - if (count) {
38069 + if (count && capable(CAP_SYS_ADMIN)) {
38070 char c;
38071
38072 if (get_user(c, buf))
38073 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
38074 index d939bd7..33d92cd 100644
38075 --- a/drivers/tty/tty_io.c
38076 +++ b/drivers/tty/tty_io.c
38077 @@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
38078
38079 void tty_default_fops(struct file_operations *fops)
38080 {
38081 - *fops = tty_fops;
38082 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
38083 }
38084
38085 /*
38086 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
38087 index 24b95db..9c078d0 100644
38088 --- a/drivers/tty/tty_ldisc.c
38089 +++ b/drivers/tty/tty_ldisc.c
38090 @@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
38091 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38092 struct tty_ldisc_ops *ldo = ld->ops;
38093
38094 - ldo->refcount--;
38095 + atomic_dec(&ldo->refcount);
38096 module_put(ldo->owner);
38097 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38098
38099 @@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
38100 spin_lock_irqsave(&tty_ldisc_lock, flags);
38101 tty_ldiscs[disc] = new_ldisc;
38102 new_ldisc->num = disc;
38103 - new_ldisc->refcount = 0;
38104 + atomic_set(&new_ldisc->refcount, 0);
38105 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38106
38107 return ret;
38108 @@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
38109 return -EINVAL;
38110
38111 spin_lock_irqsave(&tty_ldisc_lock, flags);
38112 - if (tty_ldiscs[disc]->refcount)
38113 + if (atomic_read(&tty_ldiscs[disc]->refcount))
38114 ret = -EBUSY;
38115 else
38116 tty_ldiscs[disc] = NULL;
38117 @@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
38118 if (ldops) {
38119 ret = ERR_PTR(-EAGAIN);
38120 if (try_module_get(ldops->owner)) {
38121 - ldops->refcount++;
38122 + atomic_inc(&ldops->refcount);
38123 ret = ldops;
38124 }
38125 }
38126 @@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
38127 unsigned long flags;
38128
38129 spin_lock_irqsave(&tty_ldisc_lock, flags);
38130 - ldops->refcount--;
38131 + atomic_dec(&ldops->refcount);
38132 module_put(ldops->owner);
38133 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38134 }
38135 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
38136 index 3b0c4e3..f98a992 100644
38137 --- a/drivers/tty/vt/keyboard.c
38138 +++ b/drivers/tty/vt/keyboard.c
38139 @@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
38140 kbd->kbdmode == VC_OFF) &&
38141 value != KVAL(K_SAK))
38142 return; /* SAK is allowed even in raw mode */
38143 +
38144 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38145 + {
38146 + void *func = fn_handler[value];
38147 + if (func == fn_show_state || func == fn_show_ptregs ||
38148 + func == fn_show_mem)
38149 + return;
38150 + }
38151 +#endif
38152 +
38153 fn_handler[value](vc);
38154 }
38155
38156 @@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38157 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38158 return -EFAULT;
38159
38160 - if (!capable(CAP_SYS_TTY_CONFIG))
38161 - perm = 0;
38162 -
38163 switch (cmd) {
38164 case KDGKBENT:
38165 /* Ensure another thread doesn't free it under us */
38166 @@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38167 spin_unlock_irqrestore(&kbd_event_lock, flags);
38168 return put_user(val, &user_kbe->kb_value);
38169 case KDSKBENT:
38170 + if (!capable(CAP_SYS_TTY_CONFIG))
38171 + perm = 0;
38172 +
38173 if (!perm)
38174 return -EPERM;
38175 if (!i && v == K_NOSUCHMAP) {
38176 @@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38177 int i, j, k;
38178 int ret;
38179
38180 - if (!capable(CAP_SYS_TTY_CONFIG))
38181 - perm = 0;
38182 -
38183 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38184 if (!kbs) {
38185 ret = -ENOMEM;
38186 @@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
38187 kfree(kbs);
38188 return ((p && *p) ? -EOVERFLOW : 0);
38189 case KDSKBSENT:
38190 + if (!capable(CAP_SYS_TTY_CONFIG))
38191 + perm = 0;
38192 +
38193 if (!perm) {
38194 ret = -EPERM;
38195 goto reterr;
38196 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
38197 index a783d53..cb30d94 100644
38198 --- a/drivers/uio/uio.c
38199 +++ b/drivers/uio/uio.c
38200 @@ -25,6 +25,7 @@
38201 #include <linux/kobject.h>
38202 #include <linux/cdev.h>
38203 #include <linux/uio_driver.h>
38204 +#include <asm/local.h>
38205
38206 #define UIO_MAX_DEVICES (1U << MINORBITS)
38207
38208 @@ -32,10 +33,10 @@ struct uio_device {
38209 struct module *owner;
38210 struct device *dev;
38211 int minor;
38212 - atomic_t event;
38213 + atomic_unchecked_t event;
38214 struct fasync_struct *async_queue;
38215 wait_queue_head_t wait;
38216 - int vma_count;
38217 + local_t vma_count;
38218 struct uio_info *info;
38219 struct kobject *map_dir;
38220 struct kobject *portio_dir;
38221 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
38222 struct device_attribute *attr, char *buf)
38223 {
38224 struct uio_device *idev = dev_get_drvdata(dev);
38225 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38226 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38227 }
38228
38229 static struct device_attribute uio_class_attributes[] = {
38230 @@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
38231 {
38232 struct uio_device *idev = info->uio_dev;
38233
38234 - atomic_inc(&idev->event);
38235 + atomic_inc_unchecked(&idev->event);
38236 wake_up_interruptible(&idev->wait);
38237 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38238 }
38239 @@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
38240 }
38241
38242 listener->dev = idev;
38243 - listener->event_count = atomic_read(&idev->event);
38244 + listener->event_count = atomic_read_unchecked(&idev->event);
38245 filep->private_data = listener;
38246
38247 if (idev->info->open) {
38248 @@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
38249 return -EIO;
38250
38251 poll_wait(filep, &idev->wait, wait);
38252 - if (listener->event_count != atomic_read(&idev->event))
38253 + if (listener->event_count != atomic_read_unchecked(&idev->event))
38254 return POLLIN | POLLRDNORM;
38255 return 0;
38256 }
38257 @@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
38258 do {
38259 set_current_state(TASK_INTERRUPTIBLE);
38260
38261 - event_count = atomic_read(&idev->event);
38262 + event_count = atomic_read_unchecked(&idev->event);
38263 if (event_count != listener->event_count) {
38264 if (copy_to_user(buf, &event_count, count))
38265 retval = -EFAULT;
38266 @@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
38267 static void uio_vma_open(struct vm_area_struct *vma)
38268 {
38269 struct uio_device *idev = vma->vm_private_data;
38270 - idev->vma_count++;
38271 + local_inc(&idev->vma_count);
38272 }
38273
38274 static void uio_vma_close(struct vm_area_struct *vma)
38275 {
38276 struct uio_device *idev = vma->vm_private_data;
38277 - idev->vma_count--;
38278 + local_dec(&idev->vma_count);
38279 }
38280
38281 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38282 @@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
38283 idev->owner = owner;
38284 idev->info = info;
38285 init_waitqueue_head(&idev->wait);
38286 - atomic_set(&idev->event, 0);
38287 + atomic_set_unchecked(&idev->event, 0);
38288
38289 ret = uio_get_minor(idev);
38290 if (ret)
38291 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
38292 index 98b89fe..aff824e 100644
38293 --- a/drivers/usb/atm/cxacru.c
38294 +++ b/drivers/usb/atm/cxacru.c
38295 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
38296 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38297 if (ret < 2)
38298 return -EINVAL;
38299 - if (index < 0 || index > 0x7f)
38300 + if (index > 0x7f)
38301 return -EINVAL;
38302 pos += tmp;
38303
38304 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38305 index d3448ca..d2864ca 100644
38306 --- a/drivers/usb/atm/usbatm.c
38307 +++ b/drivers/usb/atm/usbatm.c
38308 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38309 if (printk_ratelimit())
38310 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38311 __func__, vpi, vci);
38312 - atomic_inc(&vcc->stats->rx_err);
38313 + atomic_inc_unchecked(&vcc->stats->rx_err);
38314 return;
38315 }
38316
38317 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38318 if (length > ATM_MAX_AAL5_PDU) {
38319 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38320 __func__, length, vcc);
38321 - atomic_inc(&vcc->stats->rx_err);
38322 + atomic_inc_unchecked(&vcc->stats->rx_err);
38323 goto out;
38324 }
38325
38326 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38327 if (sarb->len < pdu_length) {
38328 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38329 __func__, pdu_length, sarb->len, vcc);
38330 - atomic_inc(&vcc->stats->rx_err);
38331 + atomic_inc_unchecked(&vcc->stats->rx_err);
38332 goto out;
38333 }
38334
38335 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38336 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38337 __func__, vcc);
38338 - atomic_inc(&vcc->stats->rx_err);
38339 + atomic_inc_unchecked(&vcc->stats->rx_err);
38340 goto out;
38341 }
38342
38343 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38344 if (printk_ratelimit())
38345 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38346 __func__, length);
38347 - atomic_inc(&vcc->stats->rx_drop);
38348 + atomic_inc_unchecked(&vcc->stats->rx_drop);
38349 goto out;
38350 }
38351
38352 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
38353
38354 vcc->push(vcc, skb);
38355
38356 - atomic_inc(&vcc->stats->rx);
38357 + atomic_inc_unchecked(&vcc->stats->rx);
38358 out:
38359 skb_trim(sarb, 0);
38360 }
38361 @@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
38362 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38363
38364 usbatm_pop(vcc, skb);
38365 - atomic_inc(&vcc->stats->tx);
38366 + atomic_inc_unchecked(&vcc->stats->tx);
38367
38368 skb = skb_dequeue(&instance->sndqueue);
38369 }
38370 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
38371 if (!left--)
38372 return sprintf(page,
38373 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38374 - atomic_read(&atm_dev->stats.aal5.tx),
38375 - atomic_read(&atm_dev->stats.aal5.tx_err),
38376 - atomic_read(&atm_dev->stats.aal5.rx),
38377 - atomic_read(&atm_dev->stats.aal5.rx_err),
38378 - atomic_read(&atm_dev->stats.aal5.rx_drop));
38379 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38380 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38381 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38382 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38383 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38384
38385 if (!left--) {
38386 if (instance->disconnected)
38387 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
38388 index d956965..4179a77 100644
38389 --- a/drivers/usb/core/devices.c
38390 +++ b/drivers/usb/core/devices.c
38391 @@ -126,7 +126,7 @@ static const char format_endpt[] =
38392 * time it gets called.
38393 */
38394 static struct device_connect_event {
38395 - atomic_t count;
38396 + atomic_unchecked_t count;
38397 wait_queue_head_t wait;
38398 } device_event = {
38399 .count = ATOMIC_INIT(1),
38400 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
38401
38402 void usbfs_conn_disc_event(void)
38403 {
38404 - atomic_add(2, &device_event.count);
38405 + atomic_add_unchecked(2, &device_event.count);
38406 wake_up(&device_event.wait);
38407 }
38408
38409 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
38410
38411 poll_wait(file, &device_event.wait, wait);
38412
38413 - event_count = atomic_read(&device_event.count);
38414 + event_count = atomic_read_unchecked(&device_event.count);
38415 if (file->f_version != event_count) {
38416 file->f_version = event_count;
38417 return POLLIN | POLLRDNORM;
38418 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38419 index 1fc8f12..20647c1 100644
38420 --- a/drivers/usb/early/ehci-dbgp.c
38421 +++ b/drivers/usb/early/ehci-dbgp.c
38422 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
38423
38424 #ifdef CONFIG_KGDB
38425 static struct kgdb_io kgdbdbgp_io_ops;
38426 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38427 +static struct kgdb_io kgdbdbgp_io_ops_console;
38428 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
38429 #else
38430 #define dbgp_kgdb_mode (0)
38431 #endif
38432 @@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
38433 .write_char = kgdbdbgp_write_char,
38434 };
38435
38436 +static struct kgdb_io kgdbdbgp_io_ops_console = {
38437 + .name = "kgdbdbgp",
38438 + .read_char = kgdbdbgp_read_char,
38439 + .write_char = kgdbdbgp_write_char,
38440 + .is_console = 1
38441 +};
38442 +
38443 static int kgdbdbgp_wait_time;
38444
38445 static int __init kgdbdbgp_parse_config(char *str)
38446 @@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
38447 ptr++;
38448 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38449 }
38450 - kgdb_register_io_module(&kgdbdbgp_io_ops);
38451 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38452 + if (early_dbgp_console.index != -1)
38453 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38454 + else
38455 + kgdb_register_io_module(&kgdbdbgp_io_ops);
38456
38457 return 0;
38458 }
38459 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38460 index d6bea3e..60b250e 100644
38461 --- a/drivers/usb/wusbcore/wa-hc.h
38462 +++ b/drivers/usb/wusbcore/wa-hc.h
38463 @@ -192,7 +192,7 @@ struct wahc {
38464 struct list_head xfer_delayed_list;
38465 spinlock_t xfer_list_lock;
38466 struct work_struct xfer_work;
38467 - atomic_t xfer_id_count;
38468 + atomic_unchecked_t xfer_id_count;
38469 };
38470
38471
38472 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
38473 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38474 spin_lock_init(&wa->xfer_list_lock);
38475 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38476 - atomic_set(&wa->xfer_id_count, 1);
38477 + atomic_set_unchecked(&wa->xfer_id_count, 1);
38478 }
38479
38480 /**
38481 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
38482 index 57c01ab..8a05959 100644
38483 --- a/drivers/usb/wusbcore/wa-xfer.c
38484 +++ b/drivers/usb/wusbcore/wa-xfer.c
38485 @@ -296,7 +296,7 @@ out:
38486 */
38487 static void wa_xfer_id_init(struct wa_xfer *xfer)
38488 {
38489 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38490 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38491 }
38492
38493 /*
38494 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
38495 index 51e4c1e..9d87e2a 100644
38496 --- a/drivers/vhost/vhost.c
38497 +++ b/drivers/vhost/vhost.c
38498 @@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
38499 return 0;
38500 }
38501
38502 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38503 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38504 {
38505 struct file *eventfp, *filep = NULL,
38506 *pollstart = NULL, *pollstop = NULL;
38507 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38508 index b0b2ac3..89a4399 100644
38509 --- a/drivers/video/aty/aty128fb.c
38510 +++ b/drivers/video/aty/aty128fb.c
38511 @@ -148,7 +148,7 @@ enum {
38512 };
38513
38514 /* Must match above enum */
38515 -static const char *r128_family[] __devinitdata = {
38516 +static const char *r128_family[] __devinitconst = {
38517 "AGP",
38518 "PCI",
38519 "PRO AGP",
38520 diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38521 index 5c3960d..15cf8fc 100644
38522 --- a/drivers/video/fbcmap.c
38523 +++ b/drivers/video/fbcmap.c
38524 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
38525 rc = -ENODEV;
38526 goto out;
38527 }
38528 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38529 - !info->fbops->fb_setcmap)) {
38530 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38531 rc = -EINVAL;
38532 goto out1;
38533 }
38534 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
38535 index c6ce416..3b9b642 100644
38536 --- a/drivers/video/fbmem.c
38537 +++ b/drivers/video/fbmem.c
38538 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38539 image->dx += image->width + 8;
38540 }
38541 } else if (rotate == FB_ROTATE_UD) {
38542 - for (x = 0; x < num && image->dx >= 0; x++) {
38543 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38544 info->fbops->fb_imageblit(info, image);
38545 image->dx -= image->width + 8;
38546 }
38547 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
38548 image->dy += image->height + 8;
38549 }
38550 } else if (rotate == FB_ROTATE_CCW) {
38551 - for (x = 0; x < num && image->dy >= 0; x++) {
38552 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38553 info->fbops->fb_imageblit(info, image);
38554 image->dy -= image->height + 8;
38555 }
38556 @@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
38557 return -EFAULT;
38558 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38559 return -EINVAL;
38560 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38561 + if (con2fb.framebuffer >= FB_MAX)
38562 return -EINVAL;
38563 if (!registered_fb[con2fb.framebuffer])
38564 request_module("fb%d", con2fb.framebuffer);
38565 diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38566 index 5a5d092..265c5ed 100644
38567 --- a/drivers/video/geode/gx1fb_core.c
38568 +++ b/drivers/video/geode/gx1fb_core.c
38569 @@ -29,7 +29,7 @@ static int crt_option = 1;
38570 static char panel_option[32] = "";
38571
38572 /* Modes relevant to the GX1 (taken from modedb.c) */
38573 -static const struct fb_videomode __devinitdata gx1_modedb[] = {
38574 +static const struct fb_videomode __devinitconst gx1_modedb[] = {
38575 /* 640x480-60 VESA */
38576 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38577 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
38578 diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
38579 index 0fad23f..0e9afa4 100644
38580 --- a/drivers/video/gxt4500.c
38581 +++ b/drivers/video/gxt4500.c
38582 @@ -156,7 +156,7 @@ struct gxt4500_par {
38583 static char *mode_option;
38584
38585 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38586 -static const struct fb_videomode defaultmode __devinitdata = {
38587 +static const struct fb_videomode defaultmode __devinitconst = {
38588 .refresh = 60,
38589 .xres = 1280,
38590 .yres = 1024,
38591 @@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
38592 return 0;
38593 }
38594
38595 -static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38596 +static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38597 .id = "IBM GXT4500P",
38598 .type = FB_TYPE_PACKED_PIXELS,
38599 .visual = FB_VISUAL_PSEUDOCOLOR,
38600 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38601 index 7672d2e..b56437f 100644
38602 --- a/drivers/video/i810/i810_accel.c
38603 +++ b/drivers/video/i810/i810_accel.c
38604 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
38605 }
38606 }
38607 printk("ringbuffer lockup!!!\n");
38608 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38609 i810_report_error(mmio);
38610 par->dev_flags |= LOCKUP;
38611 info->pixmap.scan_align = 1;
38612 diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
38613 index b83f361..2b05a91 100644
38614 --- a/drivers/video/i810/i810_main.c
38615 +++ b/drivers/video/i810/i810_main.c
38616 @@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
38617 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38618
38619 /* PCI */
38620 -static const char *i810_pci_list[] __devinitdata = {
38621 +static const char *i810_pci_list[] __devinitconst = {
38622 "Intel(R) 810 Framebuffer Device" ,
38623 "Intel(R) 810-DC100 Framebuffer Device" ,
38624 "Intel(R) 810E Framebuffer Device" ,
38625 diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38626 index de36693..3c63fc2 100644
38627 --- a/drivers/video/jz4740_fb.c
38628 +++ b/drivers/video/jz4740_fb.c
38629 @@ -136,7 +136,7 @@ struct jzfb {
38630 uint32_t pseudo_palette[16];
38631 };
38632
38633 -static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38634 +static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38635 .id = "JZ4740 FB",
38636 .type = FB_TYPE_PACKED_PIXELS,
38637 .visual = FB_VISUAL_TRUECOLOR,
38638 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38639 index 3c14e43..eafa544 100644
38640 --- a/drivers/video/logo/logo_linux_clut224.ppm
38641 +++ b/drivers/video/logo/logo_linux_clut224.ppm
38642 @@ -1,1604 +1,1123 @@
38643 P3
38644 -# Standard 224-color Linux logo
38645 80 80
38646 255
38647 - 0 0 0 0 0 0 0 0 0 0 0 0
38648 - 0 0 0 0 0 0 0 0 0 0 0 0
38649 - 0 0 0 0 0 0 0 0 0 0 0 0
38650 - 0 0 0 0 0 0 0 0 0 0 0 0
38651 - 0 0 0 0 0 0 0 0 0 0 0 0
38652 - 0 0 0 0 0 0 0 0 0 0 0 0
38653 - 0 0 0 0 0 0 0 0 0 0 0 0
38654 - 0 0 0 0 0 0 0 0 0 0 0 0
38655 - 0 0 0 0 0 0 0 0 0 0 0 0
38656 - 6 6 6 6 6 6 10 10 10 10 10 10
38657 - 10 10 10 6 6 6 6 6 6 6 6 6
38658 - 0 0 0 0 0 0 0 0 0 0 0 0
38659 - 0 0 0 0 0 0 0 0 0 0 0 0
38660 - 0 0 0 0 0 0 0 0 0 0 0 0
38661 - 0 0 0 0 0 0 0 0 0 0 0 0
38662 - 0 0 0 0 0 0 0 0 0 0 0 0
38663 - 0 0 0 0 0 0 0 0 0 0 0 0
38664 - 0 0 0 0 0 0 0 0 0 0 0 0
38665 - 0 0 0 0 0 0 0 0 0 0 0 0
38666 - 0 0 0 0 0 0 0 0 0 0 0 0
38667 - 0 0 0 0 0 0 0 0 0 0 0 0
38668 - 0 0 0 0 0 0 0 0 0 0 0 0
38669 - 0 0 0 0 0 0 0 0 0 0 0 0
38670 - 0 0 0 0 0 0 0 0 0 0 0 0
38671 - 0 0 0 0 0 0 0 0 0 0 0 0
38672 - 0 0 0 0 0 0 0 0 0 0 0 0
38673 - 0 0 0 0 0 0 0 0 0 0 0 0
38674 - 0 0 0 0 0 0 0 0 0 0 0 0
38675 - 0 0 0 6 6 6 10 10 10 14 14 14
38676 - 22 22 22 26 26 26 30 30 30 34 34 34
38677 - 30 30 30 30 30 30 26 26 26 18 18 18
38678 - 14 14 14 10 10 10 6 6 6 0 0 0
38679 - 0 0 0 0 0 0 0 0 0 0 0 0
38680 - 0 0 0 0 0 0 0 0 0 0 0 0
38681 - 0 0 0 0 0 0 0 0 0 0 0 0
38682 - 0 0 0 0 0 0 0 0 0 0 0 0
38683 - 0 0 0 0 0 0 0 0 0 0 0 0
38684 - 0 0 0 0 0 0 0 0 0 0 0 0
38685 - 0 0 0 0 0 0 0 0 0 0 0 0
38686 - 0 0 0 0 0 0 0 0 0 0 0 0
38687 - 0 0 0 0 0 0 0 0 0 0 0 0
38688 - 0 0 0 0 0 1 0 0 1 0 0 0
38689 - 0 0 0 0 0 0 0 0 0 0 0 0
38690 - 0 0 0 0 0 0 0 0 0 0 0 0
38691 - 0 0 0 0 0 0 0 0 0 0 0 0
38692 - 0 0 0 0 0 0 0 0 0 0 0 0
38693 - 0 0 0 0 0 0 0 0 0 0 0 0
38694 - 0 0 0 0 0 0 0 0 0 0 0 0
38695 - 6 6 6 14 14 14 26 26 26 42 42 42
38696 - 54 54 54 66 66 66 78 78 78 78 78 78
38697 - 78 78 78 74 74 74 66 66 66 54 54 54
38698 - 42 42 42 26 26 26 18 18 18 10 10 10
38699 - 6 6 6 0 0 0 0 0 0 0 0 0
38700 - 0 0 0 0 0 0 0 0 0 0 0 0
38701 - 0 0 0 0 0 0 0 0 0 0 0 0
38702 - 0 0 0 0 0 0 0 0 0 0 0 0
38703 - 0 0 0 0 0 0 0 0 0 0 0 0
38704 - 0 0 0 0 0 0 0 0 0 0 0 0
38705 - 0 0 0 0 0 0 0 0 0 0 0 0
38706 - 0 0 0 0 0 0 0 0 0 0 0 0
38707 - 0 0 0 0 0 0 0 0 0 0 0 0
38708 - 0 0 1 0 0 0 0 0 0 0 0 0
38709 - 0 0 0 0 0 0 0 0 0 0 0 0
38710 - 0 0 0 0 0 0 0 0 0 0 0 0
38711 - 0 0 0 0 0 0 0 0 0 0 0 0
38712 - 0 0 0 0 0 0 0 0 0 0 0 0
38713 - 0 0 0 0 0 0 0 0 0 0 0 0
38714 - 0 0 0 0 0 0 0 0 0 10 10 10
38715 - 22 22 22 42 42 42 66 66 66 86 86 86
38716 - 66 66 66 38 38 38 38 38 38 22 22 22
38717 - 26 26 26 34 34 34 54 54 54 66 66 66
38718 - 86 86 86 70 70 70 46 46 46 26 26 26
38719 - 14 14 14 6 6 6 0 0 0 0 0 0
38720 - 0 0 0 0 0 0 0 0 0 0 0 0
38721 - 0 0 0 0 0 0 0 0 0 0 0 0
38722 - 0 0 0 0 0 0 0 0 0 0 0 0
38723 - 0 0 0 0 0 0 0 0 0 0 0 0
38724 - 0 0 0 0 0 0 0 0 0 0 0 0
38725 - 0 0 0 0 0 0 0 0 0 0 0 0
38726 - 0 0 0 0 0 0 0 0 0 0 0 0
38727 - 0 0 0 0 0 0 0 0 0 0 0 0
38728 - 0 0 1 0 0 1 0 0 1 0 0 0
38729 - 0 0 0 0 0 0 0 0 0 0 0 0
38730 - 0 0 0 0 0 0 0 0 0 0 0 0
38731 - 0 0 0 0 0 0 0 0 0 0 0 0
38732 - 0 0 0 0 0 0 0 0 0 0 0 0
38733 - 0 0 0 0 0 0 0 0 0 0 0 0
38734 - 0 0 0 0 0 0 10 10 10 26 26 26
38735 - 50 50 50 82 82 82 58 58 58 6 6 6
38736 - 2 2 6 2 2 6 2 2 6 2 2 6
38737 - 2 2 6 2 2 6 2 2 6 2 2 6
38738 - 6 6 6 54 54 54 86 86 86 66 66 66
38739 - 38 38 38 18 18 18 6 6 6 0 0 0
38740 - 0 0 0 0 0 0 0 0 0 0 0 0
38741 - 0 0 0 0 0 0 0 0 0 0 0 0
38742 - 0 0 0 0 0 0 0 0 0 0 0 0
38743 - 0 0 0 0 0 0 0 0 0 0 0 0
38744 - 0 0 0 0 0 0 0 0 0 0 0 0
38745 - 0 0 0 0 0 0 0 0 0 0 0 0
38746 - 0 0 0 0 0 0 0 0 0 0 0 0
38747 - 0 0 0 0 0 0 0 0 0 0 0 0
38748 - 0 0 0 0 0 0 0 0 0 0 0 0
38749 - 0 0 0 0 0 0 0 0 0 0 0 0
38750 - 0 0 0 0 0 0 0 0 0 0 0 0
38751 - 0 0 0 0 0 0 0 0 0 0 0 0
38752 - 0 0 0 0 0 0 0 0 0 0 0 0
38753 - 0 0 0 0 0 0 0 0 0 0 0 0
38754 - 0 0 0 6 6 6 22 22 22 50 50 50
38755 - 78 78 78 34 34 34 2 2 6 2 2 6
38756 - 2 2 6 2 2 6 2 2 6 2 2 6
38757 - 2 2 6 2 2 6 2 2 6 2 2 6
38758 - 2 2 6 2 2 6 6 6 6 70 70 70
38759 - 78 78 78 46 46 46 22 22 22 6 6 6
38760 - 0 0 0 0 0 0 0 0 0 0 0 0
38761 - 0 0 0 0 0 0 0 0 0 0 0 0
38762 - 0 0 0 0 0 0 0 0 0 0 0 0
38763 - 0 0 0 0 0 0 0 0 0 0 0 0
38764 - 0 0 0 0 0 0 0 0 0 0 0 0
38765 - 0 0 0 0 0 0 0 0 0 0 0 0
38766 - 0 0 0 0 0 0 0 0 0 0 0 0
38767 - 0 0 0 0 0 0 0 0 0 0 0 0
38768 - 0 0 1 0 0 1 0 0 1 0 0 0
38769 - 0 0 0 0 0 0 0 0 0 0 0 0
38770 - 0 0 0 0 0 0 0 0 0 0 0 0
38771 - 0 0 0 0 0 0 0 0 0 0 0 0
38772 - 0 0 0 0 0 0 0 0 0 0 0 0
38773 - 0 0 0 0 0 0 0 0 0 0 0 0
38774 - 6 6 6 18 18 18 42 42 42 82 82 82
38775 - 26 26 26 2 2 6 2 2 6 2 2 6
38776 - 2 2 6 2 2 6 2 2 6 2 2 6
38777 - 2 2 6 2 2 6 2 2 6 14 14 14
38778 - 46 46 46 34 34 34 6 6 6 2 2 6
38779 - 42 42 42 78 78 78 42 42 42 18 18 18
38780 - 6 6 6 0 0 0 0 0 0 0 0 0
38781 - 0 0 0 0 0 0 0 0 0 0 0 0
38782 - 0 0 0 0 0 0 0 0 0 0 0 0
38783 - 0 0 0 0 0 0 0 0 0 0 0 0
38784 - 0 0 0 0 0 0 0 0 0 0 0 0
38785 - 0 0 0 0 0 0 0 0 0 0 0 0
38786 - 0 0 0 0 0 0 0 0 0 0 0 0
38787 - 0 0 0 0 0 0 0 0 0 0 0 0
38788 - 0 0 1 0 0 0 0 0 1 0 0 0
38789 - 0 0 0 0 0 0 0 0 0 0 0 0
38790 - 0 0 0 0 0 0 0 0 0 0 0 0
38791 - 0 0 0 0 0 0 0 0 0 0 0 0
38792 - 0 0 0 0 0 0 0 0 0 0 0 0
38793 - 0 0 0 0 0 0 0 0 0 0 0 0
38794 - 10 10 10 30 30 30 66 66 66 58 58 58
38795 - 2 2 6 2 2 6 2 2 6 2 2 6
38796 - 2 2 6 2 2 6 2 2 6 2 2 6
38797 - 2 2 6 2 2 6 2 2 6 26 26 26
38798 - 86 86 86 101 101 101 46 46 46 10 10 10
38799 - 2 2 6 58 58 58 70 70 70 34 34 34
38800 - 10 10 10 0 0 0 0 0 0 0 0 0
38801 - 0 0 0 0 0 0 0 0 0 0 0 0
38802 - 0 0 0 0 0 0 0 0 0 0 0 0
38803 - 0 0 0 0 0 0 0 0 0 0 0 0
38804 - 0 0 0 0 0 0 0 0 0 0 0 0
38805 - 0 0 0 0 0 0 0 0 0 0 0 0
38806 - 0 0 0 0 0 0 0 0 0 0 0 0
38807 - 0 0 0 0 0 0 0 0 0 0 0 0
38808 - 0 0 1 0 0 1 0 0 1 0 0 0
38809 - 0 0 0 0 0 0 0 0 0 0 0 0
38810 - 0 0 0 0 0 0 0 0 0 0 0 0
38811 - 0 0 0 0 0 0 0 0 0 0 0 0
38812 - 0 0 0 0 0 0 0 0 0 0 0 0
38813 - 0 0 0 0 0 0 0 0 0 0 0 0
38814 - 14 14 14 42 42 42 86 86 86 10 10 10
38815 - 2 2 6 2 2 6 2 2 6 2 2 6
38816 - 2 2 6 2 2 6 2 2 6 2 2 6
38817 - 2 2 6 2 2 6 2 2 6 30 30 30
38818 - 94 94 94 94 94 94 58 58 58 26 26 26
38819 - 2 2 6 6 6 6 78 78 78 54 54 54
38820 - 22 22 22 6 6 6 0 0 0 0 0 0
38821 - 0 0 0 0 0 0 0 0 0 0 0 0
38822 - 0 0 0 0 0 0 0 0 0 0 0 0
38823 - 0 0 0 0 0 0 0 0 0 0 0 0
38824 - 0 0 0 0 0 0 0 0 0 0 0 0
38825 - 0 0 0 0 0 0 0 0 0 0 0 0
38826 - 0 0 0 0 0 0 0 0 0 0 0 0
38827 - 0 0 0 0 0 0 0 0 0 0 0 0
38828 - 0 0 0 0 0 0 0 0 0 0 0 0
38829 - 0 0 0 0 0 0 0 0 0 0 0 0
38830 - 0 0 0 0 0 0 0 0 0 0 0 0
38831 - 0 0 0 0 0 0 0 0 0 0 0 0
38832 - 0 0 0 0 0 0 0 0 0 0 0 0
38833 - 0 0 0 0 0 0 0 0 0 6 6 6
38834 - 22 22 22 62 62 62 62 62 62 2 2 6
38835 - 2 2 6 2 2 6 2 2 6 2 2 6
38836 - 2 2 6 2 2 6 2 2 6 2 2 6
38837 - 2 2 6 2 2 6 2 2 6 26 26 26
38838 - 54 54 54 38 38 38 18 18 18 10 10 10
38839 - 2 2 6 2 2 6 34 34 34 82 82 82
38840 - 38 38 38 14 14 14 0 0 0 0 0 0
38841 - 0 0 0 0 0 0 0 0 0 0 0 0
38842 - 0 0 0 0 0 0 0 0 0 0 0 0
38843 - 0 0 0 0 0 0 0 0 0 0 0 0
38844 - 0 0 0 0 0 0 0 0 0 0 0 0
38845 - 0 0 0 0 0 0 0 0 0 0 0 0
38846 - 0 0 0 0 0 0 0 0 0 0 0 0
38847 - 0 0 0 0 0 0 0 0 0 0 0 0
38848 - 0 0 0 0 0 1 0 0 1 0 0 0
38849 - 0 0 0 0 0 0 0 0 0 0 0 0
38850 - 0 0 0 0 0 0 0 0 0 0 0 0
38851 - 0 0 0 0 0 0 0 0 0 0 0 0
38852 - 0 0 0 0 0 0 0 0 0 0 0 0
38853 - 0 0 0 0 0 0 0 0 0 6 6 6
38854 - 30 30 30 78 78 78 30 30 30 2 2 6
38855 - 2 2 6 2 2 6 2 2 6 2 2 6
38856 - 2 2 6 2 2 6 2 2 6 2 2 6
38857 - 2 2 6 2 2 6 2 2 6 10 10 10
38858 - 10 10 10 2 2 6 2 2 6 2 2 6
38859 - 2 2 6 2 2 6 2 2 6 78 78 78
38860 - 50 50 50 18 18 18 6 6 6 0 0 0
38861 - 0 0 0 0 0 0 0 0 0 0 0 0
38862 - 0 0 0 0 0 0 0 0 0 0 0 0
38863 - 0 0 0 0 0 0 0 0 0 0 0 0
38864 - 0 0 0 0 0 0 0 0 0 0 0 0
38865 - 0 0 0 0 0 0 0 0 0 0 0 0
38866 - 0 0 0 0 0 0 0 0 0 0 0 0
38867 - 0 0 0 0 0 0 0 0 0 0 0 0
38868 - 0 0 1 0 0 0 0 0 0 0 0 0
38869 - 0 0 0 0 0 0 0 0 0 0 0 0
38870 - 0 0 0 0 0 0 0 0 0 0 0 0
38871 - 0 0 0 0 0 0 0 0 0 0 0 0
38872 - 0 0 0 0 0 0 0 0 0 0 0 0
38873 - 0 0 0 0 0 0 0 0 0 10 10 10
38874 - 38 38 38 86 86 86 14 14 14 2 2 6
38875 - 2 2 6 2 2 6 2 2 6 2 2 6
38876 - 2 2 6 2 2 6 2 2 6 2 2 6
38877 - 2 2 6 2 2 6 2 2 6 2 2 6
38878 - 2 2 6 2 2 6 2 2 6 2 2 6
38879 - 2 2 6 2 2 6 2 2 6 54 54 54
38880 - 66 66 66 26 26 26 6 6 6 0 0 0
38881 - 0 0 0 0 0 0 0 0 0 0 0 0
38882 - 0 0 0 0 0 0 0 0 0 0 0 0
38883 - 0 0 0 0 0 0 0 0 0 0 0 0
38884 - 0 0 0 0 0 0 0 0 0 0 0 0
38885 - 0 0 0 0 0 0 0 0 0 0 0 0
38886 - 0 0 0 0 0 0 0 0 0 0 0 0
38887 - 0 0 0 0 0 0 0 0 0 0 0 0
38888 - 0 0 0 0 0 1 0 0 1 0 0 0
38889 - 0 0 0 0 0 0 0 0 0 0 0 0
38890 - 0 0 0 0 0 0 0 0 0 0 0 0
38891 - 0 0 0 0 0 0 0 0 0 0 0 0
38892 - 0 0 0 0 0 0 0 0 0 0 0 0
38893 - 0 0 0 0 0 0 0 0 0 14 14 14
38894 - 42 42 42 82 82 82 2 2 6 2 2 6
38895 - 2 2 6 6 6 6 10 10 10 2 2 6
38896 - 2 2 6 2 2 6 2 2 6 2 2 6
38897 - 2 2 6 2 2 6 2 2 6 6 6 6
38898 - 14 14 14 10 10 10 2 2 6 2 2 6
38899 - 2 2 6 2 2 6 2 2 6 18 18 18
38900 - 82 82 82 34 34 34 10 10 10 0 0 0
38901 - 0 0 0 0 0 0 0 0 0 0 0 0
38902 - 0 0 0 0 0 0 0 0 0 0 0 0
38903 - 0 0 0 0 0 0 0 0 0 0 0 0
38904 - 0 0 0 0 0 0 0 0 0 0 0 0
38905 - 0 0 0 0 0 0 0 0 0 0 0 0
38906 - 0 0 0 0 0 0 0 0 0 0 0 0
38907 - 0 0 0 0 0 0 0 0 0 0 0 0
38908 - 0 0 1 0 0 0 0 0 0 0 0 0
38909 - 0 0 0 0 0 0 0 0 0 0 0 0
38910 - 0 0 0 0 0 0 0 0 0 0 0 0
38911 - 0 0 0 0 0 0 0 0 0 0 0 0
38912 - 0 0 0 0 0 0 0 0 0 0 0 0
38913 - 0 0 0 0 0 0 0 0 0 14 14 14
38914 - 46 46 46 86 86 86 2 2 6 2 2 6
38915 - 6 6 6 6 6 6 22 22 22 34 34 34
38916 - 6 6 6 2 2 6 2 2 6 2 2 6
38917 - 2 2 6 2 2 6 18 18 18 34 34 34
38918 - 10 10 10 50 50 50 22 22 22 2 2 6
38919 - 2 2 6 2 2 6 2 2 6 10 10 10
38920 - 86 86 86 42 42 42 14 14 14 0 0 0
38921 - 0 0 0 0 0 0 0 0 0 0 0 0
38922 - 0 0 0 0 0 0 0 0 0 0 0 0
38923 - 0 0 0 0 0 0 0 0 0 0 0 0
38924 - 0 0 0 0 0 0 0 0 0 0 0 0
38925 - 0 0 0 0 0 0 0 0 0 0 0 0
38926 - 0 0 0 0 0 0 0 0 0 0 0 0
38927 - 0 0 0 0 0 0 0 0 0 0 0 0
38928 - 0 0 1 0 0 1 0 0 1 0 0 0
38929 - 0 0 0 0 0 0 0 0 0 0 0 0
38930 - 0 0 0 0 0 0 0 0 0 0 0 0
38931 - 0 0 0 0 0 0 0 0 0 0 0 0
38932 - 0 0 0 0 0 0 0 0 0 0 0 0
38933 - 0 0 0 0 0 0 0 0 0 14 14 14
38934 - 46 46 46 86 86 86 2 2 6 2 2 6
38935 - 38 38 38 116 116 116 94 94 94 22 22 22
38936 - 22 22 22 2 2 6 2 2 6 2 2 6
38937 - 14 14 14 86 86 86 138 138 138 162 162 162
38938 -154 154 154 38 38 38 26 26 26 6 6 6
38939 - 2 2 6 2 2 6 2 2 6 2 2 6
38940 - 86 86 86 46 46 46 14 14 14 0 0 0
38941 - 0 0 0 0 0 0 0 0 0 0 0 0
38942 - 0 0 0 0 0 0 0 0 0 0 0 0
38943 - 0 0 0 0 0 0 0 0 0 0 0 0
38944 - 0 0 0 0 0 0 0 0 0 0 0 0
38945 - 0 0 0 0 0 0 0 0 0 0 0 0
38946 - 0 0 0 0 0 0 0 0 0 0 0 0
38947 - 0 0 0 0 0 0 0 0 0 0 0 0
38948 - 0 0 0 0 0 0 0 0 0 0 0 0
38949 - 0 0 0 0 0 0 0 0 0 0 0 0
38950 - 0 0 0 0 0 0 0 0 0 0 0 0
38951 - 0 0 0 0 0 0 0 0 0 0 0 0
38952 - 0 0 0 0 0 0 0 0 0 0 0 0
38953 - 0 0 0 0 0 0 0 0 0 14 14 14
38954 - 46 46 46 86 86 86 2 2 6 14 14 14
38955 -134 134 134 198 198 198 195 195 195 116 116 116
38956 - 10 10 10 2 2 6 2 2 6 6 6 6
38957 -101 98 89 187 187 187 210 210 210 218 218 218
38958 -214 214 214 134 134 134 14 14 14 6 6 6
38959 - 2 2 6 2 2 6 2 2 6 2 2 6
38960 - 86 86 86 50 50 50 18 18 18 6 6 6
38961 - 0 0 0 0 0 0 0 0 0 0 0 0
38962 - 0 0 0 0 0 0 0 0 0 0 0 0
38963 - 0 0 0 0 0 0 0 0 0 0 0 0
38964 - 0 0 0 0 0 0 0 0 0 0 0 0
38965 - 0 0 0 0 0 0 0 0 0 0 0 0
38966 - 0 0 0 0 0 0 0 0 0 0 0 0
38967 - 0 0 0 0 0 0 0 0 1 0 0 0
38968 - 0 0 1 0 0 1 0 0 1 0 0 0
38969 - 0 0 0 0 0 0 0 0 0 0 0 0
38970 - 0 0 0 0 0 0 0 0 0 0 0 0
38971 - 0 0 0 0 0 0 0 0 0 0 0 0
38972 - 0 0 0 0 0 0 0 0 0 0 0 0
38973 - 0 0 0 0 0 0 0 0 0 14 14 14
38974 - 46 46 46 86 86 86 2 2 6 54 54 54
38975 -218 218 218 195 195 195 226 226 226 246 246 246
38976 - 58 58 58 2 2 6 2 2 6 30 30 30
38977 -210 210 210 253 253 253 174 174 174 123 123 123
38978 -221 221 221 234 234 234 74 74 74 2 2 6
38979 - 2 2 6 2 2 6 2 2 6 2 2 6
38980 - 70 70 70 58 58 58 22 22 22 6 6 6
38981 - 0 0 0 0 0 0 0 0 0 0 0 0
38982 - 0 0 0 0 0 0 0 0 0 0 0 0
38983 - 0 0 0 0 0 0 0 0 0 0 0 0
38984 - 0 0 0 0 0 0 0 0 0 0 0 0
38985 - 0 0 0 0 0 0 0 0 0 0 0 0
38986 - 0 0 0 0 0 0 0 0 0 0 0 0
38987 - 0 0 0 0 0 0 0 0 0 0 0 0
38988 - 0 0 0 0 0 0 0 0 0 0 0 0
38989 - 0 0 0 0 0 0 0 0 0 0 0 0
38990 - 0 0 0 0 0 0 0 0 0 0 0 0
38991 - 0 0 0 0 0 0 0 0 0 0 0 0
38992 - 0 0 0 0 0 0 0 0 0 0 0 0
38993 - 0 0 0 0 0 0 0 0 0 14 14 14
38994 - 46 46 46 82 82 82 2 2 6 106 106 106
38995 -170 170 170 26 26 26 86 86 86 226 226 226
38996 -123 123 123 10 10 10 14 14 14 46 46 46
38997 -231 231 231 190 190 190 6 6 6 70 70 70
38998 - 90 90 90 238 238 238 158 158 158 2 2 6
38999 - 2 2 6 2 2 6 2 2 6 2 2 6
39000 - 70 70 70 58 58 58 22 22 22 6 6 6
39001 - 0 0 0 0 0 0 0 0 0 0 0 0
39002 - 0 0 0 0 0 0 0 0 0 0 0 0
39003 - 0 0 0 0 0 0 0 0 0 0 0 0
39004 - 0 0 0 0 0 0 0 0 0 0 0 0
39005 - 0 0 0 0 0 0 0 0 0 0 0 0
39006 - 0 0 0 0 0 0 0 0 0 0 0 0
39007 - 0 0 0 0 0 0 0 0 1 0 0 0
39008 - 0 0 1 0 0 1 0 0 1 0 0 0
39009 - 0 0 0 0 0 0 0 0 0 0 0 0
39010 - 0 0 0 0 0 0 0 0 0 0 0 0
39011 - 0 0 0 0 0 0 0 0 0 0 0 0
39012 - 0 0 0 0 0 0 0 0 0 0 0 0
39013 - 0 0 0 0 0 0 0 0 0 14 14 14
39014 - 42 42 42 86 86 86 6 6 6 116 116 116
39015 -106 106 106 6 6 6 70 70 70 149 149 149
39016 -128 128 128 18 18 18 38 38 38 54 54 54
39017 -221 221 221 106 106 106 2 2 6 14 14 14
39018 - 46 46 46 190 190 190 198 198 198 2 2 6
39019 - 2 2 6 2 2 6 2 2 6 2 2 6
39020 - 74 74 74 62 62 62 22 22 22 6 6 6
39021 - 0 0 0 0 0 0 0 0 0 0 0 0
39022 - 0 0 0 0 0 0 0 0 0 0 0 0
39023 - 0 0 0 0 0 0 0 0 0 0 0 0
39024 - 0 0 0 0 0 0 0 0 0 0 0 0
39025 - 0 0 0 0 0 0 0 0 0 0 0 0
39026 - 0 0 0 0 0 0 0 0 0 0 0 0
39027 - 0 0 0 0 0 0 0 0 1 0 0 0
39028 - 0 0 1 0 0 0 0 0 1 0 0 0
39029 - 0 0 0 0 0 0 0 0 0 0 0 0
39030 - 0 0 0 0 0 0 0 0 0 0 0 0
39031 - 0 0 0 0 0 0 0 0 0 0 0 0
39032 - 0 0 0 0 0 0 0 0 0 0 0 0
39033 - 0 0 0 0 0 0 0 0 0 14 14 14
39034 - 42 42 42 94 94 94 14 14 14 101 101 101
39035 -128 128 128 2 2 6 18 18 18 116 116 116
39036 -118 98 46 121 92 8 121 92 8 98 78 10
39037 -162 162 162 106 106 106 2 2 6 2 2 6
39038 - 2 2 6 195 195 195 195 195 195 6 6 6
39039 - 2 2 6 2 2 6 2 2 6 2 2 6
39040 - 74 74 74 62 62 62 22 22 22 6 6 6
39041 - 0 0 0 0 0 0 0 0 0 0 0 0
39042 - 0 0 0 0 0 0 0 0 0 0 0 0
39043 - 0 0 0 0 0 0 0 0 0 0 0 0
39044 - 0 0 0 0 0 0 0 0 0 0 0 0
39045 - 0 0 0 0 0 0 0 0 0 0 0 0
39046 - 0 0 0 0 0 0 0 0 0 0 0 0
39047 - 0 0 0 0 0 0 0 0 1 0 0 1
39048 - 0 0 1 0 0 0 0 0 1 0 0 0
39049 - 0 0 0 0 0 0 0 0 0 0 0 0
39050 - 0 0 0 0 0 0 0 0 0 0 0 0
39051 - 0 0 0 0 0 0 0 0 0 0 0 0
39052 - 0 0 0 0 0 0 0 0 0 0 0 0
39053 - 0 0 0 0 0 0 0 0 0 10 10 10
39054 - 38 38 38 90 90 90 14 14 14 58 58 58
39055 -210 210 210 26 26 26 54 38 6 154 114 10
39056 -226 170 11 236 186 11 225 175 15 184 144 12
39057 -215 174 15 175 146 61 37 26 9 2 2 6
39058 - 70 70 70 246 246 246 138 138 138 2 2 6
39059 - 2 2 6 2 2 6 2 2 6 2 2 6
39060 - 70 70 70 66 66 66 26 26 26 6 6 6
39061 - 0 0 0 0 0 0 0 0 0 0 0 0
39062 - 0 0 0 0 0 0 0 0 0 0 0 0
39063 - 0 0 0 0 0 0 0 0 0 0 0 0
39064 - 0 0 0 0 0 0 0 0 0 0 0 0
39065 - 0 0 0 0 0 0 0 0 0 0 0 0
39066 - 0 0 0 0 0 0 0 0 0 0 0 0
39067 - 0 0 0 0 0 0 0 0 0 0 0 0
39068 - 0 0 0 0 0 0 0 0 0 0 0 0
39069 - 0 0 0 0 0 0 0 0 0 0 0 0
39070 - 0 0 0 0 0 0 0 0 0 0 0 0
39071 - 0 0 0 0 0 0 0 0 0 0 0 0
39072 - 0 0 0 0 0 0 0 0 0 0 0 0
39073 - 0 0 0 0 0 0 0 0 0 10 10 10
39074 - 38 38 38 86 86 86 14 14 14 10 10 10
39075 -195 195 195 188 164 115 192 133 9 225 175 15
39076 -239 182 13 234 190 10 232 195 16 232 200 30
39077 -245 207 45 241 208 19 232 195 16 184 144 12
39078 -218 194 134 211 206 186 42 42 42 2 2 6
39079 - 2 2 6 2 2 6 2 2 6 2 2 6
39080 - 50 50 50 74 74 74 30 30 30 6 6 6
39081 - 0 0 0 0 0 0 0 0 0 0 0 0
39082 - 0 0 0 0 0 0 0 0 0 0 0 0
39083 - 0 0 0 0 0 0 0 0 0 0 0 0
39084 - 0 0 0 0 0 0 0 0 0 0 0 0
39085 - 0 0 0 0 0 0 0 0 0 0 0 0
39086 - 0 0 0 0 0 0 0 0 0 0 0 0
39087 - 0 0 0 0 0 0 0 0 0 0 0 0
39088 - 0 0 0 0 0 0 0 0 0 0 0 0
39089 - 0 0 0 0 0 0 0 0 0 0 0 0
39090 - 0 0 0 0 0 0 0 0 0 0 0 0
39091 - 0 0 0 0 0 0 0 0 0 0 0 0
39092 - 0 0 0 0 0 0 0 0 0 0 0 0
39093 - 0 0 0 0 0 0 0 0 0 10 10 10
39094 - 34 34 34 86 86 86 14 14 14 2 2 6
39095 -121 87 25 192 133 9 219 162 10 239 182 13
39096 -236 186 11 232 195 16 241 208 19 244 214 54
39097 -246 218 60 246 218 38 246 215 20 241 208 19
39098 -241 208 19 226 184 13 121 87 25 2 2 6
39099 - 2 2 6 2 2 6 2 2 6 2 2 6
39100 - 50 50 50 82 82 82 34 34 34 10 10 10
39101 - 0 0 0 0 0 0 0 0 0 0 0 0
39102 - 0 0 0 0 0 0 0 0 0 0 0 0
39103 - 0 0 0 0 0 0 0 0 0 0 0 0
39104 - 0 0 0 0 0 0 0 0 0 0 0 0
39105 - 0 0 0 0 0 0 0 0 0 0 0 0
39106 - 0 0 0 0 0 0 0 0 0 0 0 0
39107 - 0 0 0 0 0 0 0 0 0 0 0 0
39108 - 0 0 0 0 0 0 0 0 0 0 0 0
39109 - 0 0 0 0 0 0 0 0 0 0 0 0
39110 - 0 0 0 0 0 0 0 0 0 0 0 0
39111 - 0 0 0 0 0 0 0 0 0 0 0 0
39112 - 0 0 0 0 0 0 0 0 0 0 0 0
39113 - 0 0 0 0 0 0 0 0 0 10 10 10
39114 - 34 34 34 82 82 82 30 30 30 61 42 6
39115 -180 123 7 206 145 10 230 174 11 239 182 13
39116 -234 190 10 238 202 15 241 208 19 246 218 74
39117 -246 218 38 246 215 20 246 215 20 246 215 20
39118 -226 184 13 215 174 15 184 144 12 6 6 6
39119 - 2 2 6 2 2 6 2 2 6 2 2 6
39120 - 26 26 26 94 94 94 42 42 42 14 14 14
39121 - 0 0 0 0 0 0 0 0 0 0 0 0
39122 - 0 0 0 0 0 0 0 0 0 0 0 0
39123 - 0 0 0 0 0 0 0 0 0 0 0 0
39124 - 0 0 0 0 0 0 0 0 0 0 0 0
39125 - 0 0 0 0 0 0 0 0 0 0 0 0
39126 - 0 0 0 0 0 0 0 0 0 0 0 0
39127 - 0 0 0 0 0 0 0 0 0 0 0 0
39128 - 0 0 0 0 0 0 0 0 0 0 0 0
39129 - 0 0 0 0 0 0 0 0 0 0 0 0
39130 - 0 0 0 0 0 0 0 0 0 0 0 0
39131 - 0 0 0 0 0 0 0 0 0 0 0 0
39132 - 0 0 0 0 0 0 0 0 0 0 0 0
39133 - 0 0 0 0 0 0 0 0 0 10 10 10
39134 - 30 30 30 78 78 78 50 50 50 104 69 6
39135 -192 133 9 216 158 10 236 178 12 236 186 11
39136 -232 195 16 241 208 19 244 214 54 245 215 43
39137 -246 215 20 246 215 20 241 208 19 198 155 10
39138 -200 144 11 216 158 10 156 118 10 2 2 6
39139 - 2 2 6 2 2 6 2 2 6 2 2 6
39140 - 6 6 6 90 90 90 54 54 54 18 18 18
39141 - 6 6 6 0 0 0 0 0 0 0 0 0
39142 - 0 0 0 0 0 0 0 0 0 0 0 0
39143 - 0 0 0 0 0 0 0 0 0 0 0 0
39144 - 0 0 0 0 0 0 0 0 0 0 0 0
39145 - 0 0 0 0 0 0 0 0 0 0 0 0
39146 - 0 0 0 0 0 0 0 0 0 0 0 0
39147 - 0 0 0 0 0 0 0 0 0 0 0 0
39148 - 0 0 0 0 0 0 0 0 0 0 0 0
39149 - 0 0 0 0 0 0 0 0 0 0 0 0
39150 - 0 0 0 0 0 0 0 0 0 0 0 0
39151 - 0 0 0 0 0 0 0 0 0 0 0 0
39152 - 0 0 0 0 0 0 0 0 0 0 0 0
39153 - 0 0 0 0 0 0 0 0 0 10 10 10
39154 - 30 30 30 78 78 78 46 46 46 22 22 22
39155 -137 92 6 210 162 10 239 182 13 238 190 10
39156 -238 202 15 241 208 19 246 215 20 246 215 20
39157 -241 208 19 203 166 17 185 133 11 210 150 10
39158 -216 158 10 210 150 10 102 78 10 2 2 6
39159 - 6 6 6 54 54 54 14 14 14 2 2 6
39160 - 2 2 6 62 62 62 74 74 74 30 30 30
39161 - 10 10 10 0 0 0 0 0 0 0 0 0
39162 - 0 0 0 0 0 0 0 0 0 0 0 0
39163 - 0 0 0 0 0 0 0 0 0 0 0 0
39164 - 0 0 0 0 0 0 0 0 0 0 0 0
39165 - 0 0 0 0 0 0 0 0 0 0 0 0
39166 - 0 0 0 0 0 0 0 0 0 0 0 0
39167 - 0 0 0 0 0 0 0 0 0 0 0 0
39168 - 0 0 0 0 0 0 0 0 0 0 0 0
39169 - 0 0 0 0 0 0 0 0 0 0 0 0
39170 - 0 0 0 0 0 0 0 0 0 0 0 0
39171 - 0 0 0 0 0 0 0 0 0 0 0 0
39172 - 0 0 0 0 0 0 0 0 0 0 0 0
39173 - 0 0 0 0 0 0 0 0 0 10 10 10
39174 - 34 34 34 78 78 78 50 50 50 6 6 6
39175 - 94 70 30 139 102 15 190 146 13 226 184 13
39176 -232 200 30 232 195 16 215 174 15 190 146 13
39177 -168 122 10 192 133 9 210 150 10 213 154 11
39178 -202 150 34 182 157 106 101 98 89 2 2 6
39179 - 2 2 6 78 78 78 116 116 116 58 58 58
39180 - 2 2 6 22 22 22 90 90 90 46 46 46
39181 - 18 18 18 6 6 6 0 0 0 0 0 0
39182 - 0 0 0 0 0 0 0 0 0 0 0 0
39183 - 0 0 0 0 0 0 0 0 0 0 0 0
39184 - 0 0 0 0 0 0 0 0 0 0 0 0
39185 - 0 0 0 0 0 0 0 0 0 0 0 0
39186 - 0 0 0 0 0 0 0 0 0 0 0 0
39187 - 0 0 0 0 0 0 0 0 0 0 0 0
39188 - 0 0 0 0 0 0 0 0 0 0 0 0
39189 - 0 0 0 0 0 0 0 0 0 0 0 0
39190 - 0 0 0 0 0 0 0 0 0 0 0 0
39191 - 0 0 0 0 0 0 0 0 0 0 0 0
39192 - 0 0 0 0 0 0 0 0 0 0 0 0
39193 - 0 0 0 0 0 0 0 0 0 10 10 10
39194 - 38 38 38 86 86 86 50 50 50 6 6 6
39195 -128 128 128 174 154 114 156 107 11 168 122 10
39196 -198 155 10 184 144 12 197 138 11 200 144 11
39197 -206 145 10 206 145 10 197 138 11 188 164 115
39198 -195 195 195 198 198 198 174 174 174 14 14 14
39199 - 2 2 6 22 22 22 116 116 116 116 116 116
39200 - 22 22 22 2 2 6 74 74 74 70 70 70
39201 - 30 30 30 10 10 10 0 0 0 0 0 0
39202 - 0 0 0 0 0 0 0 0 0 0 0 0
39203 - 0 0 0 0 0 0 0 0 0 0 0 0
39204 - 0 0 0 0 0 0 0 0 0 0 0 0
39205 - 0 0 0 0 0 0 0 0 0 0 0 0
39206 - 0 0 0 0 0 0 0 0 0 0 0 0
39207 - 0 0 0 0 0 0 0 0 0 0 0 0
39208 - 0 0 0 0 0 0 0 0 0 0 0 0
39209 - 0 0 0 0 0 0 0 0 0 0 0 0
39210 - 0 0 0 0 0 0 0 0 0 0 0 0
39211 - 0 0 0 0 0 0 0 0 0 0 0 0
39212 - 0 0 0 0 0 0 0 0 0 0 0 0
39213 - 0 0 0 0 0 0 6 6 6 18 18 18
39214 - 50 50 50 101 101 101 26 26 26 10 10 10
39215 -138 138 138 190 190 190 174 154 114 156 107 11
39216 -197 138 11 200 144 11 197 138 11 192 133 9
39217 -180 123 7 190 142 34 190 178 144 187 187 187
39218 -202 202 202 221 221 221 214 214 214 66 66 66
39219 - 2 2 6 2 2 6 50 50 50 62 62 62
39220 - 6 6 6 2 2 6 10 10 10 90 90 90
39221 - 50 50 50 18 18 18 6 6 6 0 0 0
39222 - 0 0 0 0 0 0 0 0 0 0 0 0
39223 - 0 0 0 0 0 0 0 0 0 0 0 0
39224 - 0 0 0 0 0 0 0 0 0 0 0 0
39225 - 0 0 0 0 0 0 0 0 0 0 0 0
39226 - 0 0 0 0 0 0 0 0 0 0 0 0
39227 - 0 0 0 0 0 0 0 0 0 0 0 0
39228 - 0 0 0 0 0 0 0 0 0 0 0 0
39229 - 0 0 0 0 0 0 0 0 0 0 0 0
39230 - 0 0 0 0 0 0 0 0 0 0 0 0
39231 - 0 0 0 0 0 0 0 0 0 0 0 0
39232 - 0 0 0 0 0 0 0 0 0 0 0 0
39233 - 0 0 0 0 0 0 10 10 10 34 34 34
39234 - 74 74 74 74 74 74 2 2 6 6 6 6
39235 -144 144 144 198 198 198 190 190 190 178 166 146
39236 -154 121 60 156 107 11 156 107 11 168 124 44
39237 -174 154 114 187 187 187 190 190 190 210 210 210
39238 -246 246 246 253 253 253 253 253 253 182 182 182
39239 - 6 6 6 2 2 6 2 2 6 2 2 6
39240 - 2 2 6 2 2 6 2 2 6 62 62 62
39241 - 74 74 74 34 34 34 14 14 14 0 0 0
39242 - 0 0 0 0 0 0 0 0 0 0 0 0
39243 - 0 0 0 0 0 0 0 0 0 0 0 0
39244 - 0 0 0 0 0 0 0 0 0 0 0 0
39245 - 0 0 0 0 0 0 0 0 0 0 0 0
39246 - 0 0 0 0 0 0 0 0 0 0 0 0
39247 - 0 0 0 0 0 0 0 0 0 0 0 0
39248 - 0 0 0 0 0 0 0 0 0 0 0 0
39249 - 0 0 0 0 0 0 0 0 0 0 0 0
39250 - 0 0 0 0 0 0 0 0 0 0 0 0
39251 - 0 0 0 0 0 0 0 0 0 0 0 0
39252 - 0 0 0 0 0 0 0 0 0 0 0 0
39253 - 0 0 0 10 10 10 22 22 22 54 54 54
39254 - 94 94 94 18 18 18 2 2 6 46 46 46
39255 -234 234 234 221 221 221 190 190 190 190 190 190
39256 -190 190 190 187 187 187 187 187 187 190 190 190
39257 -190 190 190 195 195 195 214 214 214 242 242 242
39258 -253 253 253 253 253 253 253 253 253 253 253 253
39259 - 82 82 82 2 2 6 2 2 6 2 2 6
39260 - 2 2 6 2 2 6 2 2 6 14 14 14
39261 - 86 86 86 54 54 54 22 22 22 6 6 6
39262 - 0 0 0 0 0 0 0 0 0 0 0 0
39263 - 0 0 0 0 0 0 0 0 0 0 0 0
39264 - 0 0 0 0 0 0 0 0 0 0 0 0
39265 - 0 0 0 0 0 0 0 0 0 0 0 0
39266 - 0 0 0 0 0 0 0 0 0 0 0 0
39267 - 0 0 0 0 0 0 0 0 0 0 0 0
39268 - 0 0 0 0 0 0 0 0 0 0 0 0
39269 - 0 0 0 0 0 0 0 0 0 0 0 0
39270 - 0 0 0 0 0 0 0 0 0 0 0 0
39271 - 0 0 0 0 0 0 0 0 0 0 0 0
39272 - 0 0 0 0 0 0 0 0 0 0 0 0
39273 - 6 6 6 18 18 18 46 46 46 90 90 90
39274 - 46 46 46 18 18 18 6 6 6 182 182 182
39275 -253 253 253 246 246 246 206 206 206 190 190 190
39276 -190 190 190 190 190 190 190 190 190 190 190 190
39277 -206 206 206 231 231 231 250 250 250 253 253 253
39278 -253 253 253 253 253 253 253 253 253 253 253 253
39279 -202 202 202 14 14 14 2 2 6 2 2 6
39280 - 2 2 6 2 2 6 2 2 6 2 2 6
39281 - 42 42 42 86 86 86 42 42 42 18 18 18
39282 - 6 6 6 0 0 0 0 0 0 0 0 0
39283 - 0 0 0 0 0 0 0 0 0 0 0 0
39284 - 0 0 0 0 0 0 0 0 0 0 0 0
39285 - 0 0 0 0 0 0 0 0 0 0 0 0
39286 - 0 0 0 0 0 0 0 0 0 0 0 0
39287 - 0 0 0 0 0 0 0 0 0 0 0 0
39288 - 0 0 0 0 0 0 0 0 0 0 0 0
39289 - 0 0 0 0 0 0 0 0 0 0 0 0
39290 - 0 0 0 0 0 0 0 0 0 0 0 0
39291 - 0 0 0 0 0 0 0 0 0 0 0 0
39292 - 0 0 0 0 0 0 0 0 0 6 6 6
39293 - 14 14 14 38 38 38 74 74 74 66 66 66
39294 - 2 2 6 6 6 6 90 90 90 250 250 250
39295 -253 253 253 253 253 253 238 238 238 198 198 198
39296 -190 190 190 190 190 190 195 195 195 221 221 221
39297 -246 246 246 253 253 253 253 253 253 253 253 253
39298 -253 253 253 253 253 253 253 253 253 253 253 253
39299 -253 253 253 82 82 82 2 2 6 2 2 6
39300 - 2 2 6 2 2 6 2 2 6 2 2 6
39301 - 2 2 6 78 78 78 70 70 70 34 34 34
39302 - 14 14 14 6 6 6 0 0 0 0 0 0
39303 - 0 0 0 0 0 0 0 0 0 0 0 0
39304 - 0 0 0 0 0 0 0 0 0 0 0 0
39305 - 0 0 0 0 0 0 0 0 0 0 0 0
39306 - 0 0 0 0 0 0 0 0 0 0 0 0
39307 - 0 0 0 0 0 0 0 0 0 0 0 0
39308 - 0 0 0 0 0 0 0 0 0 0 0 0
39309 - 0 0 0 0 0 0 0 0 0 0 0 0
39310 - 0 0 0 0 0 0 0 0 0 0 0 0
39311 - 0 0 0 0 0 0 0 0 0 0 0 0
39312 - 0 0 0 0 0 0 0 0 0 14 14 14
39313 - 34 34 34 66 66 66 78 78 78 6 6 6
39314 - 2 2 6 18 18 18 218 218 218 253 253 253
39315 -253 253 253 253 253 253 253 253 253 246 246 246
39316 -226 226 226 231 231 231 246 246 246 253 253 253
39317 -253 253 253 253 253 253 253 253 253 253 253 253
39318 -253 253 253 253 253 253 253 253 253 253 253 253
39319 -253 253 253 178 178 178 2 2 6 2 2 6
39320 - 2 2 6 2 2 6 2 2 6 2 2 6
39321 - 2 2 6 18 18 18 90 90 90 62 62 62
39322 - 30 30 30 10 10 10 0 0 0 0 0 0
39323 - 0 0 0 0 0 0 0 0 0 0 0 0
39324 - 0 0 0 0 0 0 0 0 0 0 0 0
39325 - 0 0 0 0 0 0 0 0 0 0 0 0
39326 - 0 0 0 0 0 0 0 0 0 0 0 0
39327 - 0 0 0 0 0 0 0 0 0 0 0 0
39328 - 0 0 0 0 0 0 0 0 0 0 0 0
39329 - 0 0 0 0 0 0 0 0 0 0 0 0
39330 - 0 0 0 0 0 0 0 0 0 0 0 0
39331 - 0 0 0 0 0 0 0 0 0 0 0 0
39332 - 0 0 0 0 0 0 10 10 10 26 26 26
39333 - 58 58 58 90 90 90 18 18 18 2 2 6
39334 - 2 2 6 110 110 110 253 253 253 253 253 253
39335 -253 253 253 253 253 253 253 253 253 253 253 253
39336 -250 250 250 253 253 253 253 253 253 253 253 253
39337 -253 253 253 253 253 253 253 253 253 253 253 253
39338 -253 253 253 253 253 253 253 253 253 253 253 253
39339 -253 253 253 231 231 231 18 18 18 2 2 6
39340 - 2 2 6 2 2 6 2 2 6 2 2 6
39341 - 2 2 6 2 2 6 18 18 18 94 94 94
39342 - 54 54 54 26 26 26 10 10 10 0 0 0
39343 - 0 0 0 0 0 0 0 0 0 0 0 0
39344 - 0 0 0 0 0 0 0 0 0 0 0 0
39345 - 0 0 0 0 0 0 0 0 0 0 0 0
39346 - 0 0 0 0 0 0 0 0 0 0 0 0
39347 - 0 0 0 0 0 0 0 0 0 0 0 0
39348 - 0 0 0 0 0 0 0 0 0 0 0 0
39349 - 0 0 0 0 0 0 0 0 0 0 0 0
39350 - 0 0 0 0 0 0 0 0 0 0 0 0
39351 - 0 0 0 0 0 0 0 0 0 0 0 0
39352 - 0 0 0 6 6 6 22 22 22 50 50 50
39353 - 90 90 90 26 26 26 2 2 6 2 2 6
39354 - 14 14 14 195 195 195 250 250 250 253 253 253
39355 -253 253 253 253 253 253 253 253 253 253 253 253
39356 -253 253 253 253 253 253 253 253 253 253 253 253
39357 -253 253 253 253 253 253 253 253 253 253 253 253
39358 -253 253 253 253 253 253 253 253 253 253 253 253
39359 -250 250 250 242 242 242 54 54 54 2 2 6
39360 - 2 2 6 2 2 6 2 2 6 2 2 6
39361 - 2 2 6 2 2 6 2 2 6 38 38 38
39362 - 86 86 86 50 50 50 22 22 22 6 6 6
39363 - 0 0 0 0 0 0 0 0 0 0 0 0
39364 - 0 0 0 0 0 0 0 0 0 0 0 0
39365 - 0 0 0 0 0 0 0 0 0 0 0 0
39366 - 0 0 0 0 0 0 0 0 0 0 0 0
39367 - 0 0 0 0 0 0 0 0 0 0 0 0
39368 - 0 0 0 0 0 0 0 0 0 0 0 0
39369 - 0 0 0 0 0 0 0 0 0 0 0 0
39370 - 0 0 0 0 0 0 0 0 0 0 0 0
39371 - 0 0 0 0 0 0 0 0 0 0 0 0
39372 - 6 6 6 14 14 14 38 38 38 82 82 82
39373 - 34 34 34 2 2 6 2 2 6 2 2 6
39374 - 42 42 42 195 195 195 246 246 246 253 253 253
39375 -253 253 253 253 253 253 253 253 253 250 250 250
39376 -242 242 242 242 242 242 250 250 250 253 253 253
39377 -253 253 253 253 253 253 253 253 253 253 253 253
39378 -253 253 253 250 250 250 246 246 246 238 238 238
39379 -226 226 226 231 231 231 101 101 101 6 6 6
39380 - 2 2 6 2 2 6 2 2 6 2 2 6
39381 - 2 2 6 2 2 6 2 2 6 2 2 6
39382 - 38 38 38 82 82 82 42 42 42 14 14 14
39383 - 6 6 6 0 0 0 0 0 0 0 0 0
39384 - 0 0 0 0 0 0 0 0 0 0 0 0
39385 - 0 0 0 0 0 0 0 0 0 0 0 0
39386 - 0 0 0 0 0 0 0 0 0 0 0 0
39387 - 0 0 0 0 0 0 0 0 0 0 0 0
39388 - 0 0 0 0 0 0 0 0 0 0 0 0
39389 - 0 0 0 0 0 0 0 0 0 0 0 0
39390 - 0 0 0 0 0 0 0 0 0 0 0 0
39391 - 0 0 0 0 0 0 0 0 0 0 0 0
39392 - 10 10 10 26 26 26 62 62 62 66 66 66
39393 - 2 2 6 2 2 6 2 2 6 6 6 6
39394 - 70 70 70 170 170 170 206 206 206 234 234 234
39395 -246 246 246 250 250 250 250 250 250 238 238 238
39396 -226 226 226 231 231 231 238 238 238 250 250 250
39397 -250 250 250 250 250 250 246 246 246 231 231 231
39398 -214 214 214 206 206 206 202 202 202 202 202 202
39399 -198 198 198 202 202 202 182 182 182 18 18 18
39400 - 2 2 6 2 2 6 2 2 6 2 2 6
39401 - 2 2 6 2 2 6 2 2 6 2 2 6
39402 - 2 2 6 62 62 62 66 66 66 30 30 30
39403 - 10 10 10 0 0 0 0 0 0 0 0 0
39404 - 0 0 0 0 0 0 0 0 0 0 0 0
39405 - 0 0 0 0 0 0 0 0 0 0 0 0
39406 - 0 0 0 0 0 0 0 0 0 0 0 0
39407 - 0 0 0 0 0 0 0 0 0 0 0 0
39408 - 0 0 0 0 0 0 0 0 0 0 0 0
39409 - 0 0 0 0 0 0 0 0 0 0 0 0
39410 - 0 0 0 0 0 0 0 0 0 0 0 0
39411 - 0 0 0 0 0 0 0 0 0 0 0 0
39412 - 14 14 14 42 42 42 82 82 82 18 18 18
39413 - 2 2 6 2 2 6 2 2 6 10 10 10
39414 - 94 94 94 182 182 182 218 218 218 242 242 242
39415 -250 250 250 253 253 253 253 253 253 250 250 250
39416 -234 234 234 253 253 253 253 253 253 253 253 253
39417 -253 253 253 253 253 253 253 253 253 246 246 246
39418 -238 238 238 226 226 226 210 210 210 202 202 202
39419 -195 195 195 195 195 195 210 210 210 158 158 158
39420 - 6 6 6 14 14 14 50 50 50 14 14 14
39421 - 2 2 6 2 2 6 2 2 6 2 2 6
39422 - 2 2 6 6 6 6 86 86 86 46 46 46
39423 - 18 18 18 6 6 6 0 0 0 0 0 0
39424 - 0 0 0 0 0 0 0 0 0 0 0 0
39425 - 0 0 0 0 0 0 0 0 0 0 0 0
39426 - 0 0 0 0 0 0 0 0 0 0 0 0
39427 - 0 0 0 0 0 0 0 0 0 0 0 0
39428 - 0 0 0 0 0 0 0 0 0 0 0 0
39429 - 0 0 0 0 0 0 0 0 0 0 0 0
39430 - 0 0 0 0 0 0 0 0 0 0 0 0
39431 - 0 0 0 0 0 0 0 0 0 6 6 6
39432 - 22 22 22 54 54 54 70 70 70 2 2 6
39433 - 2 2 6 10 10 10 2 2 6 22 22 22
39434 -166 166 166 231 231 231 250 250 250 253 253 253
39435 -253 253 253 253 253 253 253 253 253 250 250 250
39436 -242 242 242 253 253 253 253 253 253 253 253 253
39437 -253 253 253 253 253 253 253 253 253 253 253 253
39438 -253 253 253 253 253 253 253 253 253 246 246 246
39439 -231 231 231 206 206 206 198 198 198 226 226 226
39440 - 94 94 94 2 2 6 6 6 6 38 38 38
39441 - 30 30 30 2 2 6 2 2 6 2 2 6
39442 - 2 2 6 2 2 6 62 62 62 66 66 66
39443 - 26 26 26 10 10 10 0 0 0 0 0 0
39444 - 0 0 0 0 0 0 0 0 0 0 0 0
39445 - 0 0 0 0 0 0 0 0 0 0 0 0
39446 - 0 0 0 0 0 0 0 0 0 0 0 0
39447 - 0 0 0 0 0 0 0 0 0 0 0 0
39448 - 0 0 0 0 0 0 0 0 0 0 0 0
39449 - 0 0 0 0 0 0 0 0 0 0 0 0
39450 - 0 0 0 0 0 0 0 0 0 0 0 0
39451 - 0 0 0 0 0 0 0 0 0 10 10 10
39452 - 30 30 30 74 74 74 50 50 50 2 2 6
39453 - 26 26 26 26 26 26 2 2 6 106 106 106
39454 -238 238 238 253 253 253 253 253 253 253 253 253
39455 -253 253 253 253 253 253 253 253 253 253 253 253
39456 -253 253 253 253 253 253 253 253 253 253 253 253
39457 -253 253 253 253 253 253 253 253 253 253 253 253
39458 -253 253 253 253 253 253 253 253 253 253 253 253
39459 -253 253 253 246 246 246 218 218 218 202 202 202
39460 -210 210 210 14 14 14 2 2 6 2 2 6
39461 - 30 30 30 22 22 22 2 2 6 2 2 6
39462 - 2 2 6 2 2 6 18 18 18 86 86 86
39463 - 42 42 42 14 14 14 0 0 0 0 0 0
39464 - 0 0 0 0 0 0 0 0 0 0 0 0
39465 - 0 0 0 0 0 0 0 0 0 0 0 0
39466 - 0 0 0 0 0 0 0 0 0 0 0 0
39467 - 0 0 0 0 0 0 0 0 0 0 0 0
39468 - 0 0 0 0 0 0 0 0 0 0 0 0
39469 - 0 0 0 0 0 0 0 0 0 0 0 0
39470 - 0 0 0 0 0 0 0 0 0 0 0 0
39471 - 0 0 0 0 0 0 0 0 0 14 14 14
39472 - 42 42 42 90 90 90 22 22 22 2 2 6
39473 - 42 42 42 2 2 6 18 18 18 218 218 218
39474 -253 253 253 253 253 253 253 253 253 253 253 253
39475 -253 253 253 253 253 253 253 253 253 253 253 253
39476 -253 253 253 253 253 253 253 253 253 253 253 253
39477 -253 253 253 253 253 253 253 253 253 253 253 253
39478 -253 253 253 253 253 253 253 253 253 253 253 253
39479 -253 253 253 253 253 253 250 250 250 221 221 221
39480 -218 218 218 101 101 101 2 2 6 14 14 14
39481 - 18 18 18 38 38 38 10 10 10 2 2 6
39482 - 2 2 6 2 2 6 2 2 6 78 78 78
39483 - 58 58 58 22 22 22 6 6 6 0 0 0
39484 - 0 0 0 0 0 0 0 0 0 0 0 0
39485 - 0 0 0 0 0 0 0 0 0 0 0 0
39486 - 0 0 0 0 0 0 0 0 0 0 0 0
39487 - 0 0 0 0 0 0 0 0 0 0 0 0
39488 - 0 0 0 0 0 0 0 0 0 0 0 0
39489 - 0 0 0 0 0 0 0 0 0 0 0 0
39490 - 0 0 0 0 0 0 0 0 0 0 0 0
39491 - 0 0 0 0 0 0 6 6 6 18 18 18
39492 - 54 54 54 82 82 82 2 2 6 26 26 26
39493 - 22 22 22 2 2 6 123 123 123 253 253 253
39494 -253 253 253 253 253 253 253 253 253 253 253 253
39495 -253 253 253 253 253 253 253 253 253 253 253 253
39496 -253 253 253 253 253 253 253 253 253 253 253 253
39497 -253 253 253 253 253 253 253 253 253 253 253 253
39498 -253 253 253 253 253 253 253 253 253 253 253 253
39499 -253 253 253 253 253 253 253 253 253 250 250 250
39500 -238 238 238 198 198 198 6 6 6 38 38 38
39501 - 58 58 58 26 26 26 38 38 38 2 2 6
39502 - 2 2 6 2 2 6 2 2 6 46 46 46
39503 - 78 78 78 30 30 30 10 10 10 0 0 0
39504 - 0 0 0 0 0 0 0 0 0 0 0 0
39505 - 0 0 0 0 0 0 0 0 0 0 0 0
39506 - 0 0 0 0 0 0 0 0 0 0 0 0
39507 - 0 0 0 0 0 0 0 0 0 0 0 0
39508 - 0 0 0 0 0 0 0 0 0 0 0 0
39509 - 0 0 0 0 0 0 0 0 0 0 0 0
39510 - 0 0 0 0 0 0 0 0 0 0 0 0
39511 - 0 0 0 0 0 0 10 10 10 30 30 30
39512 - 74 74 74 58 58 58 2 2 6 42 42 42
39513 - 2 2 6 22 22 22 231 231 231 253 253 253
39514 -253 253 253 253 253 253 253 253 253 253 253 253
39515 -253 253 253 253 253 253 253 253 253 250 250 250
39516 -253 253 253 253 253 253 253 253 253 253 253 253
39517 -253 253 253 253 253 253 253 253 253 253 253 253
39518 -253 253 253 253 253 253 253 253 253 253 253 253
39519 -253 253 253 253 253 253 253 253 253 253 253 253
39520 -253 253 253 246 246 246 46 46 46 38 38 38
39521 - 42 42 42 14 14 14 38 38 38 14 14 14
39522 - 2 2 6 2 2 6 2 2 6 6 6 6
39523 - 86 86 86 46 46 46 14 14 14 0 0 0
39524 - 0 0 0 0 0 0 0 0 0 0 0 0
39525 - 0 0 0 0 0 0 0 0 0 0 0 0
39526 - 0 0 0 0 0 0 0 0 0 0 0 0
39527 - 0 0 0 0 0 0 0 0 0 0 0 0
39528 - 0 0 0 0 0 0 0 0 0 0 0 0
39529 - 0 0 0 0 0 0 0 0 0 0 0 0
39530 - 0 0 0 0 0 0 0 0 0 0 0 0
39531 - 0 0 0 6 6 6 14 14 14 42 42 42
39532 - 90 90 90 18 18 18 18 18 18 26 26 26
39533 - 2 2 6 116 116 116 253 253 253 253 253 253
39534 -253 253 253 253 253 253 253 253 253 253 253 253
39535 -253 253 253 253 253 253 250 250 250 238 238 238
39536 -253 253 253 253 253 253 253 253 253 253 253 253
39537 -253 253 253 253 253 253 253 253 253 253 253 253
39538 -253 253 253 253 253 253 253 253 253 253 253 253
39539 -253 253 253 253 253 253 253 253 253 253 253 253
39540 -253 253 253 253 253 253 94 94 94 6 6 6
39541 - 2 2 6 2 2 6 10 10 10 34 34 34
39542 - 2 2 6 2 2 6 2 2 6 2 2 6
39543 - 74 74 74 58 58 58 22 22 22 6 6 6
39544 - 0 0 0 0 0 0 0 0 0 0 0 0
39545 - 0 0 0 0 0 0 0 0 0 0 0 0
39546 - 0 0 0 0 0 0 0 0 0 0 0 0
39547 - 0 0 0 0 0 0 0 0 0 0 0 0
39548 - 0 0 0 0 0 0 0 0 0 0 0 0
39549 - 0 0 0 0 0 0 0 0 0 0 0 0
39550 - 0 0 0 0 0 0 0 0 0 0 0 0
39551 - 0 0 0 10 10 10 26 26 26 66 66 66
39552 - 82 82 82 2 2 6 38 38 38 6 6 6
39553 - 14 14 14 210 210 210 253 253 253 253 253 253
39554 -253 253 253 253 253 253 253 253 253 253 253 253
39555 -253 253 253 253 253 253 246 246 246 242 242 242
39556 -253 253 253 253 253 253 253 253 253 253 253 253
39557 -253 253 253 253 253 253 253 253 253 253 253 253
39558 -253 253 253 253 253 253 253 253 253 253 253 253
39559 -253 253 253 253 253 253 253 253 253 253 253 253
39560 -253 253 253 253 253 253 144 144 144 2 2 6
39561 - 2 2 6 2 2 6 2 2 6 46 46 46
39562 - 2 2 6 2 2 6 2 2 6 2 2 6
39563 - 42 42 42 74 74 74 30 30 30 10 10 10
39564 - 0 0 0 0 0 0 0 0 0 0 0 0
39565 - 0 0 0 0 0 0 0 0 0 0 0 0
39566 - 0 0 0 0 0 0 0 0 0 0 0 0
39567 - 0 0 0 0 0 0 0 0 0 0 0 0
39568 - 0 0 0 0 0 0 0 0 0 0 0 0
39569 - 0 0 0 0 0 0 0 0 0 0 0 0
39570 - 0 0 0 0 0 0 0 0 0 0 0 0
39571 - 6 6 6 14 14 14 42 42 42 90 90 90
39572 - 26 26 26 6 6 6 42 42 42 2 2 6
39573 - 74 74 74 250 250 250 253 253 253 253 253 253
39574 -253 253 253 253 253 253 253 253 253 253 253 253
39575 -253 253 253 253 253 253 242 242 242 242 242 242
39576 -253 253 253 253 253 253 253 253 253 253 253 253
39577 -253 253 253 253 253 253 253 253 253 253 253 253
39578 -253 253 253 253 253 253 253 253 253 253 253 253
39579 -253 253 253 253 253 253 253 253 253 253 253 253
39580 -253 253 253 253 253 253 182 182 182 2 2 6
39581 - 2 2 6 2 2 6 2 2 6 46 46 46
39582 - 2 2 6 2 2 6 2 2 6 2 2 6
39583 - 10 10 10 86 86 86 38 38 38 10 10 10
39584 - 0 0 0 0 0 0 0 0 0 0 0 0
39585 - 0 0 0 0 0 0 0 0 0 0 0 0
39586 - 0 0 0 0 0 0 0 0 0 0 0 0
39587 - 0 0 0 0 0 0 0 0 0 0 0 0
39588 - 0 0 0 0 0 0 0 0 0 0 0 0
39589 - 0 0 0 0 0 0 0 0 0 0 0 0
39590 - 0 0 0 0 0 0 0 0 0 0 0 0
39591 - 10 10 10 26 26 26 66 66 66 82 82 82
39592 - 2 2 6 22 22 22 18 18 18 2 2 6
39593 -149 149 149 253 253 253 253 253 253 253 253 253
39594 -253 253 253 253 253 253 253 253 253 253 253 253
39595 -253 253 253 253 253 253 234 234 234 242 242 242
39596 -253 253 253 253 253 253 253 253 253 253 253 253
39597 -253 253 253 253 253 253 253 253 253 253 253 253
39598 -253 253 253 253 253 253 253 253 253 253 253 253
39599 -253 253 253 253 253 253 253 253 253 253 253 253
39600 -253 253 253 253 253 253 206 206 206 2 2 6
39601 - 2 2 6 2 2 6 2 2 6 38 38 38
39602 - 2 2 6 2 2 6 2 2 6 2 2 6
39603 - 6 6 6 86 86 86 46 46 46 14 14 14
39604 - 0 0 0 0 0 0 0 0 0 0 0 0
39605 - 0 0 0 0 0 0 0 0 0 0 0 0
39606 - 0 0 0 0 0 0 0 0 0 0 0 0
39607 - 0 0 0 0 0 0 0 0 0 0 0 0
39608 - 0 0 0 0 0 0 0 0 0 0 0 0
39609 - 0 0 0 0 0 0 0 0 0 0 0 0
39610 - 0 0 0 0 0 0 0 0 0 6 6 6
39611 - 18 18 18 46 46 46 86 86 86 18 18 18
39612 - 2 2 6 34 34 34 10 10 10 6 6 6
39613 -210 210 210 253 253 253 253 253 253 253 253 253
39614 -253 253 253 253 253 253 253 253 253 253 253 253
39615 -253 253 253 253 253 253 234 234 234 242 242 242
39616 -253 253 253 253 253 253 253 253 253 253 253 253
39617 -253 253 253 253 253 253 253 253 253 253 253 253
39618 -253 253 253 253 253 253 253 253 253 253 253 253
39619 -253 253 253 253 253 253 253 253 253 253 253 253
39620 -253 253 253 253 253 253 221 221 221 6 6 6
39621 - 2 2 6 2 2 6 6 6 6 30 30 30
39622 - 2 2 6 2 2 6 2 2 6 2 2 6
39623 - 2 2 6 82 82 82 54 54 54 18 18 18
39624 - 6 6 6 0 0 0 0 0 0 0 0 0
39625 - 0 0 0 0 0 0 0 0 0 0 0 0
39626 - 0 0 0 0 0 0 0 0 0 0 0 0
39627 - 0 0 0 0 0 0 0 0 0 0 0 0
39628 - 0 0 0 0 0 0 0 0 0 0 0 0
39629 - 0 0 0 0 0 0 0 0 0 0 0 0
39630 - 0 0 0 0 0 0 0 0 0 10 10 10
39631 - 26 26 26 66 66 66 62 62 62 2 2 6
39632 - 2 2 6 38 38 38 10 10 10 26 26 26
39633 -238 238 238 253 253 253 253 253 253 253 253 253
39634 -253 253 253 253 253 253 253 253 253 253 253 253
39635 -253 253 253 253 253 253 231 231 231 238 238 238
39636 -253 253 253 253 253 253 253 253 253 253 253 253
39637 -253 253 253 253 253 253 253 253 253 253 253 253
39638 -253 253 253 253 253 253 253 253 253 253 253 253
39639 -253 253 253 253 253 253 253 253 253 253 253 253
39640 -253 253 253 253 253 253 231 231 231 6 6 6
39641 - 2 2 6 2 2 6 10 10 10 30 30 30
39642 - 2 2 6 2 2 6 2 2 6 2 2 6
39643 - 2 2 6 66 66 66 58 58 58 22 22 22
39644 - 6 6 6 0 0 0 0 0 0 0 0 0
39645 - 0 0 0 0 0 0 0 0 0 0 0 0
39646 - 0 0 0 0 0 0 0 0 0 0 0 0
39647 - 0 0 0 0 0 0 0 0 0 0 0 0
39648 - 0 0 0 0 0 0 0 0 0 0 0 0
39649 - 0 0 0 0 0 0 0 0 0 0 0 0
39650 - 0 0 0 0 0 0 0 0 0 10 10 10
39651 - 38 38 38 78 78 78 6 6 6 2 2 6
39652 - 2 2 6 46 46 46 14 14 14 42 42 42
39653 -246 246 246 253 253 253 253 253 253 253 253 253
39654 -253 253 253 253 253 253 253 253 253 253 253 253
39655 -253 253 253 253 253 253 231 231 231 242 242 242
39656 -253 253 253 253 253 253 253 253 253 253 253 253
39657 -253 253 253 253 253 253 253 253 253 253 253 253
39658 -253 253 253 253 253 253 253 253 253 253 253 253
39659 -253 253 253 253 253 253 253 253 253 253 253 253
39660 -253 253 253 253 253 253 234 234 234 10 10 10
39661 - 2 2 6 2 2 6 22 22 22 14 14 14
39662 - 2 2 6 2 2 6 2 2 6 2 2 6
39663 - 2 2 6 66 66 66 62 62 62 22 22 22
39664 - 6 6 6 0 0 0 0 0 0 0 0 0
39665 - 0 0 0 0 0 0 0 0 0 0 0 0
39666 - 0 0 0 0 0 0 0 0 0 0 0 0
39667 - 0 0 0 0 0 0 0 0 0 0 0 0
39668 - 0 0 0 0 0 0 0 0 0 0 0 0
39669 - 0 0 0 0 0 0 0 0 0 0 0 0
39670 - 0 0 0 0 0 0 6 6 6 18 18 18
39671 - 50 50 50 74 74 74 2 2 6 2 2 6
39672 - 14 14 14 70 70 70 34 34 34 62 62 62
39673 -250 250 250 253 253 253 253 253 253 253 253 253
39674 -253 253 253 253 253 253 253 253 253 253 253 253
39675 -253 253 253 253 253 253 231 231 231 246 246 246
39676 -253 253 253 253 253 253 253 253 253 253 253 253
39677 -253 253 253 253 253 253 253 253 253 253 253 253
39678 -253 253 253 253 253 253 253 253 253 253 253 253
39679 -253 253 253 253 253 253 253 253 253 253 253 253
39680 -253 253 253 253 253 253 234 234 234 14 14 14
39681 - 2 2 6 2 2 6 30 30 30 2 2 6
39682 - 2 2 6 2 2 6 2 2 6 2 2 6
39683 - 2 2 6 66 66 66 62 62 62 22 22 22
39684 - 6 6 6 0 0 0 0 0 0 0 0 0
39685 - 0 0 0 0 0 0 0 0 0 0 0 0
39686 - 0 0 0 0 0 0 0 0 0 0 0 0
39687 - 0 0 0 0 0 0 0 0 0 0 0 0
39688 - 0 0 0 0 0 0 0 0 0 0 0 0
39689 - 0 0 0 0 0 0 0 0 0 0 0 0
39690 - 0 0 0 0 0 0 6 6 6 18 18 18
39691 - 54 54 54 62 62 62 2 2 6 2 2 6
39692 - 2 2 6 30 30 30 46 46 46 70 70 70
39693 -250 250 250 253 253 253 253 253 253 253 253 253
39694 -253 253 253 253 253 253 253 253 253 253 253 253
39695 -253 253 253 253 253 253 231 231 231 246 246 246
39696 -253 253 253 253 253 253 253 253 253 253 253 253
39697 -253 253 253 253 253 253 253 253 253 253 253 253
39698 -253 253 253 253 253 253 253 253 253 253 253 253
39699 -253 253 253 253 253 253 253 253 253 253 253 253
39700 -253 253 253 253 253 253 226 226 226 10 10 10
39701 - 2 2 6 6 6 6 30 30 30 2 2 6
39702 - 2 2 6 2 2 6 2 2 6 2 2 6
39703 - 2 2 6 66 66 66 58 58 58 22 22 22
39704 - 6 6 6 0 0 0 0 0 0 0 0 0
39705 - 0 0 0 0 0 0 0 0 0 0 0 0
39706 - 0 0 0 0 0 0 0 0 0 0 0 0
39707 - 0 0 0 0 0 0 0 0 0 0 0 0
39708 - 0 0 0 0 0 0 0 0 0 0 0 0
39709 - 0 0 0 0 0 0 0 0 0 0 0 0
39710 - 0 0 0 0 0 0 6 6 6 22 22 22
39711 - 58 58 58 62 62 62 2 2 6 2 2 6
39712 - 2 2 6 2 2 6 30 30 30 78 78 78
39713 -250 250 250 253 253 253 253 253 253 253 253 253
39714 -253 253 253 253 253 253 253 253 253 253 253 253
39715 -253 253 253 253 253 253 231 231 231 246 246 246
39716 -253 253 253 253 253 253 253 253 253 253 253 253
39717 -253 253 253 253 253 253 253 253 253 253 253 253
39718 -253 253 253 253 253 253 253 253 253 253 253 253
39719 -253 253 253 253 253 253 253 253 253 253 253 253
39720 -253 253 253 253 253 253 206 206 206 2 2 6
39721 - 22 22 22 34 34 34 18 14 6 22 22 22
39722 - 26 26 26 18 18 18 6 6 6 2 2 6
39723 - 2 2 6 82 82 82 54 54 54 18 18 18
39724 - 6 6 6 0 0 0 0 0 0 0 0 0
39725 - 0 0 0 0 0 0 0 0 0 0 0 0
39726 - 0 0 0 0 0 0 0 0 0 0 0 0
39727 - 0 0 0 0 0 0 0 0 0 0 0 0
39728 - 0 0 0 0 0 0 0 0 0 0 0 0
39729 - 0 0 0 0 0 0 0 0 0 0 0 0
39730 - 0 0 0 0 0 0 6 6 6 26 26 26
39731 - 62 62 62 106 106 106 74 54 14 185 133 11
39732 -210 162 10 121 92 8 6 6 6 62 62 62
39733 -238 238 238 253 253 253 253 253 253 253 253 253
39734 -253 253 253 253 253 253 253 253 253 253 253 253
39735 -253 253 253 253 253 253 231 231 231 246 246 246
39736 -253 253 253 253 253 253 253 253 253 253 253 253
39737 -253 253 253 253 253 253 253 253 253 253 253 253
39738 -253 253 253 253 253 253 253 253 253 253 253 253
39739 -253 253 253 253 253 253 253 253 253 253 253 253
39740 -253 253 253 253 253 253 158 158 158 18 18 18
39741 - 14 14 14 2 2 6 2 2 6 2 2 6
39742 - 6 6 6 18 18 18 66 66 66 38 38 38
39743 - 6 6 6 94 94 94 50 50 50 18 18 18
39744 - 6 6 6 0 0 0 0 0 0 0 0 0
39745 - 0 0 0 0 0 0 0 0 0 0 0 0
39746 - 0 0 0 0 0 0 0 0 0 0 0 0
39747 - 0 0 0 0 0 0 0 0 0 0 0 0
39748 - 0 0 0 0 0 0 0 0 0 0 0 0
39749 - 0 0 0 0 0 0 0 0 0 6 6 6
39750 - 10 10 10 10 10 10 18 18 18 38 38 38
39751 - 78 78 78 142 134 106 216 158 10 242 186 14
39752 -246 190 14 246 190 14 156 118 10 10 10 10
39753 - 90 90 90 238 238 238 253 253 253 253 253 253
39754 -253 253 253 253 253 253 253 253 253 253 253 253
39755 -253 253 253 253 253 253 231 231 231 250 250 250
39756 -253 253 253 253 253 253 253 253 253 253 253 253
39757 -253 253 253 253 253 253 253 253 253 253 253 253
39758 -253 253 253 253 253 253 253 253 253 253 253 253
39759 -253 253 253 253 253 253 253 253 253 246 230 190
39760 -238 204 91 238 204 91 181 142 44 37 26 9
39761 - 2 2 6 2 2 6 2 2 6 2 2 6
39762 - 2 2 6 2 2 6 38 38 38 46 46 46
39763 - 26 26 26 106 106 106 54 54 54 18 18 18
39764 - 6 6 6 0 0 0 0 0 0 0 0 0
39765 - 0 0 0 0 0 0 0 0 0 0 0 0
39766 - 0 0 0 0 0 0 0 0 0 0 0 0
39767 - 0 0 0 0 0 0 0 0 0 0 0 0
39768 - 0 0 0 0 0 0 0 0 0 0 0 0
39769 - 0 0 0 6 6 6 14 14 14 22 22 22
39770 - 30 30 30 38 38 38 50 50 50 70 70 70
39771 -106 106 106 190 142 34 226 170 11 242 186 14
39772 -246 190 14 246 190 14 246 190 14 154 114 10
39773 - 6 6 6 74 74 74 226 226 226 253 253 253
39774 -253 253 253 253 253 253 253 253 253 253 253 253
39775 -253 253 253 253 253 253 231 231 231 250 250 250
39776 -253 253 253 253 253 253 253 253 253 253 253 253
39777 -253 253 253 253 253 253 253 253 253 253 253 253
39778 -253 253 253 253 253 253 253 253 253 253 253 253
39779 -253 253 253 253 253 253 253 253 253 228 184 62
39780 -241 196 14 241 208 19 232 195 16 38 30 10
39781 - 2 2 6 2 2 6 2 2 6 2 2 6
39782 - 2 2 6 6 6 6 30 30 30 26 26 26
39783 -203 166 17 154 142 90 66 66 66 26 26 26
39784 - 6 6 6 0 0 0 0 0 0 0 0 0
39785 - 0 0 0 0 0 0 0 0 0 0 0 0
39786 - 0 0 0 0 0 0 0 0 0 0 0 0
39787 - 0 0 0 0 0 0 0 0 0 0 0 0
39788 - 0 0 0 0 0 0 0 0 0 0 0 0
39789 - 6 6 6 18 18 18 38 38 38 58 58 58
39790 - 78 78 78 86 86 86 101 101 101 123 123 123
39791 -175 146 61 210 150 10 234 174 13 246 186 14
39792 -246 190 14 246 190 14 246 190 14 238 190 10
39793 -102 78 10 2 2 6 46 46 46 198 198 198
39794 -253 253 253 253 253 253 253 253 253 253 253 253
39795 -253 253 253 253 253 253 234 234 234 242 242 242
39796 -253 253 253 253 253 253 253 253 253 253 253 253
39797 -253 253 253 253 253 253 253 253 253 253 253 253
39798 -253 253 253 253 253 253 253 253 253 253 253 253
39799 -253 253 253 253 253 253 253 253 253 224 178 62
39800 -242 186 14 241 196 14 210 166 10 22 18 6
39801 - 2 2 6 2 2 6 2 2 6 2 2 6
39802 - 2 2 6 2 2 6 6 6 6 121 92 8
39803 -238 202 15 232 195 16 82 82 82 34 34 34
39804 - 10 10 10 0 0 0 0 0 0 0 0 0
39805 - 0 0 0 0 0 0 0 0 0 0 0 0
39806 - 0 0 0 0 0 0 0 0 0 0 0 0
39807 - 0 0 0 0 0 0 0 0 0 0 0 0
39808 - 0 0 0 0 0 0 0 0 0 0 0 0
39809 - 14 14 14 38 38 38 70 70 70 154 122 46
39810 -190 142 34 200 144 11 197 138 11 197 138 11
39811 -213 154 11 226 170 11 242 186 14 246 190 14
39812 -246 190 14 246 190 14 246 190 14 246 190 14
39813 -225 175 15 46 32 6 2 2 6 22 22 22
39814 -158 158 158 250 250 250 253 253 253 253 253 253
39815 -253 253 253 253 253 253 253 253 253 253 253 253
39816 -253 253 253 253 253 253 253 253 253 253 253 253
39817 -253 253 253 253 253 253 253 253 253 253 253 253
39818 -253 253 253 253 253 253 253 253 253 253 253 253
39819 -253 253 253 250 250 250 242 242 242 224 178 62
39820 -239 182 13 236 186 11 213 154 11 46 32 6
39821 - 2 2 6 2 2 6 2 2 6 2 2 6
39822 - 2 2 6 2 2 6 61 42 6 225 175 15
39823 -238 190 10 236 186 11 112 100 78 42 42 42
39824 - 14 14 14 0 0 0 0 0 0 0 0 0
39825 - 0 0 0 0 0 0 0 0 0 0 0 0
39826 - 0 0 0 0 0 0 0 0 0 0 0 0
39827 - 0 0 0 0 0 0 0 0 0 0 0 0
39828 - 0 0 0 0 0 0 0 0 0 6 6 6
39829 - 22 22 22 54 54 54 154 122 46 213 154 11
39830 -226 170 11 230 174 11 226 170 11 226 170 11
39831 -236 178 12 242 186 14 246 190 14 246 190 14
39832 -246 190 14 246 190 14 246 190 14 246 190 14
39833 -241 196 14 184 144 12 10 10 10 2 2 6
39834 - 6 6 6 116 116 116 242 242 242 253 253 253
39835 -253 253 253 253 253 253 253 253 253 253 253 253
39836 -253 253 253 253 253 253 253 253 253 253 253 253
39837 -253 253 253 253 253 253 253 253 253 253 253 253
39838 -253 253 253 253 253 253 253 253 253 253 253 253
39839 -253 253 253 231 231 231 198 198 198 214 170 54
39840 -236 178 12 236 178 12 210 150 10 137 92 6
39841 - 18 14 6 2 2 6 2 2 6 2 2 6
39842 - 6 6 6 70 47 6 200 144 11 236 178 12
39843 -239 182 13 239 182 13 124 112 88 58 58 58
39844 - 22 22 22 6 6 6 0 0 0 0 0 0
39845 - 0 0 0 0 0 0 0 0 0 0 0 0
39846 - 0 0 0 0 0 0 0 0 0 0 0 0
39847 - 0 0 0 0 0 0 0 0 0 0 0 0
39848 - 0 0 0 0 0 0 0 0 0 10 10 10
39849 - 30 30 30 70 70 70 180 133 36 226 170 11
39850 -239 182 13 242 186 14 242 186 14 246 186 14
39851 -246 190 14 246 190 14 246 190 14 246 190 14
39852 -246 190 14 246 190 14 246 190 14 246 190 14
39853 -246 190 14 232 195 16 98 70 6 2 2 6
39854 - 2 2 6 2 2 6 66 66 66 221 221 221
39855 -253 253 253 253 253 253 253 253 253 253 253 253
39856 -253 253 253 253 253 253 253 253 253 253 253 253
39857 -253 253 253 253 253 253 253 253 253 253 253 253
39858 -253 253 253 253 253 253 253 253 253 253 253 253
39859 -253 253 253 206 206 206 198 198 198 214 166 58
39860 -230 174 11 230 174 11 216 158 10 192 133 9
39861 -163 110 8 116 81 8 102 78 10 116 81 8
39862 -167 114 7 197 138 11 226 170 11 239 182 13
39863 -242 186 14 242 186 14 162 146 94 78 78 78
39864 - 34 34 34 14 14 14 6 6 6 0 0 0
39865 - 0 0 0 0 0 0 0 0 0 0 0 0
39866 - 0 0 0 0 0 0 0 0 0 0 0 0
39867 - 0 0 0 0 0 0 0 0 0 0 0 0
39868 - 0 0 0 0 0 0 0 0 0 6 6 6
39869 - 30 30 30 78 78 78 190 142 34 226 170 11
39870 -239 182 13 246 190 14 246 190 14 246 190 14
39871 -246 190 14 246 190 14 246 190 14 246 190 14
39872 -246 190 14 246 190 14 246 190 14 246 190 14
39873 -246 190 14 241 196 14 203 166 17 22 18 6
39874 - 2 2 6 2 2 6 2 2 6 38 38 38
39875 -218 218 218 253 253 253 253 253 253 253 253 253
39876 -253 253 253 253 253 253 253 253 253 253 253 253
39877 -253 253 253 253 253 253 253 253 253 253 253 253
39878 -253 253 253 253 253 253 253 253 253 253 253 253
39879 -250 250 250 206 206 206 198 198 198 202 162 69
39880 -226 170 11 236 178 12 224 166 10 210 150 10
39881 -200 144 11 197 138 11 192 133 9 197 138 11
39882 -210 150 10 226 170 11 242 186 14 246 190 14
39883 -246 190 14 246 186 14 225 175 15 124 112 88
39884 - 62 62 62 30 30 30 14 14 14 6 6 6
39885 - 0 0 0 0 0 0 0 0 0 0 0 0
39886 - 0 0 0 0 0 0 0 0 0 0 0 0
39887 - 0 0 0 0 0 0 0 0 0 0 0 0
39888 - 0 0 0 0 0 0 0 0 0 10 10 10
39889 - 30 30 30 78 78 78 174 135 50 224 166 10
39890 -239 182 13 246 190 14 246 190 14 246 190 14
39891 -246 190 14 246 190 14 246 190 14 246 190 14
39892 -246 190 14 246 190 14 246 190 14 246 190 14
39893 -246 190 14 246 190 14 241 196 14 139 102 15
39894 - 2 2 6 2 2 6 2 2 6 2 2 6
39895 - 78 78 78 250 250 250 253 253 253 253 253 253
39896 -253 253 253 253 253 253 253 253 253 253 253 253
39897 -253 253 253 253 253 253 253 253 253 253 253 253
39898 -253 253 253 253 253 253 253 253 253 253 253 253
39899 -250 250 250 214 214 214 198 198 198 190 150 46
39900 -219 162 10 236 178 12 234 174 13 224 166 10
39901 -216 158 10 213 154 11 213 154 11 216 158 10
39902 -226 170 11 239 182 13 246 190 14 246 190 14
39903 -246 190 14 246 190 14 242 186 14 206 162 42
39904 -101 101 101 58 58 58 30 30 30 14 14 14
39905 - 6 6 6 0 0 0 0 0 0 0 0 0
39906 - 0 0 0 0 0 0 0 0 0 0 0 0
39907 - 0 0 0 0 0 0 0 0 0 0 0 0
39908 - 0 0 0 0 0 0 0 0 0 10 10 10
39909 - 30 30 30 74 74 74 174 135 50 216 158 10
39910 -236 178 12 246 190 14 246 190 14 246 190 14
39911 -246 190 14 246 190 14 246 190 14 246 190 14
39912 -246 190 14 246 190 14 246 190 14 246 190 14
39913 -246 190 14 246 190 14 241 196 14 226 184 13
39914 - 61 42 6 2 2 6 2 2 6 2 2 6
39915 - 22 22 22 238 238 238 253 253 253 253 253 253
39916 -253 253 253 253 253 253 253 253 253 253 253 253
39917 -253 253 253 253 253 253 253 253 253 253 253 253
39918 -253 253 253 253 253 253 253 253 253 253 253 253
39919 -253 253 253 226 226 226 187 187 187 180 133 36
39920 -216 158 10 236 178 12 239 182 13 236 178 12
39921 -230 174 11 226 170 11 226 170 11 230 174 11
39922 -236 178 12 242 186 14 246 190 14 246 190 14
39923 -246 190 14 246 190 14 246 186 14 239 182 13
39924 -206 162 42 106 106 106 66 66 66 34 34 34
39925 - 14 14 14 6 6 6 0 0 0 0 0 0
39926 - 0 0 0 0 0 0 0 0 0 0 0 0
39927 - 0 0 0 0 0 0 0 0 0 0 0 0
39928 - 0 0 0 0 0 0 0 0 0 6 6 6
39929 - 26 26 26 70 70 70 163 133 67 213 154 11
39930 -236 178 12 246 190 14 246 190 14 246 190 14
39931 -246 190 14 246 190 14 246 190 14 246 190 14
39932 -246 190 14 246 190 14 246 190 14 246 190 14
39933 -246 190 14 246 190 14 246 190 14 241 196 14
39934 -190 146 13 18 14 6 2 2 6 2 2 6
39935 - 46 46 46 246 246 246 253 253 253 253 253 253
39936 -253 253 253 253 253 253 253 253 253 253 253 253
39937 -253 253 253 253 253 253 253 253 253 253 253 253
39938 -253 253 253 253 253 253 253 253 253 253 253 253
39939 -253 253 253 221 221 221 86 86 86 156 107 11
39940 -216 158 10 236 178 12 242 186 14 246 186 14
39941 -242 186 14 239 182 13 239 182 13 242 186 14
39942 -242 186 14 246 186 14 246 190 14 246 190 14
39943 -246 190 14 246 190 14 246 190 14 246 190 14
39944 -242 186 14 225 175 15 142 122 72 66 66 66
39945 - 30 30 30 10 10 10 0 0 0 0 0 0
39946 - 0 0 0 0 0 0 0 0 0 0 0 0
39947 - 0 0 0 0 0 0 0 0 0 0 0 0
39948 - 0 0 0 0 0 0 0 0 0 6 6 6
39949 - 26 26 26 70 70 70 163 133 67 210 150 10
39950 -236 178 12 246 190 14 246 190 14 246 190 14
39951 -246 190 14 246 190 14 246 190 14 246 190 14
39952 -246 190 14 246 190 14 246 190 14 246 190 14
39953 -246 190 14 246 190 14 246 190 14 246 190 14
39954 -232 195 16 121 92 8 34 34 34 106 106 106
39955 -221 221 221 253 253 253 253 253 253 253 253 253
39956 -253 253 253 253 253 253 253 253 253 253 253 253
39957 -253 253 253 253 253 253 253 253 253 253 253 253
39958 -253 253 253 253 253 253 253 253 253 253 253 253
39959 -242 242 242 82 82 82 18 14 6 163 110 8
39960 -216 158 10 236 178 12 242 186 14 246 190 14
39961 -246 190 14 246 190 14 246 190 14 246 190 14
39962 -246 190 14 246 190 14 246 190 14 246 190 14
39963 -246 190 14 246 190 14 246 190 14 246 190 14
39964 -246 190 14 246 190 14 242 186 14 163 133 67
39965 - 46 46 46 18 18 18 6 6 6 0 0 0
39966 - 0 0 0 0 0 0 0 0 0 0 0 0
39967 - 0 0 0 0 0 0 0 0 0 0 0 0
39968 - 0 0 0 0 0 0 0 0 0 10 10 10
39969 - 30 30 30 78 78 78 163 133 67 210 150 10
39970 -236 178 12 246 186 14 246 190 14 246 190 14
39971 -246 190 14 246 190 14 246 190 14 246 190 14
39972 -246 190 14 246 190 14 246 190 14 246 190 14
39973 -246 190 14 246 190 14 246 190 14 246 190 14
39974 -241 196 14 215 174 15 190 178 144 253 253 253
39975 -253 253 253 253 253 253 253 253 253 253 253 253
39976 -253 253 253 253 253 253 253 253 253 253 253 253
39977 -253 253 253 253 253 253 253 253 253 253 253 253
39978 -253 253 253 253 253 253 253 253 253 218 218 218
39979 - 58 58 58 2 2 6 22 18 6 167 114 7
39980 -216 158 10 236 178 12 246 186 14 246 190 14
39981 -246 190 14 246 190 14 246 190 14 246 190 14
39982 -246 190 14 246 190 14 246 190 14 246 190 14
39983 -246 190 14 246 190 14 246 190 14 246 190 14
39984 -246 190 14 246 186 14 242 186 14 190 150 46
39985 - 54 54 54 22 22 22 6 6 6 0 0 0
39986 - 0 0 0 0 0 0 0 0 0 0 0 0
39987 - 0 0 0 0 0 0 0 0 0 0 0 0
39988 - 0 0 0 0 0 0 0 0 0 14 14 14
39989 - 38 38 38 86 86 86 180 133 36 213 154 11
39990 -236 178 12 246 186 14 246 190 14 246 190 14
39991 -246 190 14 246 190 14 246 190 14 246 190 14
39992 -246 190 14 246 190 14 246 190 14 246 190 14
39993 -246 190 14 246 190 14 246 190 14 246 190 14
39994 -246 190 14 232 195 16 190 146 13 214 214 214
39995 -253 253 253 253 253 253 253 253 253 253 253 253
39996 -253 253 253 253 253 253 253 253 253 253 253 253
39997 -253 253 253 253 253 253 253 253 253 253 253 253
39998 -253 253 253 250 250 250 170 170 170 26 26 26
39999 - 2 2 6 2 2 6 37 26 9 163 110 8
40000 -219 162 10 239 182 13 246 186 14 246 190 14
40001 -246 190 14 246 190 14 246 190 14 246 190 14
40002 -246 190 14 246 190 14 246 190 14 246 190 14
40003 -246 190 14 246 190 14 246 190 14 246 190 14
40004 -246 186 14 236 178 12 224 166 10 142 122 72
40005 - 46 46 46 18 18 18 6 6 6 0 0 0
40006 - 0 0 0 0 0 0 0 0 0 0 0 0
40007 - 0 0 0 0 0 0 0 0 0 0 0 0
40008 - 0 0 0 0 0 0 6 6 6 18 18 18
40009 - 50 50 50 109 106 95 192 133 9 224 166 10
40010 -242 186 14 246 190 14 246 190 14 246 190 14
40011 -246 190 14 246 190 14 246 190 14 246 190 14
40012 -246 190 14 246 190 14 246 190 14 246 190 14
40013 -246 190 14 246 190 14 246 190 14 246 190 14
40014 -242 186 14 226 184 13 210 162 10 142 110 46
40015 -226 226 226 253 253 253 253 253 253 253 253 253
40016 -253 253 253 253 253 253 253 253 253 253 253 253
40017 -253 253 253 253 253 253 253 253 253 253 253 253
40018 -198 198 198 66 66 66 2 2 6 2 2 6
40019 - 2 2 6 2 2 6 50 34 6 156 107 11
40020 -219 162 10 239 182 13 246 186 14 246 190 14
40021 -246 190 14 246 190 14 246 190 14 246 190 14
40022 -246 190 14 246 190 14 246 190 14 246 190 14
40023 -246 190 14 246 190 14 246 190 14 242 186 14
40024 -234 174 13 213 154 11 154 122 46 66 66 66
40025 - 30 30 30 10 10 10 0 0 0 0 0 0
40026 - 0 0 0 0 0 0 0 0 0 0 0 0
40027 - 0 0 0 0 0 0 0 0 0 0 0 0
40028 - 0 0 0 0 0 0 6 6 6 22 22 22
40029 - 58 58 58 154 121 60 206 145 10 234 174 13
40030 -242 186 14 246 186 14 246 190 14 246 190 14
40031 -246 190 14 246 190 14 246 190 14 246 190 14
40032 -246 190 14 246 190 14 246 190 14 246 190 14
40033 -246 190 14 246 190 14 246 190 14 246 190 14
40034 -246 186 14 236 178 12 210 162 10 163 110 8
40035 - 61 42 6 138 138 138 218 218 218 250 250 250
40036 -253 253 253 253 253 253 253 253 253 250 250 250
40037 -242 242 242 210 210 210 144 144 144 66 66 66
40038 - 6 6 6 2 2 6 2 2 6 2 2 6
40039 - 2 2 6 2 2 6 61 42 6 163 110 8
40040 -216 158 10 236 178 12 246 190 14 246 190 14
40041 -246 190 14 246 190 14 246 190 14 246 190 14
40042 -246 190 14 246 190 14 246 190 14 246 190 14
40043 -246 190 14 239 182 13 230 174 11 216 158 10
40044 -190 142 34 124 112 88 70 70 70 38 38 38
40045 - 18 18 18 6 6 6 0 0 0 0 0 0
40046 - 0 0 0 0 0 0 0 0 0 0 0 0
40047 - 0 0 0 0 0 0 0 0 0 0 0 0
40048 - 0 0 0 0 0 0 6 6 6 22 22 22
40049 - 62 62 62 168 124 44 206 145 10 224 166 10
40050 -236 178 12 239 182 13 242 186 14 242 186 14
40051 -246 186 14 246 190 14 246 190 14 246 190 14
40052 -246 190 14 246 190 14 246 190 14 246 190 14
40053 -246 190 14 246 190 14 246 190 14 246 190 14
40054 -246 190 14 236 178 12 216 158 10 175 118 6
40055 - 80 54 7 2 2 6 6 6 6 30 30 30
40056 - 54 54 54 62 62 62 50 50 50 38 38 38
40057 - 14 14 14 2 2 6 2 2 6 2 2 6
40058 - 2 2 6 2 2 6 2 2 6 2 2 6
40059 - 2 2 6 6 6 6 80 54 7 167 114 7
40060 -213 154 11 236 178 12 246 190 14 246 190 14
40061 -246 190 14 246 190 14 246 190 14 246 190 14
40062 -246 190 14 242 186 14 239 182 13 239 182 13
40063 -230 174 11 210 150 10 174 135 50 124 112 88
40064 - 82 82 82 54 54 54 34 34 34 18 18 18
40065 - 6 6 6 0 0 0 0 0 0 0 0 0
40066 - 0 0 0 0 0 0 0 0 0 0 0 0
40067 - 0 0 0 0 0 0 0 0 0 0 0 0
40068 - 0 0 0 0 0 0 6 6 6 18 18 18
40069 - 50 50 50 158 118 36 192 133 9 200 144 11
40070 -216 158 10 219 162 10 224 166 10 226 170 11
40071 -230 174 11 236 178 12 239 182 13 239 182 13
40072 -242 186 14 246 186 14 246 190 14 246 190 14
40073 -246 190 14 246 190 14 246 190 14 246 190 14
40074 -246 186 14 230 174 11 210 150 10 163 110 8
40075 -104 69 6 10 10 10 2 2 6 2 2 6
40076 - 2 2 6 2 2 6 2 2 6 2 2 6
40077 - 2 2 6 2 2 6 2 2 6 2 2 6
40078 - 2 2 6 2 2 6 2 2 6 2 2 6
40079 - 2 2 6 6 6 6 91 60 6 167 114 7
40080 -206 145 10 230 174 11 242 186 14 246 190 14
40081 -246 190 14 246 190 14 246 186 14 242 186 14
40082 -239 182 13 230 174 11 224 166 10 213 154 11
40083 -180 133 36 124 112 88 86 86 86 58 58 58
40084 - 38 38 38 22 22 22 10 10 10 6 6 6
40085 - 0 0 0 0 0 0 0 0 0 0 0 0
40086 - 0 0 0 0 0 0 0 0 0 0 0 0
40087 - 0 0 0 0 0 0 0 0 0 0 0 0
40088 - 0 0 0 0 0 0 0 0 0 14 14 14
40089 - 34 34 34 70 70 70 138 110 50 158 118 36
40090 -167 114 7 180 123 7 192 133 9 197 138 11
40091 -200 144 11 206 145 10 213 154 11 219 162 10
40092 -224 166 10 230 174 11 239 182 13 242 186 14
40093 -246 186 14 246 186 14 246 186 14 246 186 14
40094 -239 182 13 216 158 10 185 133 11 152 99 6
40095 -104 69 6 18 14 6 2 2 6 2 2 6
40096 - 2 2 6 2 2 6 2 2 6 2 2 6
40097 - 2 2 6 2 2 6 2 2 6 2 2 6
40098 - 2 2 6 2 2 6 2 2 6 2 2 6
40099 - 2 2 6 6 6 6 80 54 7 152 99 6
40100 -192 133 9 219 162 10 236 178 12 239 182 13
40101 -246 186 14 242 186 14 239 182 13 236 178 12
40102 -224 166 10 206 145 10 192 133 9 154 121 60
40103 - 94 94 94 62 62 62 42 42 42 22 22 22
40104 - 14 14 14 6 6 6 0 0 0 0 0 0
40105 - 0 0 0 0 0 0 0 0 0 0 0 0
40106 - 0 0 0 0 0 0 0 0 0 0 0 0
40107 - 0 0 0 0 0 0 0 0 0 0 0 0
40108 - 0 0 0 0 0 0 0 0 0 6 6 6
40109 - 18 18 18 34 34 34 58 58 58 78 78 78
40110 -101 98 89 124 112 88 142 110 46 156 107 11
40111 -163 110 8 167 114 7 175 118 6 180 123 7
40112 -185 133 11 197 138 11 210 150 10 219 162 10
40113 -226 170 11 236 178 12 236 178 12 234 174 13
40114 -219 162 10 197 138 11 163 110 8 130 83 6
40115 - 91 60 6 10 10 10 2 2 6 2 2 6
40116 - 18 18 18 38 38 38 38 38 38 38 38 38
40117 - 38 38 38 38 38 38 38 38 38 38 38 38
40118 - 38 38 38 38 38 38 26 26 26 2 2 6
40119 - 2 2 6 6 6 6 70 47 6 137 92 6
40120 -175 118 6 200 144 11 219 162 10 230 174 11
40121 -234 174 13 230 174 11 219 162 10 210 150 10
40122 -192 133 9 163 110 8 124 112 88 82 82 82
40123 - 50 50 50 30 30 30 14 14 14 6 6 6
40124 - 0 0 0 0 0 0 0 0 0 0 0 0
40125 - 0 0 0 0 0 0 0 0 0 0 0 0
40126 - 0 0 0 0 0 0 0 0 0 0 0 0
40127 - 0 0 0 0 0 0 0 0 0 0 0 0
40128 - 0 0 0 0 0 0 0 0 0 0 0 0
40129 - 6 6 6 14 14 14 22 22 22 34 34 34
40130 - 42 42 42 58 58 58 74 74 74 86 86 86
40131 -101 98 89 122 102 70 130 98 46 121 87 25
40132 -137 92 6 152 99 6 163 110 8 180 123 7
40133 -185 133 11 197 138 11 206 145 10 200 144 11
40134 -180 123 7 156 107 11 130 83 6 104 69 6
40135 - 50 34 6 54 54 54 110 110 110 101 98 89
40136 - 86 86 86 82 82 82 78 78 78 78 78 78
40137 - 78 78 78 78 78 78 78 78 78 78 78 78
40138 - 78 78 78 82 82 82 86 86 86 94 94 94
40139 -106 106 106 101 101 101 86 66 34 124 80 6
40140 -156 107 11 180 123 7 192 133 9 200 144 11
40141 -206 145 10 200 144 11 192 133 9 175 118 6
40142 -139 102 15 109 106 95 70 70 70 42 42 42
40143 - 22 22 22 10 10 10 0 0 0 0 0 0
40144 - 0 0 0 0 0 0 0 0 0 0 0 0
40145 - 0 0 0 0 0 0 0 0 0 0 0 0
40146 - 0 0 0 0 0 0 0 0 0 0 0 0
40147 - 0 0 0 0 0 0 0 0 0 0 0 0
40148 - 0 0 0 0 0 0 0 0 0 0 0 0
40149 - 0 0 0 0 0 0 6 6 6 10 10 10
40150 - 14 14 14 22 22 22 30 30 30 38 38 38
40151 - 50 50 50 62 62 62 74 74 74 90 90 90
40152 -101 98 89 112 100 78 121 87 25 124 80 6
40153 -137 92 6 152 99 6 152 99 6 152 99 6
40154 -138 86 6 124 80 6 98 70 6 86 66 30
40155 -101 98 89 82 82 82 58 58 58 46 46 46
40156 - 38 38 38 34 34 34 34 34 34 34 34 34
40157 - 34 34 34 34 34 34 34 34 34 34 34 34
40158 - 34 34 34 34 34 34 38 38 38 42 42 42
40159 - 54 54 54 82 82 82 94 86 76 91 60 6
40160 -134 86 6 156 107 11 167 114 7 175 118 6
40161 -175 118 6 167 114 7 152 99 6 121 87 25
40162 -101 98 89 62 62 62 34 34 34 18 18 18
40163 - 6 6 6 0 0 0 0 0 0 0 0 0
40164 - 0 0 0 0 0 0 0 0 0 0 0 0
40165 - 0 0 0 0 0 0 0 0 0 0 0 0
40166 - 0 0 0 0 0 0 0 0 0 0 0 0
40167 - 0 0 0 0 0 0 0 0 0 0 0 0
40168 - 0 0 0 0 0 0 0 0 0 0 0 0
40169 - 0 0 0 0 0 0 0 0 0 0 0 0
40170 - 0 0 0 6 6 6 6 6 6 10 10 10
40171 - 18 18 18 22 22 22 30 30 30 42 42 42
40172 - 50 50 50 66 66 66 86 86 86 101 98 89
40173 -106 86 58 98 70 6 104 69 6 104 69 6
40174 -104 69 6 91 60 6 82 62 34 90 90 90
40175 - 62 62 62 38 38 38 22 22 22 14 14 14
40176 - 10 10 10 10 10 10 10 10 10 10 10 10
40177 - 10 10 10 10 10 10 6 6 6 10 10 10
40178 - 10 10 10 10 10 10 10 10 10 14 14 14
40179 - 22 22 22 42 42 42 70 70 70 89 81 66
40180 - 80 54 7 104 69 6 124 80 6 137 92 6
40181 -134 86 6 116 81 8 100 82 52 86 86 86
40182 - 58 58 58 30 30 30 14 14 14 6 6 6
40183 - 0 0 0 0 0 0 0 0 0 0 0 0
40184 - 0 0 0 0 0 0 0 0 0 0 0 0
40185 - 0 0 0 0 0 0 0 0 0 0 0 0
40186 - 0 0 0 0 0 0 0 0 0 0 0 0
40187 - 0 0 0 0 0 0 0 0 0 0 0 0
40188 - 0 0 0 0 0 0 0 0 0 0 0 0
40189 - 0 0 0 0 0 0 0 0 0 0 0 0
40190 - 0 0 0 0 0 0 0 0 0 0 0 0
40191 - 0 0 0 6 6 6 10 10 10 14 14 14
40192 - 18 18 18 26 26 26 38 38 38 54 54 54
40193 - 70 70 70 86 86 86 94 86 76 89 81 66
40194 - 89 81 66 86 86 86 74 74 74 50 50 50
40195 - 30 30 30 14 14 14 6 6 6 0 0 0
40196 - 0 0 0 0 0 0 0 0 0 0 0 0
40197 - 0 0 0 0 0 0 0 0 0 0 0 0
40198 - 0 0 0 0 0 0 0 0 0 0 0 0
40199 - 6 6 6 18 18 18 34 34 34 58 58 58
40200 - 82 82 82 89 81 66 89 81 66 89 81 66
40201 - 94 86 66 94 86 76 74 74 74 50 50 50
40202 - 26 26 26 14 14 14 6 6 6 0 0 0
40203 - 0 0 0 0 0 0 0 0 0 0 0 0
40204 - 0 0 0 0 0 0 0 0 0 0 0 0
40205 - 0 0 0 0 0 0 0 0 0 0 0 0
40206 - 0 0 0 0 0 0 0 0 0 0 0 0
40207 - 0 0 0 0 0 0 0 0 0 0 0 0
40208 - 0 0 0 0 0 0 0 0 0 0 0 0
40209 - 0 0 0 0 0 0 0 0 0 0 0 0
40210 - 0 0 0 0 0 0 0 0 0 0 0 0
40211 - 0 0 0 0 0 0 0 0 0 0 0 0
40212 - 6 6 6 6 6 6 14 14 14 18 18 18
40213 - 30 30 30 38 38 38 46 46 46 54 54 54
40214 - 50 50 50 42 42 42 30 30 30 18 18 18
40215 - 10 10 10 0 0 0 0 0 0 0 0 0
40216 - 0 0 0 0 0 0 0 0 0 0 0 0
40217 - 0 0 0 0 0 0 0 0 0 0 0 0
40218 - 0 0 0 0 0 0 0 0 0 0 0 0
40219 - 0 0 0 6 6 6 14 14 14 26 26 26
40220 - 38 38 38 50 50 50 58 58 58 58 58 58
40221 - 54 54 54 42 42 42 30 30 30 18 18 18
40222 - 10 10 10 0 0 0 0 0 0 0 0 0
40223 - 0 0 0 0 0 0 0 0 0 0 0 0
40224 - 0 0 0 0 0 0 0 0 0 0 0 0
40225 - 0 0 0 0 0 0 0 0 0 0 0 0
40226 - 0 0 0 0 0 0 0 0 0 0 0 0
40227 - 0 0 0 0 0 0 0 0 0 0 0 0
40228 - 0 0 0 0 0 0 0 0 0 0 0 0
40229 - 0 0 0 0 0 0 0 0 0 0 0 0
40230 - 0 0 0 0 0 0 0 0 0 0 0 0
40231 - 0 0 0 0 0 0 0 0 0 0 0 0
40232 - 0 0 0 0 0 0 0 0 0 6 6 6
40233 - 6 6 6 10 10 10 14 14 14 18 18 18
40234 - 18 18 18 14 14 14 10 10 10 6 6 6
40235 - 0 0 0 0 0 0 0 0 0 0 0 0
40236 - 0 0 0 0 0 0 0 0 0 0 0 0
40237 - 0 0 0 0 0 0 0 0 0 0 0 0
40238 - 0 0 0 0 0 0 0 0 0 0 0 0
40239 - 0 0 0 0 0 0 0 0 0 6 6 6
40240 - 14 14 14 18 18 18 22 22 22 22 22 22
40241 - 18 18 18 14 14 14 10 10 10 6 6 6
40242 - 0 0 0 0 0 0 0 0 0 0 0 0
40243 - 0 0 0 0 0 0 0 0 0 0 0 0
40244 - 0 0 0 0 0 0 0 0 0 0 0 0
40245 - 0 0 0 0 0 0 0 0 0 0 0 0
40246 - 0 0 0 0 0 0 0 0 0 0 0 0
40247 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40254 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40258 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40259 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40260 +4 4 4 4 4 4
40261 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40268 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40272 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40273 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274 +4 4 4 4 4 4
40275 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40282 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40286 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288 +4 4 4 4 4 4
40289 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40296 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40301 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302 +4 4 4 4 4 4
40303 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40310 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40315 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316 +4 4 4 4 4 4
40317 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40329 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40330 +4 4 4 4 4 4
40331 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40335 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40336 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40341 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40342 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40343 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40344 +4 4 4 4 4 4
40345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40349 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40350 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40351 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40354 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40355 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40356 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40357 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40358 +4 4 4 4 4 4
40359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40363 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40364 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40365 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40366 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40368 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40369 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40370 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40371 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40372 +4 4 4 4 4 4
40373 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40374 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40375 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40377 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40378 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40379 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40380 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40382 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40383 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40384 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40385 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40386 +4 4 4 4 4 4
40387 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40388 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40389 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40390 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40391 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40392 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40393 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40394 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40395 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40396 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40397 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40398 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40399 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40400 +4 4 4 4 4 4
40401 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40402 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40403 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40404 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40405 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40406 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40407 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40408 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40409 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40410 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40411 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40412 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40413 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40414 +4 4 4 4 4 4
40415 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40416 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40417 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40418 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40419 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40420 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40421 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40422 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40423 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40424 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40425 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40426 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40427 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40428 +4 4 4 4 4 4
40429 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40430 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40431 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40432 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40433 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40434 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40435 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40436 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40437 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40438 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40439 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40440 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40441 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40442 +4 4 4 4 4 4
40443 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40444 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40445 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40446 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40447 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40448 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40449 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40450 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40451 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40452 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40453 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40454 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40455 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40456 +4 4 4 4 4 4
40457 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40458 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40459 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40460 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40461 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40462 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40463 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40464 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40465 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40466 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40467 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40468 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40469 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40470 +4 4 4 4 4 4
40471 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40472 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40473 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40474 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40475 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40476 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40477 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40478 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40479 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40480 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40481 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40482 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40483 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40484 +4 4 4 4 4 4
40485 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40486 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40487 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40488 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40489 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40490 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40491 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40492 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40493 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40494 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40495 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40496 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40497 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40498 +0 0 0 4 4 4
40499 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40500 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40501 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40502 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40503 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40504 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40505 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40506 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40507 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40508 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40509 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40510 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40511 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40512 +2 0 0 0 0 0
40513 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40514 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40515 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40516 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40517 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40518 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40519 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40520 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40521 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40522 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40523 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40524 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40525 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40526 +37 38 37 0 0 0
40527 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40528 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40529 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40530 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40531 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40532 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40533 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40534 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40535 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40536 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40537 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40538 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40539 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40540 +85 115 134 4 0 0
40541 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40542 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40543 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40544 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40545 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40546 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40547 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40548 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40549 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40550 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40551 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40552 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40553 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40554 +60 73 81 4 0 0
40555 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40556 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40557 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40558 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
40559 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
40560 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
40561 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
40562 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
40563 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
40564 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
40565 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
40566 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
40567 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
40568 +16 19 21 4 0 0
40569 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
40570 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
40571 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
40572 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
40573 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
40574 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
40575 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
40576 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
40577 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
40578 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
40579 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
40580 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
40581 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
40582 +4 0 0 4 3 3
40583 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
40584 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
40585 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
40586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
40587 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
40588 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
40589 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
40590 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
40591 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
40592 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
40593 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
40594 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
40595 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
40596 +3 2 2 4 4 4
40597 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
40598 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
40599 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
40600 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40601 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
40602 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
40603 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
40604 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
40605 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
40606 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
40607 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
40608 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
40609 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
40610 +4 4 4 4 4 4
40611 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
40612 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
40613 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
40614 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
40615 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
40616 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
40617 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
40618 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
40619 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
40620 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
40621 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
40622 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
40623 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
40624 +4 4 4 4 4 4
40625 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
40626 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
40627 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
40628 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
40629 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
40630 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40631 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
40632 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
40633 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
40634 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
40635 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
40636 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
40637 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
40638 +5 5 5 5 5 5
40639 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
40640 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
40641 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
40642 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
40643 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
40644 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40645 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
40646 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
40647 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
40648 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
40649 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
40650 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
40651 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
40652 +5 5 5 4 4 4
40653 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
40654 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
40655 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
40656 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
40657 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40658 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
40659 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
40660 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
40661 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
40662 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
40663 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
40664 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
40665 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40666 +4 4 4 4 4 4
40667 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
40668 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
40669 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
40670 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
40671 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
40672 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40673 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40674 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
40675 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
40676 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
40677 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
40678 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
40679 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40680 +4 4 4 4 4 4
40681 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
40682 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
40683 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
40684 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
40685 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40686 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
40687 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
40688 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
40689 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
40690 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
40691 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
40692 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40693 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40694 +4 4 4 4 4 4
40695 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
40696 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
40697 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
40698 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
40699 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40700 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40701 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
40702 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
40703 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
40704 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
40705 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
40706 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40707 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40708 +4 4 4 4 4 4
40709 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
40710 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
40711 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
40712 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
40713 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40714 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
40715 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
40716 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
40717 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
40718 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
40719 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40722 +4 4 4 4 4 4
40723 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
40724 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
40725 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
40726 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
40727 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
40728 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
40729 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
40730 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
40731 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
40732 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
40733 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
40734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40736 +4 4 4 4 4 4
40737 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
40738 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
40739 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
40740 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
40741 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
40742 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
40743 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
40744 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
40745 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
40746 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
40747 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
40748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40750 +4 4 4 4 4 4
40751 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
40752 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
40753 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
40754 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40755 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
40756 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
40757 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
40758 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
40759 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
40760 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
40761 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40764 +4 4 4 4 4 4
40765 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
40766 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
40767 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
40768 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40769 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40770 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
40771 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
40772 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
40773 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
40774 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
40775 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40778 +4 4 4 4 4 4
40779 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
40780 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
40781 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40782 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
40783 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40784 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
40785 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
40786 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
40787 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
40788 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
40789 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40792 +4 4 4 4 4 4
40793 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
40794 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
40795 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40796 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
40797 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40798 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
40799 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
40800 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
40801 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40802 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40803 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40806 +4 4 4 4 4 4
40807 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40808 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
40809 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
40810 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
40811 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
40812 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
40813 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
40814 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
40815 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40816 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40817 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40819 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40820 +4 4 4 4 4 4
40821 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
40822 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
40823 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
40824 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
40825 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40826 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
40827 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
40828 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
40829 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40830 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40831 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40832 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40833 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40834 +4 4 4 4 4 4
40835 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
40836 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
40837 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40838 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
40839 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
40840 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
40841 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
40842 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
40843 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40844 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40845 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40846 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40847 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40848 +4 4 4 4 4 4
40849 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
40850 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
40851 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40852 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
40853 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
40854 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
40855 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
40856 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
40857 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
40858 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40859 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40860 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40861 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40862 +4 4 4 4 4 4
40863 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40864 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
40865 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
40866 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
40867 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
40868 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
40869 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
40870 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
40871 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40872 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40873 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40874 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40875 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40876 +4 4 4 4 4 4
40877 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
40878 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
40879 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40880 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
40881 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
40882 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
40883 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
40884 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
40885 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
40886 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40887 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40888 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40889 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40890 +4 4 4 4 4 4
40891 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
40892 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
40893 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
40894 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
40895 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
40896 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
40897 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
40898 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
40899 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40900 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40901 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40902 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40903 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40904 +4 4 4 4 4 4
40905 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40906 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
40907 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
40908 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
40909 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
40910 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
40911 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
40912 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
40913 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40914 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40915 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40916 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40917 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40918 +4 4 4 4 4 4
40919 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40920 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
40921 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
40922 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
40923 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
40924 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
40925 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40926 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
40927 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
40928 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40929 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40930 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40931 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40932 +4 4 4 4 4 4
40933 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40934 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
40935 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
40936 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40937 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
40938 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
40939 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
40940 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
40941 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
40942 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40943 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40944 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40945 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40946 +4 4 4 4 4 4
40947 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
40948 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
40949 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
40950 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
40951 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
40952 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
40953 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
40954 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
40955 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
40956 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40957 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40958 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40959 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40960 +4 4 4 4 4 4
40961 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40962 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
40963 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
40964 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
40965 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
40966 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
40967 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
40968 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
40969 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
40970 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40971 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40972 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40973 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40974 +4 4 4 4 4 4
40975 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
40976 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
40977 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
40978 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
40979 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
40980 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
40981 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
40982 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
40983 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
40984 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40985 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
40986 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40987 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40988 +4 4 4 4 4 4
40989 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
40990 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
40991 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
40992 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
40993 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
40994 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
40995 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
40996 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
40997 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
40998 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
40999 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41000 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41001 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41002 +4 4 4 4 4 4
41003 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
41004 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
41005 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
41006 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
41007 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
41008 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
41009 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
41010 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
41011 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
41012 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
41013 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41014 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41015 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41016 +4 4 4 4 4 4
41017 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
41018 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41019 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
41020 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
41021 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
41022 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
41023 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
41024 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
41025 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
41026 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
41027 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41028 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41029 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41030 +4 4 4 4 4 4
41031 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
41032 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
41033 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
41034 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
41035 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
41036 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
41037 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41038 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
41039 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
41040 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
41041 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41042 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41043 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41044 +4 4 4 4 4 4
41045 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
41046 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
41047 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
41048 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
41049 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
41050 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
41051 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
41052 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
41053 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
41054 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
41055 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41056 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41057 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41058 +4 4 4 4 4 4
41059 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
41060 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
41061 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41062 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
41063 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
41064 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
41065 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
41066 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
41067 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
41068 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
41069 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41070 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41071 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41072 +4 4 4 4 4 4
41073 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41074 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
41075 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
41076 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
41077 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
41078 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
41079 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
41080 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
41081 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
41082 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41083 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41084 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41085 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41086 +4 4 4 4 4 4
41087 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
41088 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
41089 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
41090 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
41091 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
41092 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
41093 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
41094 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
41095 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
41096 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
41097 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41098 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41099 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41100 +4 4 4 4 4 4
41101 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
41102 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
41103 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
41104 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
41105 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
41106 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
41107 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
41108 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
41109 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
41110 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41111 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41112 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41113 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41114 +4 4 4 4 4 4
41115 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
41116 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41117 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
41118 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
41119 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
41120 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
41121 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
41122 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
41123 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
41124 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41125 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41126 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41128 +4 4 4 4 4 4
41129 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
41130 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
41131 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
41132 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
41133 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
41134 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
41135 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
41136 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
41137 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
41138 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41139 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41140 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41142 +4 4 4 4 4 4
41143 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41144 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
41145 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
41146 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
41147 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
41148 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
41149 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
41150 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
41151 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41152 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41153 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41156 +4 4 4 4 4 4
41157 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
41158 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
41159 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41160 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
41161 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
41162 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
41163 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
41164 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
41165 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41166 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41167 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41170 +4 4 4 4 4 4
41171 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41172 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
41173 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
41174 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
41175 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
41176 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
41177 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
41178 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
41179 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41180 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41181 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41184 +4 4 4 4 4 4
41185 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41186 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
41187 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41188 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
41189 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
41190 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
41191 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
41192 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
41193 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41194 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41195 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41198 +4 4 4 4 4 4
41199 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41200 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
41201 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
41202 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
41203 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
41204 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
41205 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
41206 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
41207 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41208 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41209 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41212 +4 4 4 4 4 4
41213 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41214 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
41215 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
41216 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41217 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
41218 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
41219 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
41220 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41221 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41222 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41223 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41226 +4 4 4 4 4 4
41227 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41228 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41229 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41230 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
41231 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
41232 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
41233 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
41234 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
41235 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41236 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41237 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41240 +4 4 4 4 4 4
41241 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41242 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41243 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41244 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41245 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
41246 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
41247 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
41248 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41249 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41250 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41251 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41254 +4 4 4 4 4 4
41255 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41256 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41257 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41258 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
41259 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
41260 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
41261 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
41262 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41263 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41264 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41265 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41268 +4 4 4 4 4 4
41269 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41270 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41271 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41272 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
41273 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
41274 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
41275 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
41276 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41277 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41278 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41279 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41282 +4 4 4 4 4 4
41283 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41284 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41285 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41286 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
41287 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
41288 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
41289 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
41290 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41291 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41292 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41293 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41296 +4 4 4 4 4 4
41297 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41298 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41299 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41300 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
41301 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
41302 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
41303 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
41304 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41305 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41306 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41307 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41310 +4 4 4 4 4 4
41311 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41312 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41313 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41314 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41315 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
41316 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
41317 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
41318 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41319 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41320 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41321 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41324 +4 4 4 4 4 4
41325 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41326 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41327 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41328 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41329 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
41330 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
41331 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41332 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41333 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41334 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41335 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41338 +4 4 4 4 4 4
41339 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41340 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41341 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41342 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41343 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
41344 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
41345 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41346 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41347 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41348 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41349 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41352 +4 4 4 4 4 4
41353 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41354 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41355 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41356 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41357 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
41358 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
41359 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41360 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41361 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41362 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41363 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
41366 +4 4 4 4 4 4
41367 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
41368 index a159b63..4ab532d 100644
41369 --- a/drivers/video/udlfb.c
41370 +++ b/drivers/video/udlfb.c
41371 @@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
41372 dlfb_urb_completion(urb);
41373
41374 error:
41375 - atomic_add(bytes_sent, &dev->bytes_sent);
41376 - atomic_add(bytes_identical, &dev->bytes_identical);
41377 - atomic_add(width*height*2, &dev->bytes_rendered);
41378 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41379 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41380 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
41381 end_cycles = get_cycles();
41382 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41383 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41384 >> 10)), /* Kcycles */
41385 &dev->cpu_kcycles_used);
41386
41387 @@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
41388 dlfb_urb_completion(urb);
41389
41390 error:
41391 - atomic_add(bytes_sent, &dev->bytes_sent);
41392 - atomic_add(bytes_identical, &dev->bytes_identical);
41393 - atomic_add(bytes_rendered, &dev->bytes_rendered);
41394 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
41395 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
41396 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
41397 end_cycles = get_cycles();
41398 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
41399 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
41400 >> 10)), /* Kcycles */
41401 &dev->cpu_kcycles_used);
41402 }
41403 @@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
41404 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41405 struct dlfb_data *dev = fb_info->par;
41406 return snprintf(buf, PAGE_SIZE, "%u\n",
41407 - atomic_read(&dev->bytes_rendered));
41408 + atomic_read_unchecked(&dev->bytes_rendered));
41409 }
41410
41411 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41412 @@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
41413 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41414 struct dlfb_data *dev = fb_info->par;
41415 return snprintf(buf, PAGE_SIZE, "%u\n",
41416 - atomic_read(&dev->bytes_identical));
41417 + atomic_read_unchecked(&dev->bytes_identical));
41418 }
41419
41420 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41421 @@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
41422 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41423 struct dlfb_data *dev = fb_info->par;
41424 return snprintf(buf, PAGE_SIZE, "%u\n",
41425 - atomic_read(&dev->bytes_sent));
41426 + atomic_read_unchecked(&dev->bytes_sent));
41427 }
41428
41429 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41430 @@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
41431 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41432 struct dlfb_data *dev = fb_info->par;
41433 return snprintf(buf, PAGE_SIZE, "%u\n",
41434 - atomic_read(&dev->cpu_kcycles_used));
41435 + atomic_read_unchecked(&dev->cpu_kcycles_used));
41436 }
41437
41438 static ssize_t edid_show(
41439 @@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
41440 struct fb_info *fb_info = dev_get_drvdata(fbdev);
41441 struct dlfb_data *dev = fb_info->par;
41442
41443 - atomic_set(&dev->bytes_rendered, 0);
41444 - atomic_set(&dev->bytes_identical, 0);
41445 - atomic_set(&dev->bytes_sent, 0);
41446 - atomic_set(&dev->cpu_kcycles_used, 0);
41447 + atomic_set_unchecked(&dev->bytes_rendered, 0);
41448 + atomic_set_unchecked(&dev->bytes_identical, 0);
41449 + atomic_set_unchecked(&dev->bytes_sent, 0);
41450 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
41451
41452 return count;
41453 }
41454 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
41455 index b0e2a42..e2df3ad 100644
41456 --- a/drivers/video/uvesafb.c
41457 +++ b/drivers/video/uvesafb.c
41458 @@ -19,6 +19,7 @@
41459 #include <linux/io.h>
41460 #include <linux/mutex.h>
41461 #include <linux/slab.h>
41462 +#include <linux/moduleloader.h>
41463 #include <video/edid.h>
41464 #include <video/uvesafb.h>
41465 #ifdef CONFIG_X86
41466 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
41467 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
41468 par->pmi_setpal = par->ypan = 0;
41469 } else {
41470 +
41471 +#ifdef CONFIG_PAX_KERNEXEC
41472 +#ifdef CONFIG_MODULES
41473 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
41474 +#endif
41475 + if (!par->pmi_code) {
41476 + par->pmi_setpal = par->ypan = 0;
41477 + return 0;
41478 + }
41479 +#endif
41480 +
41481 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
41482 + task->t.regs.edi);
41483 +
41484 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41485 + pax_open_kernel();
41486 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
41487 + pax_close_kernel();
41488 +
41489 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
41490 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
41491 +#else
41492 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
41493 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
41494 +#endif
41495 +
41496 printk(KERN_INFO "uvesafb: protected mode interface info at "
41497 "%04x:%04x\n",
41498 (u16)task->t.regs.es, (u16)task->t.regs.edi);
41499 @@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
41500 par->ypan = ypan;
41501
41502 if (par->pmi_setpal || par->ypan) {
41503 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
41504 if (__supported_pte_mask & _PAGE_NX) {
41505 par->pmi_setpal = par->ypan = 0;
41506 printk(KERN_WARNING "uvesafb: NX protection is actively."
41507 "We have better not to use the PMI.\n");
41508 - } else {
41509 + } else
41510 +#endif
41511 uvesafb_vbe_getpmi(task, par);
41512 - }
41513 }
41514 #else
41515 /* The protected mode interface is not available on non-x86. */
41516 @@ -1836,6 +1860,11 @@ out:
41517 if (par->vbe_modes)
41518 kfree(par->vbe_modes);
41519
41520 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41521 + if (par->pmi_code)
41522 + module_free_exec(NULL, par->pmi_code);
41523 +#endif
41524 +
41525 framebuffer_release(info);
41526 return err;
41527 }
41528 @@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
41529 kfree(par->vbe_state_orig);
41530 if (par->vbe_state_saved)
41531 kfree(par->vbe_state_saved);
41532 +
41533 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41534 + if (par->pmi_code)
41535 + module_free_exec(NULL, par->pmi_code);
41536 +#endif
41537 +
41538 }
41539
41540 framebuffer_release(info);
41541 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
41542 index 501b340..86bd4cf 100644
41543 --- a/drivers/video/vesafb.c
41544 +++ b/drivers/video/vesafb.c
41545 @@ -9,6 +9,7 @@
41546 */
41547
41548 #include <linux/module.h>
41549 +#include <linux/moduleloader.h>
41550 #include <linux/kernel.h>
41551 #include <linux/errno.h>
41552 #include <linux/string.h>
41553 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
41554 static int vram_total __initdata; /* Set total amount of memory */
41555 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
41556 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
41557 -static void (*pmi_start)(void) __read_mostly;
41558 -static void (*pmi_pal) (void) __read_mostly;
41559 +static void (*pmi_start)(void) __read_only;
41560 +static void (*pmi_pal) (void) __read_only;
41561 static int depth __read_mostly;
41562 static int vga_compat __read_mostly;
41563 /* --------------------------------------------------------------------- */
41564 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
41565 unsigned int size_vmode;
41566 unsigned int size_remap;
41567 unsigned int size_total;
41568 + void *pmi_code = NULL;
41569
41570 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
41571 return -ENODEV;
41572 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
41573 size_remap = size_total;
41574 vesafb_fix.smem_len = size_remap;
41575
41576 -#ifndef __i386__
41577 - screen_info.vesapm_seg = 0;
41578 -#endif
41579 -
41580 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
41581 printk(KERN_WARNING
41582 "vesafb: cannot reserve video memory at 0x%lx\n",
41583 @@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
41584 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
41585 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
41586
41587 +#ifdef __i386__
41588 +
41589 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41590 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
41591 + if (!pmi_code)
41592 +#elif !defined(CONFIG_PAX_KERNEXEC)
41593 + if (0)
41594 +#endif
41595 +
41596 +#endif
41597 + screen_info.vesapm_seg = 0;
41598 +
41599 if (screen_info.vesapm_seg) {
41600 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
41601 - screen_info.vesapm_seg,screen_info.vesapm_off);
41602 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
41603 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
41604 }
41605
41606 if (screen_info.vesapm_seg < 0xc000)
41607 @@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
41608
41609 if (ypan || pmi_setpal) {
41610 unsigned short *pmi_base;
41611 +
41612 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
41613 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
41614 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
41615 +
41616 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41617 + pax_open_kernel();
41618 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
41619 +#else
41620 + pmi_code = pmi_base;
41621 +#endif
41622 +
41623 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
41624 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
41625 +
41626 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41627 + pmi_start = ktva_ktla(pmi_start);
41628 + pmi_pal = ktva_ktla(pmi_pal);
41629 + pax_close_kernel();
41630 +#endif
41631 +
41632 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
41633 if (pmi_base[3]) {
41634 printk(KERN_INFO "vesafb: pmi: ports = ");
41635 @@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
41636 info->node, info->fix.id);
41637 return 0;
41638 err:
41639 +
41640 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
41641 + module_free_exec(NULL, pmi_code);
41642 +#endif
41643 +
41644 if (info->screen_base)
41645 iounmap(info->screen_base);
41646 framebuffer_release(info);
41647 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
41648 index 88714ae..16c2e11 100644
41649 --- a/drivers/video/via/via_clock.h
41650 +++ b/drivers/video/via/via_clock.h
41651 @@ -56,7 +56,7 @@ struct via_clock {
41652
41653 void (*set_engine_pll_state)(u8 state);
41654 void (*set_engine_pll)(struct via_pll_config config);
41655 -};
41656 +} __no_const;
41657
41658
41659 static inline u32 get_pll_internal_frequency(u32 ref_freq,
41660 diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
41661 index e56c934..fc22f4b 100644
41662 --- a/drivers/xen/xen-pciback/conf_space.h
41663 +++ b/drivers/xen/xen-pciback/conf_space.h
41664 @@ -44,15 +44,15 @@ struct config_field {
41665 struct {
41666 conf_dword_write write;
41667 conf_dword_read read;
41668 - } dw;
41669 + } __no_const dw;
41670 struct {
41671 conf_word_write write;
41672 conf_word_read read;
41673 - } w;
41674 + } __no_const w;
41675 struct {
41676 conf_byte_write write;
41677 conf_byte_read read;
41678 - } b;
41679 + } __no_const b;
41680 } u;
41681 struct list_head list;
41682 };
41683 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
41684 index 014c8dd..6f3dfe6 100644
41685 --- a/fs/9p/vfs_inode.c
41686 +++ b/fs/9p/vfs_inode.c
41687 @@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
41688 void
41689 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41690 {
41691 - char *s = nd_get_link(nd);
41692 + const char *s = nd_get_link(nd);
41693
41694 p9_debug(P9_DEBUG_VFS, " %s %s\n",
41695 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
41696 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
41697 index e95d1b6..3454244 100644
41698 --- a/fs/Kconfig.binfmt
41699 +++ b/fs/Kconfig.binfmt
41700 @@ -89,7 +89,7 @@ config HAVE_AOUT
41701
41702 config BINFMT_AOUT
41703 tristate "Kernel support for a.out and ECOFF binaries"
41704 - depends on HAVE_AOUT
41705 + depends on HAVE_AOUT && BROKEN
41706 ---help---
41707 A.out (Assembler.OUTput) is a set of formats for libraries and
41708 executables used in the earliest versions of UNIX. Linux used
41709 diff --git a/fs/aio.c b/fs/aio.c
41710 index e7f2fad..15ad8a4 100644
41711 --- a/fs/aio.c
41712 +++ b/fs/aio.c
41713 @@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
41714 size += sizeof(struct io_event) * nr_events;
41715 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
41716
41717 - if (nr_pages < 0)
41718 + if (nr_pages <= 0)
41719 return -EINVAL;
41720
41721 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
41722 @@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
41723 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41724 {
41725 ssize_t ret;
41726 + struct iovec iovstack;
41727
41728 #ifdef CONFIG_COMPAT
41729 if (compat)
41730 ret = compat_rw_copy_check_uvector(type,
41731 (struct compat_iovec __user *)kiocb->ki_buf,
41732 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41733 + kiocb->ki_nbytes, 1, &iovstack,
41734 &kiocb->ki_iovec, 1);
41735 else
41736 #endif
41737 ret = rw_copy_check_uvector(type,
41738 (struct iovec __user *)kiocb->ki_buf,
41739 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
41740 + kiocb->ki_nbytes, 1, &iovstack,
41741 &kiocb->ki_iovec, 1);
41742 if (ret < 0)
41743 goto out;
41744 @@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
41745 if (ret < 0)
41746 goto out;
41747
41748 + if (kiocb->ki_iovec == &iovstack) {
41749 + kiocb->ki_inline_vec = iovstack;
41750 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
41751 + }
41752 kiocb->ki_nr_segs = kiocb->ki_nbytes;
41753 kiocb->ki_cur_seg = 0;
41754 /* ki_nbytes/left now reflect bytes instead of segs */
41755 diff --git a/fs/attr.c b/fs/attr.c
41756 index d94d1b6..f9bccd6 100644
41757 --- a/fs/attr.c
41758 +++ b/fs/attr.c
41759 @@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
41760 unsigned long limit;
41761
41762 limit = rlimit(RLIMIT_FSIZE);
41763 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
41764 if (limit != RLIM_INFINITY && offset > limit)
41765 goto out_sig;
41766 if (offset > inode->i_sb->s_maxbytes)
41767 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
41768 index da8876d..9f3e6d8 100644
41769 --- a/fs/autofs4/waitq.c
41770 +++ b/fs/autofs4/waitq.c
41771 @@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
41772 {
41773 unsigned long sigpipe, flags;
41774 mm_segment_t fs;
41775 - const char *data = (const char *)addr;
41776 + const char __user *data = (const char __force_user *)addr;
41777 ssize_t wr = 0;
41778
41779 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
41780 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
41781 index e18da23..affc30e 100644
41782 --- a/fs/befs/linuxvfs.c
41783 +++ b/fs/befs/linuxvfs.c
41784 @@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
41785 {
41786 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
41787 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
41788 - char *link = nd_get_link(nd);
41789 + const char *link = nd_get_link(nd);
41790 if (!IS_ERR(link))
41791 kfree(link);
41792 }
41793 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
41794 index d146e18..12d1bd1 100644
41795 --- a/fs/binfmt_aout.c
41796 +++ b/fs/binfmt_aout.c
41797 @@ -16,6 +16,7 @@
41798 #include <linux/string.h>
41799 #include <linux/fs.h>
41800 #include <linux/file.h>
41801 +#include <linux/security.h>
41802 #include <linux/stat.h>
41803 #include <linux/fcntl.h>
41804 #include <linux/ptrace.h>
41805 @@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
41806 #endif
41807 # define START_STACK(u) ((void __user *)u.start_stack)
41808
41809 + memset(&dump, 0, sizeof(dump));
41810 +
41811 fs = get_fs();
41812 set_fs(KERNEL_DS);
41813 has_dumped = 1;
41814 @@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
41815
41816 /* If the size of the dump file exceeds the rlimit, then see what would happen
41817 if we wrote the stack, but not the data area. */
41818 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
41819 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41820 dump.u_dsize = 0;
41821
41822 /* Make sure we have enough room to write the stack and data areas. */
41823 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
41824 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41825 dump.u_ssize = 0;
41826
41827 @@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41828 rlim = rlimit(RLIMIT_DATA);
41829 if (rlim >= RLIM_INFINITY)
41830 rlim = ~0;
41831 +
41832 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41833 if (ex.a_data + ex.a_bss > rlim)
41834 return -ENOMEM;
41835
41836 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41837
41838 install_exec_creds(bprm);
41839
41840 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41841 + current->mm->pax_flags = 0UL;
41842 +#endif
41843 +
41844 +#ifdef CONFIG_PAX_PAGEEXEC
41845 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41846 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41847 +
41848 +#ifdef CONFIG_PAX_EMUTRAMP
41849 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41850 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41851 +#endif
41852 +
41853 +#ifdef CONFIG_PAX_MPROTECT
41854 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41855 + current->mm->pax_flags |= MF_PAX_MPROTECT;
41856 +#endif
41857 +
41858 + }
41859 +#endif
41860 +
41861 if (N_MAGIC(ex) == OMAGIC) {
41862 unsigned long text_addr, map_size;
41863 loff_t pos;
41864 @@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41865 }
41866
41867 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41868 - PROT_READ | PROT_WRITE | PROT_EXEC,
41869 + PROT_READ | PROT_WRITE,
41870 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41871 fd_offset + ex.a_text);
41872 if (error != N_DATADDR(ex)) {
41873 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
41874 index 16f7354..7cc1e24 100644
41875 --- a/fs/binfmt_elf.c
41876 +++ b/fs/binfmt_elf.c
41877 @@ -32,6 +32,7 @@
41878 #include <linux/elf.h>
41879 #include <linux/utsname.h>
41880 #include <linux/coredump.h>
41881 +#include <linux/xattr.h>
41882 #include <asm/uaccess.h>
41883 #include <asm/param.h>
41884 #include <asm/page.h>
41885 @@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41886 #define elf_core_dump NULL
41887 #endif
41888
41889 +#ifdef CONFIG_PAX_MPROTECT
41890 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41891 +#endif
41892 +
41893 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41894 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41895 #else
41896 @@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41897 .load_binary = load_elf_binary,
41898 .load_shlib = load_elf_library,
41899 .core_dump = elf_core_dump,
41900 +
41901 +#ifdef CONFIG_PAX_MPROTECT
41902 + .handle_mprotect= elf_handle_mprotect,
41903 +#endif
41904 +
41905 .min_coredump = ELF_EXEC_PAGESIZE,
41906 };
41907
41908 @@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41909
41910 static int set_brk(unsigned long start, unsigned long end)
41911 {
41912 + unsigned long e = end;
41913 +
41914 start = ELF_PAGEALIGN(start);
41915 end = ELF_PAGEALIGN(end);
41916 if (end > start) {
41917 @@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41918 if (BAD_ADDR(addr))
41919 return addr;
41920 }
41921 - current->mm->start_brk = current->mm->brk = end;
41922 + current->mm->start_brk = current->mm->brk = e;
41923 return 0;
41924 }
41925
41926 @@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41927 elf_addr_t __user *u_rand_bytes;
41928 const char *k_platform = ELF_PLATFORM;
41929 const char *k_base_platform = ELF_BASE_PLATFORM;
41930 - unsigned char k_rand_bytes[16];
41931 + u32 k_rand_bytes[4];
41932 int items;
41933 elf_addr_t *elf_info;
41934 int ei_index = 0;
41935 const struct cred *cred = current_cred();
41936 struct vm_area_struct *vma;
41937 + unsigned long saved_auxv[AT_VECTOR_SIZE];
41938
41939 /*
41940 * In some cases (e.g. Hyper-Threading), we want to avoid L1
41941 @@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41942 * Generate 16 random bytes for userspace PRNG seeding.
41943 */
41944 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41945 - u_rand_bytes = (elf_addr_t __user *)
41946 - STACK_ALLOC(p, sizeof(k_rand_bytes));
41947 + srandom32(k_rand_bytes[0] ^ random32());
41948 + srandom32(k_rand_bytes[1] ^ random32());
41949 + srandom32(k_rand_bytes[2] ^ random32());
41950 + srandom32(k_rand_bytes[3] ^ random32());
41951 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
41952 + u_rand_bytes = (elf_addr_t __user *) p;
41953 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41954 return -EFAULT;
41955
41956 @@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41957 return -EFAULT;
41958 current->mm->env_end = p;
41959
41960 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41961 +
41962 /* Put the elf_info on the stack in the right place. */
41963 sp = (elf_addr_t __user *)envp + 1;
41964 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41965 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41966 return -EFAULT;
41967 return 0;
41968 }
41969 @@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41970 {
41971 struct elf_phdr *elf_phdata;
41972 struct elf_phdr *eppnt;
41973 - unsigned long load_addr = 0;
41974 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41975 int load_addr_set = 0;
41976 unsigned long last_bss = 0, elf_bss = 0;
41977 - unsigned long error = ~0UL;
41978 + unsigned long error = -EINVAL;
41979 unsigned long total_size;
41980 int retval, i, size;
41981
41982 @@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41983 goto out_close;
41984 }
41985
41986 +#ifdef CONFIG_PAX_SEGMEXEC
41987 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
41988 + pax_task_size = SEGMEXEC_TASK_SIZE;
41989 +#endif
41990 +
41991 eppnt = elf_phdata;
41992 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
41993 if (eppnt->p_type == PT_LOAD) {
41994 @@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41995 k = load_addr + eppnt->p_vaddr;
41996 if (BAD_ADDR(k) ||
41997 eppnt->p_filesz > eppnt->p_memsz ||
41998 - eppnt->p_memsz > TASK_SIZE ||
41999 - TASK_SIZE - eppnt->p_memsz < k) {
42000 + eppnt->p_memsz > pax_task_size ||
42001 + pax_task_size - eppnt->p_memsz < k) {
42002 error = -ENOMEM;
42003 goto out_close;
42004 }
42005 @@ -525,6 +549,311 @@ out:
42006 return error;
42007 }
42008
42009 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42010 +#ifdef CONFIG_PAX_SOFTMODE
42011 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42012 +{
42013 + unsigned long pax_flags = 0UL;
42014 +
42015 +#ifdef CONFIG_PAX_PAGEEXEC
42016 + if (elf_phdata->p_flags & PF_PAGEEXEC)
42017 + pax_flags |= MF_PAX_PAGEEXEC;
42018 +#endif
42019 +
42020 +#ifdef CONFIG_PAX_SEGMEXEC
42021 + if (elf_phdata->p_flags & PF_SEGMEXEC)
42022 + pax_flags |= MF_PAX_SEGMEXEC;
42023 +#endif
42024 +
42025 +#ifdef CONFIG_PAX_EMUTRAMP
42026 + if (elf_phdata->p_flags & PF_EMUTRAMP)
42027 + pax_flags |= MF_PAX_EMUTRAMP;
42028 +#endif
42029 +
42030 +#ifdef CONFIG_PAX_MPROTECT
42031 + if (elf_phdata->p_flags & PF_MPROTECT)
42032 + pax_flags |= MF_PAX_MPROTECT;
42033 +#endif
42034 +
42035 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42036 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42037 + pax_flags |= MF_PAX_RANDMMAP;
42038 +#endif
42039 +
42040 + return pax_flags;
42041 +}
42042 +#endif
42043 +
42044 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42045 +{
42046 + unsigned long pax_flags = 0UL;
42047 +
42048 +#ifdef CONFIG_PAX_PAGEEXEC
42049 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42050 + pax_flags |= MF_PAX_PAGEEXEC;
42051 +#endif
42052 +
42053 +#ifdef CONFIG_PAX_SEGMEXEC
42054 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42055 + pax_flags |= MF_PAX_SEGMEXEC;
42056 +#endif
42057 +
42058 +#ifdef CONFIG_PAX_EMUTRAMP
42059 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42060 + pax_flags |= MF_PAX_EMUTRAMP;
42061 +#endif
42062 +
42063 +#ifdef CONFIG_PAX_MPROTECT
42064 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42065 + pax_flags |= MF_PAX_MPROTECT;
42066 +#endif
42067 +
42068 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42069 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42070 + pax_flags |= MF_PAX_RANDMMAP;
42071 +#endif
42072 +
42073 + return pax_flags;
42074 +}
42075 +#endif
42076 +
42077 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42078 +#ifdef CONFIG_PAX_SOFTMODE
42079 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42080 +{
42081 + unsigned long pax_flags = 0UL;
42082 +
42083 +#ifdef CONFIG_PAX_PAGEEXEC
42084 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42085 + pax_flags |= MF_PAX_PAGEEXEC;
42086 +#endif
42087 +
42088 +#ifdef CONFIG_PAX_SEGMEXEC
42089 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42090 + pax_flags |= MF_PAX_SEGMEXEC;
42091 +#endif
42092 +
42093 +#ifdef CONFIG_PAX_EMUTRAMP
42094 + if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42095 + pax_flags |= MF_PAX_EMUTRAMP;
42096 +#endif
42097 +
42098 +#ifdef CONFIG_PAX_MPROTECT
42099 + if (pax_flags_softmode & MF_PAX_MPROTECT)
42100 + pax_flags |= MF_PAX_MPROTECT;
42101 +#endif
42102 +
42103 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42104 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42105 + pax_flags |= MF_PAX_RANDMMAP;
42106 +#endif
42107 +
42108 + return pax_flags;
42109 +}
42110 +#endif
42111 +
42112 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42113 +{
42114 + unsigned long pax_flags = 0UL;
42115 +
42116 +#ifdef CONFIG_PAX_PAGEEXEC
42117 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42118 + pax_flags |= MF_PAX_PAGEEXEC;
42119 +#endif
42120 +
42121 +#ifdef CONFIG_PAX_SEGMEXEC
42122 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42123 + pax_flags |= MF_PAX_SEGMEXEC;
42124 +#endif
42125 +
42126 +#ifdef CONFIG_PAX_EMUTRAMP
42127 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42128 + pax_flags |= MF_PAX_EMUTRAMP;
42129 +#endif
42130 +
42131 +#ifdef CONFIG_PAX_MPROTECT
42132 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42133 + pax_flags |= MF_PAX_MPROTECT;
42134 +#endif
42135 +
42136 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42137 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42138 + pax_flags |= MF_PAX_RANDMMAP;
42139 +#endif
42140 +
42141 + return pax_flags;
42142 +}
42143 +#endif
42144 +
42145 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42146 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42147 +{
42148 + unsigned long pax_flags = 0UL;
42149 +
42150 +#ifdef CONFIG_PAX_EI_PAX
42151 +
42152 +#ifdef CONFIG_PAX_PAGEEXEC
42153 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42154 + pax_flags |= MF_PAX_PAGEEXEC;
42155 +#endif
42156 +
42157 +#ifdef CONFIG_PAX_SEGMEXEC
42158 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42159 + pax_flags |= MF_PAX_SEGMEXEC;
42160 +#endif
42161 +
42162 +#ifdef CONFIG_PAX_EMUTRAMP
42163 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42164 + pax_flags |= MF_PAX_EMUTRAMP;
42165 +#endif
42166 +
42167 +#ifdef CONFIG_PAX_MPROTECT
42168 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42169 + pax_flags |= MF_PAX_MPROTECT;
42170 +#endif
42171 +
42172 +#ifdef CONFIG_PAX_ASLR
42173 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42174 + pax_flags |= MF_PAX_RANDMMAP;
42175 +#endif
42176 +
42177 +#else
42178 +
42179 +#ifdef CONFIG_PAX_PAGEEXEC
42180 + pax_flags |= MF_PAX_PAGEEXEC;
42181 +#endif
42182 +
42183 +#ifdef CONFIG_PAX_SEGMEXEC
42184 + pax_flags |= MF_PAX_SEGMEXEC;
42185 +#endif
42186 +
42187 +#ifdef CONFIG_PAX_MPROTECT
42188 + pax_flags |= MF_PAX_MPROTECT;
42189 +#endif
42190 +
42191 +#ifdef CONFIG_PAX_RANDMMAP
42192 + if (randomize_va_space)
42193 + pax_flags |= MF_PAX_RANDMMAP;
42194 +#endif
42195 +
42196 +#endif
42197 +
42198 + return pax_flags;
42199 +}
42200 +
42201 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42202 +{
42203 +
42204 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
42205 + unsigned long i;
42206 +
42207 + for (i = 0UL; i < elf_ex->e_phnum; i++)
42208 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42209 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42210 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42211 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42212 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42213 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
42214 + return ~0UL;
42215 +
42216 +#ifdef CONFIG_PAX_SOFTMODE
42217 + if (pax_softmode)
42218 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42219 + else
42220 +#endif
42221 +
42222 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42223 + break;
42224 + }
42225 +#endif
42226 +
42227 + return ~0UL;
42228 +}
42229 +
42230 +static unsigned long pax_parse_xattr_pax(struct file * const file)
42231 +{
42232 +
42233 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42234 + ssize_t xattr_size, i;
42235 + unsigned char xattr_value[5];
42236 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42237 +
42238 + xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42239 + if (xattr_size <= 0)
42240 + return ~0UL;
42241 +
42242 + for (i = 0; i < xattr_size; i++)
42243 + switch (xattr_value[i]) {
42244 + default:
42245 + return ~0UL;
42246 +
42247 +#define parse_flag(option1, option2, flag) \
42248 + case option1: \
42249 + pax_flags_hardmode |= MF_PAX_##flag; \
42250 + break; \
42251 + case option2: \
42252 + pax_flags_softmode |= MF_PAX_##flag; \
42253 + break;
42254 +
42255 + parse_flag('p', 'P', PAGEEXEC);
42256 + parse_flag('e', 'E', EMUTRAMP);
42257 + parse_flag('m', 'M', MPROTECT);
42258 + parse_flag('r', 'R', RANDMMAP);
42259 + parse_flag('s', 'S', SEGMEXEC);
42260 +
42261 +#undef parse_flag
42262 + }
42263 +
42264 + if (pax_flags_hardmode & pax_flags_softmode)
42265 + return ~0UL;
42266 +
42267 +#ifdef CONFIG_PAX_SOFTMODE
42268 + if (pax_softmode)
42269 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42270 + else
42271 +#endif
42272 +
42273 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42274 +#else
42275 + return ~0UL;
42276 +#endif
42277 +
42278 +}
42279 +
42280 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42281 +{
42282 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42283 +
42284 + pax_flags = pax_parse_ei_pax(elf_ex);
42285 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42286 + xattr_pax_flags = pax_parse_xattr_pax(file);
42287 +
42288 + if (pt_pax_flags == ~0UL)
42289 + pt_pax_flags = xattr_pax_flags;
42290 + else if (xattr_pax_flags == ~0UL)
42291 + xattr_pax_flags = pt_pax_flags;
42292 + if (pt_pax_flags != xattr_pax_flags)
42293 + return -EINVAL;
42294 + if (pt_pax_flags != ~0UL)
42295 + pax_flags = pt_pax_flags;
42296 +
42297 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42298 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42299 + if ((__supported_pte_mask & _PAGE_NX))
42300 + pax_flags &= ~MF_PAX_SEGMEXEC;
42301 + else
42302 + pax_flags &= ~MF_PAX_PAGEEXEC;
42303 + }
42304 +#endif
42305 +
42306 + if (0 > pax_check_flags(&pax_flags))
42307 + return -EINVAL;
42308 +
42309 + current->mm->pax_flags = pax_flags;
42310 + return 0;
42311 +}
42312 +#endif
42313 +
42314 /*
42315 * These are the functions used to load ELF style executables and shared
42316 * libraries. There is no binary dependent code anywhere else.
42317 @@ -541,6 +870,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42318 {
42319 unsigned int random_variable = 0;
42320
42321 +#ifdef CONFIG_PAX_RANDUSTACK
42322 + if (randomize_va_space)
42323 + return stack_top - current->mm->delta_stack;
42324 +#endif
42325 +
42326 if ((current->flags & PF_RANDOMIZE) &&
42327 !(current->personality & ADDR_NO_RANDOMIZE)) {
42328 random_variable = get_random_int() & STACK_RND_MASK;
42329 @@ -559,7 +893,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42330 unsigned long load_addr = 0, load_bias = 0;
42331 int load_addr_set = 0;
42332 char * elf_interpreter = NULL;
42333 - unsigned long error;
42334 + unsigned long error = 0;
42335 struct elf_phdr *elf_ppnt, *elf_phdata;
42336 unsigned long elf_bss, elf_brk;
42337 int retval, i;
42338 @@ -569,11 +903,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42339 unsigned long start_code, end_code, start_data, end_data;
42340 unsigned long reloc_func_desc __maybe_unused = 0;
42341 int executable_stack = EXSTACK_DEFAULT;
42342 - unsigned long def_flags = 0;
42343 struct {
42344 struct elfhdr elf_ex;
42345 struct elfhdr interp_elf_ex;
42346 } *loc;
42347 + unsigned long pax_task_size = TASK_SIZE;
42348
42349 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42350 if (!loc) {
42351 @@ -709,11 +1043,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42352 goto out_free_dentry;
42353
42354 /* OK, This is the point of no return */
42355 - current->mm->def_flags = def_flags;
42356 +
42357 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42358 + current->mm->pax_flags = 0UL;
42359 +#endif
42360 +
42361 +#ifdef CONFIG_PAX_DLRESOLVE
42362 + current->mm->call_dl_resolve = 0UL;
42363 +#endif
42364 +
42365 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42366 + current->mm->call_syscall = 0UL;
42367 +#endif
42368 +
42369 +#ifdef CONFIG_PAX_ASLR
42370 + current->mm->delta_mmap = 0UL;
42371 + current->mm->delta_stack = 0UL;
42372 +#endif
42373 +
42374 + current->mm->def_flags = 0;
42375 +
42376 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42377 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42378 + send_sig(SIGKILL, current, 0);
42379 + goto out_free_dentry;
42380 + }
42381 +#endif
42382 +
42383 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42384 + pax_set_initial_flags(bprm);
42385 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42386 + if (pax_set_initial_flags_func)
42387 + (pax_set_initial_flags_func)(bprm);
42388 +#endif
42389 +
42390 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42391 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42392 + current->mm->context.user_cs_limit = PAGE_SIZE;
42393 + current->mm->def_flags |= VM_PAGEEXEC;
42394 + }
42395 +#endif
42396 +
42397 +#ifdef CONFIG_PAX_SEGMEXEC
42398 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42399 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42400 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42401 + pax_task_size = SEGMEXEC_TASK_SIZE;
42402 + current->mm->def_flags |= VM_NOHUGEPAGE;
42403 + }
42404 +#endif
42405 +
42406 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42407 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42408 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42409 + put_cpu();
42410 + }
42411 +#endif
42412
42413 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42414 may depend on the personality. */
42415 SET_PERSONALITY(loc->elf_ex);
42416 +
42417 +#ifdef CONFIG_PAX_ASLR
42418 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42419 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42420 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42421 + }
42422 +#endif
42423 +
42424 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42425 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42426 + executable_stack = EXSTACK_DISABLE_X;
42427 + current->personality &= ~READ_IMPLIES_EXEC;
42428 + } else
42429 +#endif
42430 +
42431 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42432 current->personality |= READ_IMPLIES_EXEC;
42433
42434 @@ -804,6 +1208,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42435 #else
42436 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42437 #endif
42438 +
42439 +#ifdef CONFIG_PAX_RANDMMAP
42440 + /* PaX: randomize base address at the default exe base if requested */
42441 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42442 +#ifdef CONFIG_SPARC64
42443 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42444 +#else
42445 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42446 +#endif
42447 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42448 + elf_flags |= MAP_FIXED;
42449 + }
42450 +#endif
42451 +
42452 }
42453
42454 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
42455 @@ -836,9 +1254,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42456 * allowed task size. Note that p_filesz must always be
42457 * <= p_memsz so it is only necessary to check p_memsz.
42458 */
42459 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42460 - elf_ppnt->p_memsz > TASK_SIZE ||
42461 - TASK_SIZE - elf_ppnt->p_memsz < k) {
42462 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42463 + elf_ppnt->p_memsz > pax_task_size ||
42464 + pax_task_size - elf_ppnt->p_memsz < k) {
42465 /* set_brk can never work. Avoid overflows. */
42466 send_sig(SIGKILL, current, 0);
42467 retval = -EINVAL;
42468 @@ -877,11 +1295,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42469 goto out_free_dentry;
42470 }
42471 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42472 - send_sig(SIGSEGV, current, 0);
42473 - retval = -EFAULT; /* Nobody gets to see this, but.. */
42474 - goto out_free_dentry;
42475 + /*
42476 + * This bss-zeroing can fail if the ELF
42477 + * file specifies odd protections. So
42478 + * we don't check the return value
42479 + */
42480 }
42481
42482 +#ifdef CONFIG_PAX_RANDMMAP
42483 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42484 + unsigned long start, size;
42485 +
42486 + start = ELF_PAGEALIGN(elf_brk);
42487 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42488 + down_write(&current->mm->mmap_sem);
42489 + retval = -ENOMEM;
42490 + if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42491 + unsigned long prot = PROT_NONE;
42492 +
42493 + current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42494 +// if (current->personality & ADDR_NO_RANDOMIZE)
42495 +// prot = PROT_READ;
42496 + start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42497 + retval = IS_ERR_VALUE(start) ? start : 0;
42498 + }
42499 + up_write(&current->mm->mmap_sem);
42500 + if (retval == 0)
42501 + retval = set_brk(start + size, start + size + PAGE_SIZE);
42502 + if (retval < 0) {
42503 + send_sig(SIGKILL, current, 0);
42504 + goto out_free_dentry;
42505 + }
42506 + }
42507 +#endif
42508 +
42509 if (elf_interpreter) {
42510 unsigned long uninitialized_var(interp_map_addr);
42511
42512 @@ -1109,7 +1556,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42513 * Decide what to dump of a segment, part, all or none.
42514 */
42515 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42516 - unsigned long mm_flags)
42517 + unsigned long mm_flags, long signr)
42518 {
42519 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42520
42521 @@ -1146,7 +1593,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42522 if (vma->vm_file == NULL)
42523 return 0;
42524
42525 - if (FILTER(MAPPED_PRIVATE))
42526 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42527 goto whole;
42528
42529 /*
42530 @@ -1368,9 +1815,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42531 {
42532 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42533 int i = 0;
42534 - do
42535 + do {
42536 i += 2;
42537 - while (auxv[i - 2] != AT_NULL);
42538 + } while (auxv[i - 2] != AT_NULL);
42539 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42540 }
42541
42542 @@ -1892,14 +2339,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42543 }
42544
42545 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42546 - unsigned long mm_flags)
42547 + struct coredump_params *cprm)
42548 {
42549 struct vm_area_struct *vma;
42550 size_t size = 0;
42551
42552 for (vma = first_vma(current, gate_vma); vma != NULL;
42553 vma = next_vma(vma, gate_vma))
42554 - size += vma_dump_size(vma, mm_flags);
42555 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42556 return size;
42557 }
42558
42559 @@ -1993,7 +2440,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42560
42561 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42562
42563 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42564 + offset += elf_core_vma_data_size(gate_vma, cprm);
42565 offset += elf_core_extra_data_size();
42566 e_shoff = offset;
42567
42568 @@ -2007,10 +2454,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42569 offset = dataoff;
42570
42571 size += sizeof(*elf);
42572 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42573 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42574 goto end_coredump;
42575
42576 size += sizeof(*phdr4note);
42577 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42578 if (size > cprm->limit
42579 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42580 goto end_coredump;
42581 @@ -2024,7 +2473,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42582 phdr.p_offset = offset;
42583 phdr.p_vaddr = vma->vm_start;
42584 phdr.p_paddr = 0;
42585 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42586 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42587 phdr.p_memsz = vma->vm_end - vma->vm_start;
42588 offset += phdr.p_filesz;
42589 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
42590 @@ -2035,6 +2484,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42591 phdr.p_align = ELF_EXEC_PAGESIZE;
42592
42593 size += sizeof(phdr);
42594 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42595 if (size > cprm->limit
42596 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42597 goto end_coredump;
42598 @@ -2059,7 +2509,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42599 unsigned long addr;
42600 unsigned long end;
42601
42602 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42603 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42604
42605 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42606 struct page *page;
42607 @@ -2068,6 +2518,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42608 page = get_dump_page(addr);
42609 if (page) {
42610 void *kaddr = kmap(page);
42611 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42612 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42613 !dump_write(cprm->file, kaddr,
42614 PAGE_SIZE);
42615 @@ -2085,6 +2536,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42616
42617 if (e_phnum == PN_XNUM) {
42618 size += sizeof(*shdr4extnum);
42619 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
42620 if (size > cprm->limit
42621 || !dump_write(cprm->file, shdr4extnum,
42622 sizeof(*shdr4extnum)))
42623 @@ -2105,6 +2557,97 @@ out:
42624
42625 #endif /* CONFIG_ELF_CORE */
42626
42627 +#ifdef CONFIG_PAX_MPROTECT
42628 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
42629 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42630 + * we'll remove VM_MAYWRITE for good on RELRO segments.
42631 + *
42632 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42633 + * basis because we want to allow the common case and not the special ones.
42634 + */
42635 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42636 +{
42637 + struct elfhdr elf_h;
42638 + struct elf_phdr elf_p;
42639 + unsigned long i;
42640 + unsigned long oldflags;
42641 + bool is_textrel_rw, is_textrel_rx, is_relro;
42642 +
42643 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42644 + return;
42645 +
42646 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42647 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42648 +
42649 +#ifdef CONFIG_PAX_ELFRELOCS
42650 + /* possible TEXTREL */
42651 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42652 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42653 +#else
42654 + is_textrel_rw = false;
42655 + is_textrel_rx = false;
42656 +#endif
42657 +
42658 + /* possible RELRO */
42659 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42660 +
42661 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42662 + return;
42663 +
42664 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42665 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42666 +
42667 +#ifdef CONFIG_PAX_ETEXECRELOCS
42668 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42669 +#else
42670 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42671 +#endif
42672 +
42673 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42674 + !elf_check_arch(&elf_h) ||
42675 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42676 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42677 + return;
42678 +
42679 + for (i = 0UL; i < elf_h.e_phnum; i++) {
42680 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42681 + return;
42682 + switch (elf_p.p_type) {
42683 + case PT_DYNAMIC:
42684 + if (!is_textrel_rw && !is_textrel_rx)
42685 + continue;
42686 + i = 0UL;
42687 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42688 + elf_dyn dyn;
42689 +
42690 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42691 + return;
42692 + if (dyn.d_tag == DT_NULL)
42693 + return;
42694 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42695 + gr_log_textrel(vma);
42696 + if (is_textrel_rw)
42697 + vma->vm_flags |= VM_MAYWRITE;
42698 + else
42699 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
42700 + vma->vm_flags &= ~VM_MAYWRITE;
42701 + return;
42702 + }
42703 + i++;
42704 + }
42705 + return;
42706 +
42707 + case PT_GNU_RELRO:
42708 + if (!is_relro)
42709 + continue;
42710 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42711 + vma->vm_flags &= ~VM_MAYWRITE;
42712 + return;
42713 + }
42714 + }
42715 +}
42716 +#endif
42717 +
42718 static int __init init_elf_binfmt(void)
42719 {
42720 register_binfmt(&elf_format);
42721 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
42722 index 6b2daf9..a70dccb 100644
42723 --- a/fs/binfmt_flat.c
42724 +++ b/fs/binfmt_flat.c
42725 @@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42726 realdatastart = (unsigned long) -ENOMEM;
42727 printk("Unable to allocate RAM for process data, errno %d\n",
42728 (int)-realdatastart);
42729 + down_write(&current->mm->mmap_sem);
42730 do_munmap(current->mm, textpos, text_len);
42731 + up_write(&current->mm->mmap_sem);
42732 ret = realdatastart;
42733 goto err;
42734 }
42735 @@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42736 }
42737 if (IS_ERR_VALUE(result)) {
42738 printk("Unable to read data+bss, errno %d\n", (int)-result);
42739 + down_write(&current->mm->mmap_sem);
42740 do_munmap(current->mm, textpos, text_len);
42741 do_munmap(current->mm, realdatastart, len);
42742 + up_write(&current->mm->mmap_sem);
42743 ret = result;
42744 goto err;
42745 }
42746 @@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
42747 }
42748 if (IS_ERR_VALUE(result)) {
42749 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42750 + down_write(&current->mm->mmap_sem);
42751 do_munmap(current->mm, textpos, text_len + data_len + extra +
42752 MAX_SHARED_LIBS * sizeof(unsigned long));
42753 + up_write(&current->mm->mmap_sem);
42754 ret = result;
42755 goto err;
42756 }
42757 diff --git a/fs/bio.c b/fs/bio.c
42758 index 84da885..bac1d48 100644
42759 --- a/fs/bio.c
42760 +++ b/fs/bio.c
42761 @@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42762 /*
42763 * Overflow, abort
42764 */
42765 - if (end < start)
42766 + if (end < start || end - start > INT_MAX - nr_pages)
42767 return ERR_PTR(-EINVAL);
42768
42769 nr_pages += end - start;
42770 @@ -972,7 +972,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
42771 /*
42772 * Overflow, abort
42773 */
42774 - if (end < start)
42775 + if (end < start || end - start > INT_MAX - nr_pages)
42776 return ERR_PTR(-EINVAL);
42777
42778 nr_pages += end - start;
42779 @@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42780 const int read = bio_data_dir(bio) == READ;
42781 struct bio_map_data *bmd = bio->bi_private;
42782 int i;
42783 - char *p = bmd->sgvecs[0].iov_base;
42784 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42785
42786 __bio_for_each_segment(bvec, bio, i, 0) {
42787 char *addr = page_address(bvec->bv_page);
42788 diff --git a/fs/block_dev.c b/fs/block_dev.c
42789 index ba11c30..623d736 100644
42790 --- a/fs/block_dev.c
42791 +++ b/fs/block_dev.c
42792 @@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
42793 else if (bdev->bd_contains == bdev)
42794 return true; /* is a whole device which isn't held */
42795
42796 - else if (whole->bd_holder == bd_may_claim)
42797 + else if (whole->bd_holder == (void *)bd_may_claim)
42798 return true; /* is a partition of a device that is being partitioned */
42799 else if (whole->bd_holder != NULL)
42800 return false; /* is a partition of a held device */
42801 diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
42802 index c053e90..e5f1afc 100644
42803 --- a/fs/btrfs/check-integrity.c
42804 +++ b/fs/btrfs/check-integrity.c
42805 @@ -156,7 +156,7 @@ struct btrfsic_block {
42806 union {
42807 bio_end_io_t *bio;
42808 bh_end_io_t *bh;
42809 - } orig_bio_bh_end_io;
42810 + } __no_const orig_bio_bh_end_io;
42811 int submit_bio_bh_rw;
42812 u64 flush_gen; /* only valid if !never_written */
42813 };
42814 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
42815 index 4106264..8157ede 100644
42816 --- a/fs/btrfs/ctree.c
42817 +++ b/fs/btrfs/ctree.c
42818 @@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42819 free_extent_buffer(buf);
42820 add_root_to_dirty_list(root);
42821 } else {
42822 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42823 - parent_start = parent->start;
42824 - else
42825 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42826 + if (parent)
42827 + parent_start = parent->start;
42828 + else
42829 + parent_start = 0;
42830 + } else
42831 parent_start = 0;
42832
42833 WARN_ON(trans->transid != btrfs_header_generation(parent));
42834 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
42835 index 0df0d1f..4bdcbfe 100644
42836 --- a/fs/btrfs/inode.c
42837 +++ b/fs/btrfs/inode.c
42838 @@ -7074,7 +7074,7 @@ fail:
42839 return -ENOMEM;
42840 }
42841
42842 -static int btrfs_getattr(struct vfsmount *mnt,
42843 +int btrfs_getattr(struct vfsmount *mnt,
42844 struct dentry *dentry, struct kstat *stat)
42845 {
42846 struct inode *inode = dentry->d_inode;
42847 @@ -7088,6 +7088,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42848 return 0;
42849 }
42850
42851 +EXPORT_SYMBOL(btrfs_getattr);
42852 +
42853 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
42854 +{
42855 + return BTRFS_I(inode)->root->anon_dev;
42856 +}
42857 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42858 +
42859 /*
42860 * If a file is moved, it will inherit the cow and compression flags of the new
42861 * directory.
42862 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
42863 index 14f8e1f..ab8d81f 100644
42864 --- a/fs/btrfs/ioctl.c
42865 +++ b/fs/btrfs/ioctl.c
42866 @@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42867 for (i = 0; i < num_types; i++) {
42868 struct btrfs_space_info *tmp;
42869
42870 + /* Don't copy in more than we allocated */
42871 if (!slot_count)
42872 break;
42873
42874 + slot_count--;
42875 +
42876 info = NULL;
42877 rcu_read_lock();
42878 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
42879 @@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42880 memcpy(dest, &space, sizeof(space));
42881 dest++;
42882 space_args.total_spaces++;
42883 - slot_count--;
42884 }
42885 - if (!slot_count)
42886 - break;
42887 }
42888 up_read(&info->groups_sem);
42889 }
42890
42891 - user_dest = (struct btrfs_ioctl_space_info *)
42892 + user_dest = (struct btrfs_ioctl_space_info __user *)
42893 (arg + sizeof(struct btrfs_ioctl_space_args));
42894
42895 if (copy_to_user(user_dest, dest_orig, alloc_size))
42896 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
42897 index 646ee21..f020f87 100644
42898 --- a/fs/btrfs/relocation.c
42899 +++ b/fs/btrfs/relocation.c
42900 @@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42901 }
42902 spin_unlock(&rc->reloc_root_tree.lock);
42903
42904 - BUG_ON((struct btrfs_root *)node->data != root);
42905 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
42906
42907 if (!del) {
42908 spin_lock(&rc->reloc_root_tree.lock);
42909 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42910 index 622f469..e8d2d55 100644
42911 --- a/fs/cachefiles/bind.c
42912 +++ b/fs/cachefiles/bind.c
42913 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42914 args);
42915
42916 /* start by checking things over */
42917 - ASSERT(cache->fstop_percent >= 0 &&
42918 - cache->fstop_percent < cache->fcull_percent &&
42919 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
42920 cache->fcull_percent < cache->frun_percent &&
42921 cache->frun_percent < 100);
42922
42923 - ASSERT(cache->bstop_percent >= 0 &&
42924 - cache->bstop_percent < cache->bcull_percent &&
42925 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
42926 cache->bcull_percent < cache->brun_percent &&
42927 cache->brun_percent < 100);
42928
42929 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42930 index 0a1467b..6a53245 100644
42931 --- a/fs/cachefiles/daemon.c
42932 +++ b/fs/cachefiles/daemon.c
42933 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42934 if (n > buflen)
42935 return -EMSGSIZE;
42936
42937 - if (copy_to_user(_buffer, buffer, n) != 0)
42938 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42939 return -EFAULT;
42940
42941 return n;
42942 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42943 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42944 return -EIO;
42945
42946 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
42947 + if (datalen > PAGE_SIZE - 1)
42948 return -EOPNOTSUPP;
42949
42950 /* drag the command string into the kernel so we can parse it */
42951 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42952 if (args[0] != '%' || args[1] != '\0')
42953 return -EINVAL;
42954
42955 - if (fstop < 0 || fstop >= cache->fcull_percent)
42956 + if (fstop >= cache->fcull_percent)
42957 return cachefiles_daemon_range_error(cache, args);
42958
42959 cache->fstop_percent = fstop;
42960 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42961 if (args[0] != '%' || args[1] != '\0')
42962 return -EINVAL;
42963
42964 - if (bstop < 0 || bstop >= cache->bcull_percent)
42965 + if (bstop >= cache->bcull_percent)
42966 return cachefiles_daemon_range_error(cache, args);
42967
42968 cache->bstop_percent = bstop;
42969 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42970 index bd6bc1b..b627b53 100644
42971 --- a/fs/cachefiles/internal.h
42972 +++ b/fs/cachefiles/internal.h
42973 @@ -57,7 +57,7 @@ struct cachefiles_cache {
42974 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42975 struct rb_root active_nodes; /* active nodes (can't be culled) */
42976 rwlock_t active_lock; /* lock for active_nodes */
42977 - atomic_t gravecounter; /* graveyard uniquifier */
42978 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42979 unsigned frun_percent; /* when to stop culling (% files) */
42980 unsigned fcull_percent; /* when to start culling (% files) */
42981 unsigned fstop_percent; /* when to stop allocating (% files) */
42982 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
42983 * proc.c
42984 */
42985 #ifdef CONFIG_CACHEFILES_HISTOGRAM
42986 -extern atomic_t cachefiles_lookup_histogram[HZ];
42987 -extern atomic_t cachefiles_mkdir_histogram[HZ];
42988 -extern atomic_t cachefiles_create_histogram[HZ];
42989 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
42990 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
42991 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
42992
42993 extern int __init cachefiles_proc_init(void);
42994 extern void cachefiles_proc_cleanup(void);
42995 static inline
42996 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
42997 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
42998 {
42999 unsigned long jif = jiffies - start_jif;
43000 if (jif >= HZ)
43001 jif = HZ - 1;
43002 - atomic_inc(&histogram[jif]);
43003 + atomic_inc_unchecked(&histogram[jif]);
43004 }
43005
43006 #else
43007 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
43008 index 7f0771d..87d4f36 100644
43009 --- a/fs/cachefiles/namei.c
43010 +++ b/fs/cachefiles/namei.c
43011 @@ -318,7 +318,7 @@ try_again:
43012 /* first step is to make up a grave dentry in the graveyard */
43013 sprintf(nbuffer, "%08x%08x",
43014 (uint32_t) get_seconds(),
43015 - (uint32_t) atomic_inc_return(&cache->gravecounter));
43016 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43017
43018 /* do the multiway lock magic */
43019 trap = lock_rename(cache->graveyard, dir);
43020 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43021 index eccd339..4c1d995 100644
43022 --- a/fs/cachefiles/proc.c
43023 +++ b/fs/cachefiles/proc.c
43024 @@ -14,9 +14,9 @@
43025 #include <linux/seq_file.h>
43026 #include "internal.h"
43027
43028 -atomic_t cachefiles_lookup_histogram[HZ];
43029 -atomic_t cachefiles_mkdir_histogram[HZ];
43030 -atomic_t cachefiles_create_histogram[HZ];
43031 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43032 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43033 +atomic_unchecked_t cachefiles_create_histogram[HZ];
43034
43035 /*
43036 * display the latency histogram
43037 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43038 return 0;
43039 default:
43040 index = (unsigned long) v - 3;
43041 - x = atomic_read(&cachefiles_lookup_histogram[index]);
43042 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
43043 - z = atomic_read(&cachefiles_create_histogram[index]);
43044 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43045 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43046 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43047 if (x == 0 && y == 0 && z == 0)
43048 return 0;
43049
43050 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43051 index 0e3c092..818480e 100644
43052 --- a/fs/cachefiles/rdwr.c
43053 +++ b/fs/cachefiles/rdwr.c
43054 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43055 old_fs = get_fs();
43056 set_fs(KERNEL_DS);
43057 ret = file->f_op->write(
43058 - file, (const void __user *) data, len, &pos);
43059 + file, (const void __force_user *) data, len, &pos);
43060 set_fs(old_fs);
43061 kunmap(page);
43062 if (ret != len)
43063 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
43064 index 3e8094b..cb3ff3d 100644
43065 --- a/fs/ceph/dir.c
43066 +++ b/fs/ceph/dir.c
43067 @@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43068 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43069 struct ceph_mds_client *mdsc = fsc->mdsc;
43070 unsigned frag = fpos_frag(filp->f_pos);
43071 - int off = fpos_off(filp->f_pos);
43072 + unsigned int off = fpos_off(filp->f_pos);
43073 int err;
43074 u32 ftype;
43075 struct ceph_mds_reply_info_parsed *rinfo;
43076 @@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43077 if (nd &&
43078 (nd->flags & LOOKUP_OPEN) &&
43079 !(nd->intent.open.flags & O_CREAT)) {
43080 - int mode = nd->intent.open.create_mode & ~current->fs->umask;
43081 + int mode = nd->intent.open.create_mode & ~current_umask();
43082 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43083 }
43084
43085 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
43086 index 2704646..c581c91 100644
43087 --- a/fs/cifs/cifs_debug.c
43088 +++ b/fs/cifs/cifs_debug.c
43089 @@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43090
43091 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43092 #ifdef CONFIG_CIFS_STATS2
43093 - atomic_set(&totBufAllocCount, 0);
43094 - atomic_set(&totSmBufAllocCount, 0);
43095 + atomic_set_unchecked(&totBufAllocCount, 0);
43096 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43097 #endif /* CONFIG_CIFS_STATS2 */
43098 spin_lock(&cifs_tcp_ses_lock);
43099 list_for_each(tmp1, &cifs_tcp_ses_list) {
43100 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43101 tcon = list_entry(tmp3,
43102 struct cifs_tcon,
43103 tcon_list);
43104 - atomic_set(&tcon->num_smbs_sent, 0);
43105 - atomic_set(&tcon->num_writes, 0);
43106 - atomic_set(&tcon->num_reads, 0);
43107 - atomic_set(&tcon->num_oplock_brks, 0);
43108 - atomic_set(&tcon->num_opens, 0);
43109 - atomic_set(&tcon->num_posixopens, 0);
43110 - atomic_set(&tcon->num_posixmkdirs, 0);
43111 - atomic_set(&tcon->num_closes, 0);
43112 - atomic_set(&tcon->num_deletes, 0);
43113 - atomic_set(&tcon->num_mkdirs, 0);
43114 - atomic_set(&tcon->num_rmdirs, 0);
43115 - atomic_set(&tcon->num_renames, 0);
43116 - atomic_set(&tcon->num_t2renames, 0);
43117 - atomic_set(&tcon->num_ffirst, 0);
43118 - atomic_set(&tcon->num_fnext, 0);
43119 - atomic_set(&tcon->num_fclose, 0);
43120 - atomic_set(&tcon->num_hardlinks, 0);
43121 - atomic_set(&tcon->num_symlinks, 0);
43122 - atomic_set(&tcon->num_locks, 0);
43123 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43124 + atomic_set_unchecked(&tcon->num_writes, 0);
43125 + atomic_set_unchecked(&tcon->num_reads, 0);
43126 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43127 + atomic_set_unchecked(&tcon->num_opens, 0);
43128 + atomic_set_unchecked(&tcon->num_posixopens, 0);
43129 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43130 + atomic_set_unchecked(&tcon->num_closes, 0);
43131 + atomic_set_unchecked(&tcon->num_deletes, 0);
43132 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
43133 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
43134 + atomic_set_unchecked(&tcon->num_renames, 0);
43135 + atomic_set_unchecked(&tcon->num_t2renames, 0);
43136 + atomic_set_unchecked(&tcon->num_ffirst, 0);
43137 + atomic_set_unchecked(&tcon->num_fnext, 0);
43138 + atomic_set_unchecked(&tcon->num_fclose, 0);
43139 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
43140 + atomic_set_unchecked(&tcon->num_symlinks, 0);
43141 + atomic_set_unchecked(&tcon->num_locks, 0);
43142 }
43143 }
43144 }
43145 @@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43146 smBufAllocCount.counter, cifs_min_small);
43147 #ifdef CONFIG_CIFS_STATS2
43148 seq_printf(m, "Total Large %d Small %d Allocations\n",
43149 - atomic_read(&totBufAllocCount),
43150 - atomic_read(&totSmBufAllocCount));
43151 + atomic_read_unchecked(&totBufAllocCount),
43152 + atomic_read_unchecked(&totSmBufAllocCount));
43153 #endif /* CONFIG_CIFS_STATS2 */
43154
43155 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
43156 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43157 if (tcon->need_reconnect)
43158 seq_puts(m, "\tDISCONNECTED ");
43159 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43160 - atomic_read(&tcon->num_smbs_sent),
43161 - atomic_read(&tcon->num_oplock_brks));
43162 + atomic_read_unchecked(&tcon->num_smbs_sent),
43163 + atomic_read_unchecked(&tcon->num_oplock_brks));
43164 seq_printf(m, "\nReads: %d Bytes: %lld",
43165 - atomic_read(&tcon->num_reads),
43166 + atomic_read_unchecked(&tcon->num_reads),
43167 (long long)(tcon->bytes_read));
43168 seq_printf(m, "\nWrites: %d Bytes: %lld",
43169 - atomic_read(&tcon->num_writes),
43170 + atomic_read_unchecked(&tcon->num_writes),
43171 (long long)(tcon->bytes_written));
43172 seq_printf(m, "\nFlushes: %d",
43173 - atomic_read(&tcon->num_flushes));
43174 + atomic_read_unchecked(&tcon->num_flushes));
43175 seq_printf(m, "\nLocks: %d HardLinks: %d "
43176 "Symlinks: %d",
43177 - atomic_read(&tcon->num_locks),
43178 - atomic_read(&tcon->num_hardlinks),
43179 - atomic_read(&tcon->num_symlinks));
43180 + atomic_read_unchecked(&tcon->num_locks),
43181 + atomic_read_unchecked(&tcon->num_hardlinks),
43182 + atomic_read_unchecked(&tcon->num_symlinks));
43183 seq_printf(m, "\nOpens: %d Closes: %d "
43184 "Deletes: %d",
43185 - atomic_read(&tcon->num_opens),
43186 - atomic_read(&tcon->num_closes),
43187 - atomic_read(&tcon->num_deletes));
43188 + atomic_read_unchecked(&tcon->num_opens),
43189 + atomic_read_unchecked(&tcon->num_closes),
43190 + atomic_read_unchecked(&tcon->num_deletes));
43191 seq_printf(m, "\nPosix Opens: %d "
43192 "Posix Mkdirs: %d",
43193 - atomic_read(&tcon->num_posixopens),
43194 - atomic_read(&tcon->num_posixmkdirs));
43195 + atomic_read_unchecked(&tcon->num_posixopens),
43196 + atomic_read_unchecked(&tcon->num_posixmkdirs));
43197 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43198 - atomic_read(&tcon->num_mkdirs),
43199 - atomic_read(&tcon->num_rmdirs));
43200 + atomic_read_unchecked(&tcon->num_mkdirs),
43201 + atomic_read_unchecked(&tcon->num_rmdirs));
43202 seq_printf(m, "\nRenames: %d T2 Renames %d",
43203 - atomic_read(&tcon->num_renames),
43204 - atomic_read(&tcon->num_t2renames));
43205 + atomic_read_unchecked(&tcon->num_renames),
43206 + atomic_read_unchecked(&tcon->num_t2renames));
43207 seq_printf(m, "\nFindFirst: %d FNext %d "
43208 "FClose %d",
43209 - atomic_read(&tcon->num_ffirst),
43210 - atomic_read(&tcon->num_fnext),
43211 - atomic_read(&tcon->num_fclose));
43212 + atomic_read_unchecked(&tcon->num_ffirst),
43213 + atomic_read_unchecked(&tcon->num_fnext),
43214 + atomic_read_unchecked(&tcon->num_fclose));
43215 }
43216 }
43217 }
43218 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
43219 index 541ef81..a78deb8 100644
43220 --- a/fs/cifs/cifsfs.c
43221 +++ b/fs/cifs/cifsfs.c
43222 @@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
43223 cifs_req_cachep = kmem_cache_create("cifs_request",
43224 CIFSMaxBufSize +
43225 MAX_CIFS_HDR_SIZE, 0,
43226 - SLAB_HWCACHE_ALIGN, NULL);
43227 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43228 if (cifs_req_cachep == NULL)
43229 return -ENOMEM;
43230
43231 @@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
43232 efficient to alloc 1 per page off the slab compared to 17K (5page)
43233 alloc of large cifs buffers even when page debugging is on */
43234 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43235 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43236 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43237 NULL);
43238 if (cifs_sm_req_cachep == NULL) {
43239 mempool_destroy(cifs_req_poolp);
43240 @@ -1097,8 +1097,8 @@ init_cifs(void)
43241 atomic_set(&bufAllocCount, 0);
43242 atomic_set(&smBufAllocCount, 0);
43243 #ifdef CONFIG_CIFS_STATS2
43244 - atomic_set(&totBufAllocCount, 0);
43245 - atomic_set(&totSmBufAllocCount, 0);
43246 + atomic_set_unchecked(&totBufAllocCount, 0);
43247 + atomic_set_unchecked(&totSmBufAllocCount, 0);
43248 #endif /* CONFIG_CIFS_STATS2 */
43249
43250 atomic_set(&midCount, 0);
43251 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
43252 index 73fea28..b996b84 100644
43253 --- a/fs/cifs/cifsglob.h
43254 +++ b/fs/cifs/cifsglob.h
43255 @@ -439,28 +439,28 @@ struct cifs_tcon {
43256 __u16 Flags; /* optional support bits */
43257 enum statusEnum tidStatus;
43258 #ifdef CONFIG_CIFS_STATS
43259 - atomic_t num_smbs_sent;
43260 - atomic_t num_writes;
43261 - atomic_t num_reads;
43262 - atomic_t num_flushes;
43263 - atomic_t num_oplock_brks;
43264 - atomic_t num_opens;
43265 - atomic_t num_closes;
43266 - atomic_t num_deletes;
43267 - atomic_t num_mkdirs;
43268 - atomic_t num_posixopens;
43269 - atomic_t num_posixmkdirs;
43270 - atomic_t num_rmdirs;
43271 - atomic_t num_renames;
43272 - atomic_t num_t2renames;
43273 - atomic_t num_ffirst;
43274 - atomic_t num_fnext;
43275 - atomic_t num_fclose;
43276 - atomic_t num_hardlinks;
43277 - atomic_t num_symlinks;
43278 - atomic_t num_locks;
43279 - atomic_t num_acl_get;
43280 - atomic_t num_acl_set;
43281 + atomic_unchecked_t num_smbs_sent;
43282 + atomic_unchecked_t num_writes;
43283 + atomic_unchecked_t num_reads;
43284 + atomic_unchecked_t num_flushes;
43285 + atomic_unchecked_t num_oplock_brks;
43286 + atomic_unchecked_t num_opens;
43287 + atomic_unchecked_t num_closes;
43288 + atomic_unchecked_t num_deletes;
43289 + atomic_unchecked_t num_mkdirs;
43290 + atomic_unchecked_t num_posixopens;
43291 + atomic_unchecked_t num_posixmkdirs;
43292 + atomic_unchecked_t num_rmdirs;
43293 + atomic_unchecked_t num_renames;
43294 + atomic_unchecked_t num_t2renames;
43295 + atomic_unchecked_t num_ffirst;
43296 + atomic_unchecked_t num_fnext;
43297 + atomic_unchecked_t num_fclose;
43298 + atomic_unchecked_t num_hardlinks;
43299 + atomic_unchecked_t num_symlinks;
43300 + atomic_unchecked_t num_locks;
43301 + atomic_unchecked_t num_acl_get;
43302 + atomic_unchecked_t num_acl_set;
43303 #ifdef CONFIG_CIFS_STATS2
43304 unsigned long long time_writes;
43305 unsigned long long time_reads;
43306 @@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
43307 }
43308
43309 #ifdef CONFIG_CIFS_STATS
43310 -#define cifs_stats_inc atomic_inc
43311 +#define cifs_stats_inc atomic_inc_unchecked
43312
43313 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
43314 unsigned int bytes)
43315 @@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43316 /* Various Debug counters */
43317 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43318 #ifdef CONFIG_CIFS_STATS2
43319 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43320 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43321 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43322 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43323 #endif
43324 GLOBAL_EXTERN atomic_t smBufAllocCount;
43325 GLOBAL_EXTERN atomic_t midCount;
43326 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
43327 index 6b0e064..94e6c3c 100644
43328 --- a/fs/cifs/link.c
43329 +++ b/fs/cifs/link.c
43330 @@ -600,7 +600,7 @@ symlink_exit:
43331
43332 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43333 {
43334 - char *p = nd_get_link(nd);
43335 + const char *p = nd_get_link(nd);
43336 if (!IS_ERR(p))
43337 kfree(p);
43338 }
43339 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
43340 index c29d1aa..58018da 100644
43341 --- a/fs/cifs/misc.c
43342 +++ b/fs/cifs/misc.c
43343 @@ -156,7 +156,7 @@ cifs_buf_get(void)
43344 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43345 atomic_inc(&bufAllocCount);
43346 #ifdef CONFIG_CIFS_STATS2
43347 - atomic_inc(&totBufAllocCount);
43348 + atomic_inc_unchecked(&totBufAllocCount);
43349 #endif /* CONFIG_CIFS_STATS2 */
43350 }
43351
43352 @@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43353 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43354 atomic_inc(&smBufAllocCount);
43355 #ifdef CONFIG_CIFS_STATS2
43356 - atomic_inc(&totSmBufAllocCount);
43357 + atomic_inc_unchecked(&totSmBufAllocCount);
43358 #endif /* CONFIG_CIFS_STATS2 */
43359
43360 }
43361 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43362 index 6901578..d402eb5 100644
43363 --- a/fs/coda/cache.c
43364 +++ b/fs/coda/cache.c
43365 @@ -24,7 +24,7 @@
43366 #include "coda_linux.h"
43367 #include "coda_cache.h"
43368
43369 -static atomic_t permission_epoch = ATOMIC_INIT(0);
43370 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43371
43372 /* replace or extend an acl cache hit */
43373 void coda_cache_enter(struct inode *inode, int mask)
43374 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43375 struct coda_inode_info *cii = ITOC(inode);
43376
43377 spin_lock(&cii->c_lock);
43378 - cii->c_cached_epoch = atomic_read(&permission_epoch);
43379 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43380 if (cii->c_uid != current_fsuid()) {
43381 cii->c_uid = current_fsuid();
43382 cii->c_cached_perm = mask;
43383 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43384 {
43385 struct coda_inode_info *cii = ITOC(inode);
43386 spin_lock(&cii->c_lock);
43387 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43388 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43389 spin_unlock(&cii->c_lock);
43390 }
43391
43392 /* remove all acl caches */
43393 void coda_cache_clear_all(struct super_block *sb)
43394 {
43395 - atomic_inc(&permission_epoch);
43396 + atomic_inc_unchecked(&permission_epoch);
43397 }
43398
43399
43400 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43401 spin_lock(&cii->c_lock);
43402 hit = (mask & cii->c_cached_perm) == mask &&
43403 cii->c_uid == current_fsuid() &&
43404 - cii->c_cached_epoch == atomic_read(&permission_epoch);
43405 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43406 spin_unlock(&cii->c_lock);
43407
43408 return hit;
43409 diff --git a/fs/compat.c b/fs/compat.c
43410 index f2944ac..62845d2 100644
43411 --- a/fs/compat.c
43412 +++ b/fs/compat.c
43413 @@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43414
43415 set_fs(KERNEL_DS);
43416 /* The __user pointer cast is valid because of the set_fs() */
43417 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43418 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43419 set_fs(oldfs);
43420 /* truncating is ok because it's a user address */
43421 if (!ret)
43422 @@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43423 goto out;
43424
43425 ret = -EINVAL;
43426 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43427 + if (nr_segs > UIO_MAXIOV)
43428 goto out;
43429 if (nr_segs > fast_segs) {
43430 ret = -ENOMEM;
43431 @@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
43432
43433 struct compat_readdir_callback {
43434 struct compat_old_linux_dirent __user *dirent;
43435 + struct file * file;
43436 int result;
43437 };
43438
43439 @@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43440 buf->result = -EOVERFLOW;
43441 return -EOVERFLOW;
43442 }
43443 +
43444 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43445 + return 0;
43446 +
43447 buf->result++;
43448 dirent = buf->dirent;
43449 if (!access_ok(VERIFY_WRITE, dirent,
43450 @@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43451
43452 buf.result = 0;
43453 buf.dirent = dirent;
43454 + buf.file = file;
43455
43456 error = vfs_readdir(file, compat_fillonedir, &buf);
43457 if (buf.result)
43458 @@ -900,6 +906,7 @@ struct compat_linux_dirent {
43459 struct compat_getdents_callback {
43460 struct compat_linux_dirent __user *current_dir;
43461 struct compat_linux_dirent __user *previous;
43462 + struct file * file;
43463 int count;
43464 int error;
43465 };
43466 @@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43467 buf->error = -EOVERFLOW;
43468 return -EOVERFLOW;
43469 }
43470 +
43471 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43472 + return 0;
43473 +
43474 dirent = buf->previous;
43475 if (dirent) {
43476 if (__put_user(offset, &dirent->d_off))
43477 @@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43478 buf.previous = NULL;
43479 buf.count = count;
43480 buf.error = 0;
43481 + buf.file = file;
43482
43483 error = vfs_readdir(file, compat_filldir, &buf);
43484 if (error >= 0)
43485 @@ -989,6 +1001,7 @@ out:
43486 struct compat_getdents_callback64 {
43487 struct linux_dirent64 __user *current_dir;
43488 struct linux_dirent64 __user *previous;
43489 + struct file * file;
43490 int count;
43491 int error;
43492 };
43493 @@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43494 buf->error = -EINVAL; /* only used if we fail.. */
43495 if (reclen > buf->count)
43496 return -EINVAL;
43497 +
43498 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43499 + return 0;
43500 +
43501 dirent = buf->previous;
43502
43503 if (dirent) {
43504 @@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43505 buf.previous = NULL;
43506 buf.count = count;
43507 buf.error = 0;
43508 + buf.file = file;
43509
43510 error = vfs_readdir(file, compat_filldir64, &buf);
43511 if (error >= 0)
43512 error = buf.error;
43513 lastdirent = buf.previous;
43514 if (lastdirent) {
43515 - typeof(lastdirent->d_off) d_off = file->f_pos;
43516 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43517 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43518 error = -EFAULT;
43519 else
43520 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43521 index 112e45a..b59845b 100644
43522 --- a/fs/compat_binfmt_elf.c
43523 +++ b/fs/compat_binfmt_elf.c
43524 @@ -30,11 +30,13 @@
43525 #undef elf_phdr
43526 #undef elf_shdr
43527 #undef elf_note
43528 +#undef elf_dyn
43529 #undef elf_addr_t
43530 #define elfhdr elf32_hdr
43531 #define elf_phdr elf32_phdr
43532 #define elf_shdr elf32_shdr
43533 #define elf_note elf32_note
43534 +#define elf_dyn Elf32_Dyn
43535 #define elf_addr_t Elf32_Addr
43536
43537 /*
43538 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
43539 index debdfe0..75d31d4 100644
43540 --- a/fs/compat_ioctl.c
43541 +++ b/fs/compat_ioctl.c
43542 @@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43543
43544 err = get_user(palp, &up->palette);
43545 err |= get_user(length, &up->length);
43546 + if (err)
43547 + return -EFAULT;
43548
43549 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43550 err = put_user(compat_ptr(palp), &up_native->palette);
43551 @@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43552 return -EFAULT;
43553 if (__get_user(udata, &ss32->iomem_base))
43554 return -EFAULT;
43555 - ss.iomem_base = compat_ptr(udata);
43556 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43557 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43558 __get_user(ss.port_high, &ss32->port_high))
43559 return -EFAULT;
43560 @@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43561 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43562 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43563 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43564 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43565 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43566 return -EFAULT;
43567
43568 return ioctl_preallocate(file, p);
43569 @@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43570 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43571 {
43572 unsigned int a, b;
43573 - a = *(unsigned int *)p;
43574 - b = *(unsigned int *)q;
43575 + a = *(const unsigned int *)p;
43576 + b = *(const unsigned int *)q;
43577 if (a > b)
43578 return 1;
43579 if (a < b)
43580 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
43581 index 7e6c52d..94bc756 100644
43582 --- a/fs/configfs/dir.c
43583 +++ b/fs/configfs/dir.c
43584 @@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43585 }
43586 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43587 struct configfs_dirent *next;
43588 - const char * name;
43589 + const unsigned char * name;
43590 + char d_name[sizeof(next->s_dentry->d_iname)];
43591 int len;
43592 struct inode *inode = NULL;
43593
43594 @@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43595 continue;
43596
43597 name = configfs_get_name(next);
43598 - len = strlen(name);
43599 + if (next->s_dentry && name == next->s_dentry->d_iname) {
43600 + len = next->s_dentry->d_name.len;
43601 + memcpy(d_name, name, len);
43602 + name = d_name;
43603 + } else
43604 + len = strlen(name);
43605
43606 /*
43607 * We'll have a dentry and an inode for
43608 diff --git a/fs/dcache.c b/fs/dcache.c
43609 index b80531c..8ca7e2d 100644
43610 --- a/fs/dcache.c
43611 +++ b/fs/dcache.c
43612 @@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43613 mempages -= reserve;
43614
43615 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43616 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43617 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43618
43619 dcache_init();
43620 inode_init();
43621 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
43622 index b80bc84..0d46d1a 100644
43623 --- a/fs/debugfs/inode.c
43624 +++ b/fs/debugfs/inode.c
43625 @@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43626 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43627 {
43628 return debugfs_create_file(name,
43629 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43630 + S_IFDIR | S_IRWXU,
43631 +#else
43632 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43633 +#endif
43634 parent, NULL, NULL);
43635 }
43636 EXPORT_SYMBOL_GPL(debugfs_create_dir);
43637 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
43638 index ab35b11..b30af66 100644
43639 --- a/fs/ecryptfs/inode.c
43640 +++ b/fs/ecryptfs/inode.c
43641 @@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43642 old_fs = get_fs();
43643 set_fs(get_ds());
43644 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43645 - (char __user *)lower_buf,
43646 + (char __force_user *)lower_buf,
43647 lower_bufsiz);
43648 set_fs(old_fs);
43649 if (rc < 0)
43650 @@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43651 }
43652 old_fs = get_fs();
43653 set_fs(get_ds());
43654 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
43655 + rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43656 set_fs(old_fs);
43657 if (rc < 0) {
43658 kfree(buf);
43659 @@ -733,7 +733,7 @@ out:
43660 static void
43661 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43662 {
43663 - char *buf = nd_get_link(nd);
43664 + const char *buf = nd_get_link(nd);
43665 if (!IS_ERR(buf)) {
43666 /* Free the char* */
43667 kfree(buf);
43668 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
43669 index c0038f6..47ab347 100644
43670 --- a/fs/ecryptfs/miscdev.c
43671 +++ b/fs/ecryptfs/miscdev.c
43672 @@ -355,7 +355,7 @@ check_list:
43673 goto out_unlock_msg_ctx;
43674 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43675 if (msg_ctx->msg) {
43676 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
43677 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43678 goto out_unlock_msg_ctx;
43679 i += packet_length_size;
43680 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
43681 diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
43682 index b2a34a1..162fa69 100644
43683 --- a/fs/ecryptfs/read_write.c
43684 +++ b/fs/ecryptfs/read_write.c
43685 @@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43686 return -EIO;
43687 fs_save = get_fs();
43688 set_fs(get_ds());
43689 - rc = vfs_write(lower_file, data, size, &offset);
43690 + rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43691 set_fs(fs_save);
43692 mark_inode_dirty_sync(ecryptfs_inode);
43693 return rc;
43694 @@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43695 return -EIO;
43696 fs_save = get_fs();
43697 set_fs(get_ds());
43698 - rc = vfs_read(lower_file, data, size, &offset);
43699 + rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43700 set_fs(fs_save);
43701 return rc;
43702 }
43703 diff --git a/fs/exec.c b/fs/exec.c
43704 index 29e5f84..ec81452 100644
43705 --- a/fs/exec.c
43706 +++ b/fs/exec.c
43707 @@ -55,6 +55,15 @@
43708 #include <linux/pipe_fs_i.h>
43709 #include <linux/oom.h>
43710 #include <linux/compat.h>
43711 +#include <linux/random.h>
43712 +#include <linux/seq_file.h>
43713 +
43714 +#ifdef CONFIG_PAX_REFCOUNT
43715 +#include <linux/kallsyms.h>
43716 +#include <linux/kdebug.h>
43717 +#endif
43718 +
43719 +#include <trace/events/fs.h>
43720
43721 #include <asm/uaccess.h>
43722 #include <asm/mmu_context.h>
43723 @@ -66,6 +75,18 @@
43724
43725 #include <trace/events/sched.h>
43726
43727 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43728 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
43729 +{
43730 + WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
43731 +}
43732 +#endif
43733 +
43734 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43735 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43736 +EXPORT_SYMBOL(pax_set_initial_flags_func);
43737 +#endif
43738 +
43739 int core_uses_pid;
43740 char core_pattern[CORENAME_MAX_SIZE] = "core";
43741 unsigned int core_pipe_limit;
43742 @@ -75,7 +96,7 @@ struct core_name {
43743 char *corename;
43744 int used, size;
43745 };
43746 -static atomic_t call_count = ATOMIC_INIT(1);
43747 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43748
43749 /* The maximal length of core_pattern is also specified in sysctl.c */
43750
43751 @@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43752 int write)
43753 {
43754 struct page *page;
43755 - int ret;
43756
43757 -#ifdef CONFIG_STACK_GROWSUP
43758 - if (write) {
43759 - ret = expand_downwards(bprm->vma, pos);
43760 - if (ret < 0)
43761 - return NULL;
43762 - }
43763 -#endif
43764 - ret = get_user_pages(current, bprm->mm, pos,
43765 - 1, write, 1, &page, NULL);
43766 - if (ret <= 0)
43767 + if (0 > expand_downwards(bprm->vma, pos))
43768 + return NULL;
43769 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43770 return NULL;
43771
43772 if (write) {
43773 @@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43774 if (size <= ARG_MAX)
43775 return page;
43776
43777 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43778 + // only allow 512KB for argv+env on suid/sgid binaries
43779 + // to prevent easy ASLR exhaustion
43780 + if (((bprm->cred->euid != current_euid()) ||
43781 + (bprm->cred->egid != current_egid())) &&
43782 + (size > (512 * 1024))) {
43783 + put_page(page);
43784 + return NULL;
43785 + }
43786 +#endif
43787 +
43788 /*
43789 * Limit to 1/4-th the stack size for the argv+env strings.
43790 * This ensures that:
43791 @@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43792 vma->vm_end = STACK_TOP_MAX;
43793 vma->vm_start = vma->vm_end - PAGE_SIZE;
43794 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43795 +
43796 +#ifdef CONFIG_PAX_SEGMEXEC
43797 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43798 +#endif
43799 +
43800 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
43801 INIT_LIST_HEAD(&vma->anon_vma_chain);
43802
43803 @@ -291,6 +320,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43804 mm->stack_vm = mm->total_vm = 1;
43805 up_write(&mm->mmap_sem);
43806 bprm->p = vma->vm_end - sizeof(void *);
43807 +
43808 +#ifdef CONFIG_PAX_RANDUSTACK
43809 + if (randomize_va_space)
43810 + bprm->p ^= random32() & ~PAGE_MASK;
43811 +#endif
43812 +
43813 return 0;
43814 err:
43815 up_write(&mm->mmap_sem);
43816 @@ -399,19 +434,7 @@ err:
43817 return err;
43818 }
43819
43820 -struct user_arg_ptr {
43821 -#ifdef CONFIG_COMPAT
43822 - bool is_compat;
43823 -#endif
43824 - union {
43825 - const char __user *const __user *native;
43826 -#ifdef CONFIG_COMPAT
43827 - compat_uptr_t __user *compat;
43828 -#endif
43829 - } ptr;
43830 -};
43831 -
43832 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43833 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43834 {
43835 const char __user *native;
43836
43837 @@ -420,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43838 compat_uptr_t compat;
43839
43840 if (get_user(compat, argv.ptr.compat + nr))
43841 - return ERR_PTR(-EFAULT);
43842 + return (const char __force_user *)ERR_PTR(-EFAULT);
43843
43844 return compat_ptr(compat);
43845 }
43846 #endif
43847
43848 if (get_user(native, argv.ptr.native + nr))
43849 - return ERR_PTR(-EFAULT);
43850 + return (const char __force_user *)ERR_PTR(-EFAULT);
43851
43852 return native;
43853 }
43854 @@ -446,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
43855 if (!p)
43856 break;
43857
43858 - if (IS_ERR(p))
43859 + if (IS_ERR((const char __force_kernel *)p))
43860 return -EFAULT;
43861
43862 if (i++ >= max)
43863 @@ -480,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43864
43865 ret = -EFAULT;
43866 str = get_user_arg_ptr(argv, argc);
43867 - if (IS_ERR(str))
43868 + if (IS_ERR((const char __force_kernel *)str))
43869 goto out;
43870
43871 len = strnlen_user(str, MAX_ARG_STRLEN);
43872 @@ -562,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43873 int r;
43874 mm_segment_t oldfs = get_fs();
43875 struct user_arg_ptr argv = {
43876 - .ptr.native = (const char __user *const __user *)__argv,
43877 + .ptr.native = (const char __force_user *const __force_user *)__argv,
43878 };
43879
43880 set_fs(KERNEL_DS);
43881 @@ -597,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43882 unsigned long new_end = old_end - shift;
43883 struct mmu_gather tlb;
43884
43885 - BUG_ON(new_start > new_end);
43886 + if (new_start >= new_end || new_start < mmap_min_addr)
43887 + return -ENOMEM;
43888
43889 /*
43890 * ensure there are no vmas between where we want to go
43891 @@ -606,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43892 if (vma != find_vma(mm, new_start))
43893 return -EFAULT;
43894
43895 +#ifdef CONFIG_PAX_SEGMEXEC
43896 + BUG_ON(pax_find_mirror_vma(vma));
43897 +#endif
43898 +
43899 /*
43900 * cover the whole range: [new_start, old_end)
43901 */
43902 @@ -686,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43903 stack_top = arch_align_stack(stack_top);
43904 stack_top = PAGE_ALIGN(stack_top);
43905
43906 - if (unlikely(stack_top < mmap_min_addr) ||
43907 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43908 - return -ENOMEM;
43909 -
43910 stack_shift = vma->vm_end - stack_top;
43911
43912 bprm->p -= stack_shift;
43913 @@ -701,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43914 bprm->exec -= stack_shift;
43915
43916 down_write(&mm->mmap_sem);
43917 +
43918 + /* Move stack pages down in memory. */
43919 + if (stack_shift) {
43920 + ret = shift_arg_pages(vma, stack_shift);
43921 + if (ret)
43922 + goto out_unlock;
43923 + }
43924 +
43925 vm_flags = VM_STACK_FLAGS;
43926
43927 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43928 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43929 + vm_flags &= ~VM_EXEC;
43930 +
43931 +#ifdef CONFIG_PAX_MPROTECT
43932 + if (mm->pax_flags & MF_PAX_MPROTECT)
43933 + vm_flags &= ~VM_MAYEXEC;
43934 +#endif
43935 +
43936 + }
43937 +#endif
43938 +
43939 /*
43940 * Adjust stack execute permissions; explicitly enable for
43941 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
43942 @@ -721,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43943 goto out_unlock;
43944 BUG_ON(prev != vma);
43945
43946 - /* Move stack pages down in memory. */
43947 - if (stack_shift) {
43948 - ret = shift_arg_pages(vma, stack_shift);
43949 - if (ret)
43950 - goto out_unlock;
43951 - }
43952 -
43953 /* mprotect_fixup is overkill to remove the temporary stack flags */
43954 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43955
43956 @@ -785,6 +822,8 @@ struct file *open_exec(const char *name)
43957
43958 fsnotify_open(file);
43959
43960 + trace_open_exec(name);
43961 +
43962 err = deny_write_access(file);
43963 if (err)
43964 goto exit;
43965 @@ -808,7 +847,7 @@ int kernel_read(struct file *file, loff_t offset,
43966 old_fs = get_fs();
43967 set_fs(get_ds());
43968 /* The cast to a user pointer is valid due to the set_fs() */
43969 - result = vfs_read(file, (void __user *)addr, count, &pos);
43970 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
43971 set_fs(old_fs);
43972 return result;
43973 }
43974 @@ -1254,7 +1293,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43975 }
43976 rcu_read_unlock();
43977
43978 - if (p->fs->users > n_fs) {
43979 + if (atomic_read(&p->fs->users) > n_fs) {
43980 bprm->unsafe |= LSM_UNSAFE_SHARE;
43981 } else {
43982 res = -EAGAIN;
43983 @@ -1451,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
43984
43985 EXPORT_SYMBOL(search_binary_handler);
43986
43987 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43988 +static DEFINE_PER_CPU(u64, exec_counter);
43989 +static int __init init_exec_counters(void)
43990 +{
43991 + unsigned int cpu;
43992 +
43993 + for_each_possible_cpu(cpu) {
43994 + per_cpu(exec_counter, cpu) = (u64)cpu;
43995 + }
43996 +
43997 + return 0;
43998 +}
43999 +early_initcall(init_exec_counters);
44000 +static inline void increment_exec_counter(void)
44001 +{
44002 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
44003 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44004 +}
44005 +#else
44006 +static inline void increment_exec_counter(void) {}
44007 +#endif
44008 +
44009 /*
44010 * sys_execve() executes a new program.
44011 */
44012 @@ -1459,6 +1520,11 @@ static int do_execve_common(const char *filename,
44013 struct user_arg_ptr envp,
44014 struct pt_regs *regs)
44015 {
44016 +#ifdef CONFIG_GRKERNSEC
44017 + struct file *old_exec_file;
44018 + struct acl_subject_label *old_acl;
44019 + struct rlimit old_rlim[RLIM_NLIMITS];
44020 +#endif
44021 struct linux_binprm *bprm;
44022 struct file *file;
44023 struct files_struct *displaced;
44024 @@ -1466,6 +1532,8 @@ static int do_execve_common(const char *filename,
44025 int retval;
44026 const struct cred *cred = current_cred();
44027
44028 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44029 +
44030 /*
44031 * We move the actual failure in case of RLIMIT_NPROC excess from
44032 * set*uid() to execve() because too many poorly written programs
44033 @@ -1506,12 +1574,27 @@ static int do_execve_common(const char *filename,
44034 if (IS_ERR(file))
44035 goto out_unmark;
44036
44037 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
44038 + retval = -EPERM;
44039 + goto out_file;
44040 + }
44041 +
44042 sched_exec();
44043
44044 bprm->file = file;
44045 bprm->filename = filename;
44046 bprm->interp = filename;
44047
44048 + if (gr_process_user_ban()) {
44049 + retval = -EPERM;
44050 + goto out_file;
44051 + }
44052 +
44053 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44054 + retval = -EACCES;
44055 + goto out_file;
44056 + }
44057 +
44058 retval = bprm_mm_init(bprm);
44059 if (retval)
44060 goto out_file;
44061 @@ -1528,24 +1611,65 @@ static int do_execve_common(const char *filename,
44062 if (retval < 0)
44063 goto out;
44064
44065 +#ifdef CONFIG_GRKERNSEC
44066 + old_acl = current->acl;
44067 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44068 + old_exec_file = current->exec_file;
44069 + get_file(file);
44070 + current->exec_file = file;
44071 +#endif
44072 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44073 + /* limit suid stack to 8MB
44074 + we saved the old limits above and will restore them if this exec fails
44075 + */
44076 + if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44077 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44078 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44079 +#endif
44080 +
44081 + if (!gr_tpe_allow(file)) {
44082 + retval = -EACCES;
44083 + goto out_fail;
44084 + }
44085 +
44086 + if (gr_check_crash_exec(file)) {
44087 + retval = -EACCES;
44088 + goto out_fail;
44089 + }
44090 +
44091 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
44092 + bprm->unsafe);
44093 + if (retval < 0)
44094 + goto out_fail;
44095 +
44096 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44097 if (retval < 0)
44098 - goto out;
44099 + goto out_fail;
44100
44101 bprm->exec = bprm->p;
44102 retval = copy_strings(bprm->envc, envp, bprm);
44103 if (retval < 0)
44104 - goto out;
44105 + goto out_fail;
44106
44107 retval = copy_strings(bprm->argc, argv, bprm);
44108 if (retval < 0)
44109 - goto out;
44110 + goto out_fail;
44111 +
44112 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44113 +
44114 + gr_handle_exec_args(bprm, argv);
44115
44116 retval = search_binary_handler(bprm,regs);
44117 if (retval < 0)
44118 - goto out;
44119 + goto out_fail;
44120 +#ifdef CONFIG_GRKERNSEC
44121 + if (old_exec_file)
44122 + fput(old_exec_file);
44123 +#endif
44124
44125 /* execve succeeded */
44126 +
44127 + increment_exec_counter();
44128 current->fs->in_exec = 0;
44129 current->in_execve = 0;
44130 acct_update_integrals(current);
44131 @@ -1554,6 +1678,14 @@ static int do_execve_common(const char *filename,
44132 put_files_struct(displaced);
44133 return retval;
44134
44135 +out_fail:
44136 +#ifdef CONFIG_GRKERNSEC
44137 + current->acl = old_acl;
44138 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44139 + fput(current->exec_file);
44140 + current->exec_file = old_exec_file;
44141 +#endif
44142 +
44143 out:
44144 if (bprm->mm) {
44145 acct_arg_size(bprm, 0);
44146 @@ -1627,7 +1759,7 @@ static int expand_corename(struct core_name *cn)
44147 {
44148 char *old_corename = cn->corename;
44149
44150 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44151 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44152 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44153
44154 if (!cn->corename) {
44155 @@ -1724,7 +1856,7 @@ static int format_corename(struct core_name *cn, long signr)
44156 int pid_in_pattern = 0;
44157 int err = 0;
44158
44159 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44160 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44161 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44162 cn->used = 0;
44163
44164 @@ -1821,6 +1953,228 @@ out:
44165 return ispipe;
44166 }
44167
44168 +int pax_check_flags(unsigned long *flags)
44169 +{
44170 + int retval = 0;
44171 +
44172 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44173 + if (*flags & MF_PAX_SEGMEXEC)
44174 + {
44175 + *flags &= ~MF_PAX_SEGMEXEC;
44176 + retval = -EINVAL;
44177 + }
44178 +#endif
44179 +
44180 + if ((*flags & MF_PAX_PAGEEXEC)
44181 +
44182 +#ifdef CONFIG_PAX_PAGEEXEC
44183 + && (*flags & MF_PAX_SEGMEXEC)
44184 +#endif
44185 +
44186 + )
44187 + {
44188 + *flags &= ~MF_PAX_PAGEEXEC;
44189 + retval = -EINVAL;
44190 + }
44191 +
44192 + if ((*flags & MF_PAX_MPROTECT)
44193 +
44194 +#ifdef CONFIG_PAX_MPROTECT
44195 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44196 +#endif
44197 +
44198 + )
44199 + {
44200 + *flags &= ~MF_PAX_MPROTECT;
44201 + retval = -EINVAL;
44202 + }
44203 +
44204 + if ((*flags & MF_PAX_EMUTRAMP)
44205 +
44206 +#ifdef CONFIG_PAX_EMUTRAMP
44207 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44208 +#endif
44209 +
44210 + )
44211 + {
44212 + *flags &= ~MF_PAX_EMUTRAMP;
44213 + retval = -EINVAL;
44214 + }
44215 +
44216 + return retval;
44217 +}
44218 +
44219 +EXPORT_SYMBOL(pax_check_flags);
44220 +
44221 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44222 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44223 +{
44224 + struct task_struct *tsk = current;
44225 + struct mm_struct *mm = current->mm;
44226 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44227 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44228 + char *path_exec = NULL;
44229 + char *path_fault = NULL;
44230 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
44231 +
44232 + if (buffer_exec && buffer_fault) {
44233 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44234 +
44235 + down_read(&mm->mmap_sem);
44236 + vma = mm->mmap;
44237 + while (vma && (!vma_exec || !vma_fault)) {
44238 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44239 + vma_exec = vma;
44240 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44241 + vma_fault = vma;
44242 + vma = vma->vm_next;
44243 + }
44244 + if (vma_exec) {
44245 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44246 + if (IS_ERR(path_exec))
44247 + path_exec = "<path too long>";
44248 + else {
44249 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44250 + if (path_exec) {
44251 + *path_exec = 0;
44252 + path_exec = buffer_exec;
44253 + } else
44254 + path_exec = "<path too long>";
44255 + }
44256 + }
44257 + if (vma_fault) {
44258 + start = vma_fault->vm_start;
44259 + end = vma_fault->vm_end;
44260 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44261 + if (vma_fault->vm_file) {
44262 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44263 + if (IS_ERR(path_fault))
44264 + path_fault = "<path too long>";
44265 + else {
44266 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44267 + if (path_fault) {
44268 + *path_fault = 0;
44269 + path_fault = buffer_fault;
44270 + } else
44271 + path_fault = "<path too long>";
44272 + }
44273 + } else
44274 + path_fault = "<anonymous mapping>";
44275 + }
44276 + up_read(&mm->mmap_sem);
44277 + }
44278 + if (tsk->signal->curr_ip)
44279 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44280 + else
44281 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44282 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44283 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44284 + task_uid(tsk), task_euid(tsk), pc, sp);
44285 + free_page((unsigned long)buffer_exec);
44286 + free_page((unsigned long)buffer_fault);
44287 + pax_report_insns(regs, pc, sp);
44288 + do_coredump(SIGKILL, SIGKILL, regs);
44289 +}
44290 +#endif
44291 +
44292 +#ifdef CONFIG_PAX_REFCOUNT
44293 +void pax_report_refcount_overflow(struct pt_regs *regs)
44294 +{
44295 + if (current->signal->curr_ip)
44296 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44297 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44298 + else
44299 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44300 + current->comm, task_pid_nr(current), current_uid(), current_euid());
44301 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44302 + show_regs(regs);
44303 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44304 +}
44305 +#endif
44306 +
44307 +#ifdef CONFIG_PAX_USERCOPY
44308 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
44309 +int object_is_on_stack(const void *obj, unsigned long len)
44310 +{
44311 + const void * const stack = task_stack_page(current);
44312 + const void * const stackend = stack + THREAD_SIZE;
44313 +
44314 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44315 + const void *frame = NULL;
44316 + const void *oldframe;
44317 +#endif
44318 +
44319 + if (obj + len < obj)
44320 + return -1;
44321 +
44322 + if (obj + len <= stack || stackend <= obj)
44323 + return 0;
44324 +
44325 + if (obj < stack || stackend < obj + len)
44326 + return -1;
44327 +
44328 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44329 + oldframe = __builtin_frame_address(1);
44330 + if (oldframe)
44331 + frame = __builtin_frame_address(2);
44332 + /*
44333 + low ----------------------------------------------> high
44334 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
44335 + ^----------------^
44336 + allow copies only within here
44337 + */
44338 + while (stack <= frame && frame < stackend) {
44339 + /* if obj + len extends past the last frame, this
44340 + check won't pass and the next frame will be 0,
44341 + causing us to bail out and correctly report
44342 + the copy as invalid
44343 + */
44344 + if (obj + len <= frame)
44345 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44346 + oldframe = frame;
44347 + frame = *(const void * const *)frame;
44348 + }
44349 + return -1;
44350 +#else
44351 + return 1;
44352 +#endif
44353 +}
44354 +
44355 +__noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
44356 +{
44357 + if (current->signal->curr_ip)
44358 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44359 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44360 + else
44361 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44362 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
44363 + dump_stack();
44364 + gr_handle_kernel_exploit();
44365 + do_group_exit(SIGKILL);
44366 +}
44367 +#endif
44368 +
44369 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44370 +void pax_track_stack(void)
44371 +{
44372 + unsigned long sp = (unsigned long)&sp;
44373 + if (sp < current_thread_info()->lowest_stack &&
44374 + sp > (unsigned long)task_stack_page(current))
44375 + current_thread_info()->lowest_stack = sp;
44376 +}
44377 +EXPORT_SYMBOL(pax_track_stack);
44378 +#endif
44379 +
44380 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
44381 +void report_size_overflow(const char *file, unsigned int line, const char *func)
44382 +{
44383 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44384 + dump_stack();
44385 + do_group_exit(SIGKILL);
44386 +}
44387 +EXPORT_SYMBOL(report_size_overflow);
44388 +#endif
44389 +
44390 static int zap_process(struct task_struct *start, int exit_code)
44391 {
44392 struct task_struct *t;
44393 @@ -2018,17 +2372,17 @@ static void wait_for_dump_helpers(struct file *file)
44394 pipe = file->f_path.dentry->d_inode->i_pipe;
44395
44396 pipe_lock(pipe);
44397 - pipe->readers++;
44398 - pipe->writers--;
44399 + atomic_inc(&pipe->readers);
44400 + atomic_dec(&pipe->writers);
44401
44402 - while ((pipe->readers > 1) && (!signal_pending(current))) {
44403 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44404 wake_up_interruptible_sync(&pipe->wait);
44405 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44406 pipe_wait(pipe);
44407 }
44408
44409 - pipe->readers--;
44410 - pipe->writers++;
44411 + atomic_dec(&pipe->readers);
44412 + atomic_inc(&pipe->writers);
44413 pipe_unlock(pipe);
44414
44415 }
44416 @@ -2089,7 +2443,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44417 int retval = 0;
44418 int flag = 0;
44419 int ispipe;
44420 - static atomic_t core_dump_count = ATOMIC_INIT(0);
44421 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44422 struct coredump_params cprm = {
44423 .signr = signr,
44424 .regs = regs,
44425 @@ -2104,6 +2458,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44426
44427 audit_core_dumps(signr);
44428
44429 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44430 + gr_handle_brute_attach(current, cprm.mm_flags);
44431 +
44432 binfmt = mm->binfmt;
44433 if (!binfmt || !binfmt->core_dump)
44434 goto fail;
44435 @@ -2171,7 +2528,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44436 }
44437 cprm.limit = RLIM_INFINITY;
44438
44439 - dump_count = atomic_inc_return(&core_dump_count);
44440 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
44441 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44442 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44443 task_tgid_vnr(current), current->comm);
44444 @@ -2198,6 +2555,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44445 } else {
44446 struct inode *inode;
44447
44448 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44449 +
44450 if (cprm.limit < binfmt->min_coredump)
44451 goto fail_unlock;
44452
44453 @@ -2241,7 +2600,7 @@ close_fail:
44454 filp_close(cprm.file, NULL);
44455 fail_dropcount:
44456 if (ispipe)
44457 - atomic_dec(&core_dump_count);
44458 + atomic_dec_unchecked(&core_dump_count);
44459 fail_unlock:
44460 kfree(cn.corename);
44461 fail_corename:
44462 @@ -2260,7 +2619,7 @@ fail:
44463 */
44464 int dump_write(struct file *file, const void *addr, int nr)
44465 {
44466 - return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44467 + return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44468 }
44469 EXPORT_SYMBOL(dump_write);
44470
44471 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
44472 index a8cbe1b..fed04cb 100644
44473 --- a/fs/ext2/balloc.c
44474 +++ b/fs/ext2/balloc.c
44475 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44476
44477 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44478 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44479 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44480 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44481 sbi->s_resuid != current_fsuid() &&
44482 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44483 return 0;
44484 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
44485 index baac1b1..1499b62 100644
44486 --- a/fs/ext3/balloc.c
44487 +++ b/fs/ext3/balloc.c
44488 @@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44489
44490 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44491 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44492 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44493 + if (free_blocks < root_blocks + 1 &&
44494 !use_reservation && sbi->s_resuid != current_fsuid() &&
44495 - (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44496 + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44497 + !capable_nolog(CAP_SYS_RESOURCE)) {
44498 return 0;
44499 }
44500 return 1;
44501 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
44502 index 8da837b..ed3835b 100644
44503 --- a/fs/ext4/balloc.c
44504 +++ b/fs/ext4/balloc.c
44505 @@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
44506 /* Hm, nope. Are (enough) root reserved clusters available? */
44507 if (sbi->s_resuid == current_fsuid() ||
44508 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44509 - capable(CAP_SYS_RESOURCE) ||
44510 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44511 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44512 + capable_nolog(CAP_SYS_RESOURCE)) {
44513
44514 if (free_clusters >= (nclusters + dirty_clusters))
44515 return 1;
44516 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
44517 index 0e01e90..ae2bd5e 100644
44518 --- a/fs/ext4/ext4.h
44519 +++ b/fs/ext4/ext4.h
44520 @@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44521 unsigned long s_mb_last_start;
44522
44523 /* stats for buddy allocator */
44524 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44525 - atomic_t s_bal_success; /* we found long enough chunks */
44526 - atomic_t s_bal_allocated; /* in blocks */
44527 - atomic_t s_bal_ex_scanned; /* total extents scanned */
44528 - atomic_t s_bal_goals; /* goal hits */
44529 - atomic_t s_bal_breaks; /* too long searches */
44530 - atomic_t s_bal_2orders; /* 2^order hits */
44531 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44532 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44533 + atomic_unchecked_t s_bal_allocated; /* in blocks */
44534 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44535 + atomic_unchecked_t s_bal_goals; /* goal hits */
44536 + atomic_unchecked_t s_bal_breaks; /* too long searches */
44537 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44538 spinlock_t s_bal_lock;
44539 unsigned long s_mb_buddies_generated;
44540 unsigned long long s_mb_generation_time;
44541 - atomic_t s_mb_lost_chunks;
44542 - atomic_t s_mb_preallocated;
44543 - atomic_t s_mb_discarded;
44544 + atomic_unchecked_t s_mb_lost_chunks;
44545 + atomic_unchecked_t s_mb_preallocated;
44546 + atomic_unchecked_t s_mb_discarded;
44547 atomic_t s_lock_busy;
44548
44549 /* locality groups */
44550 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44551 index 1365903..9727522 100644
44552 --- a/fs/ext4/ioctl.c
44553 +++ b/fs/ext4/ioctl.c
44554 @@ -261,7 +261,6 @@ group_extend_out:
44555 err = ext4_move_extents(filp, donor_filp, me.orig_start,
44556 me.donor_start, me.len, &me.moved_len);
44557 mnt_drop_write_file(filp);
44558 - mnt_drop_write(filp->f_path.mnt);
44559
44560 if (copy_to_user((struct move_extent __user *)arg,
44561 &me, sizeof(me)))
44562 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
44563 index 6b0a57e..1955a44 100644
44564 --- a/fs/ext4/mballoc.c
44565 +++ b/fs/ext4/mballoc.c
44566 @@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44567 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44568
44569 if (EXT4_SB(sb)->s_mb_stats)
44570 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44571 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44572
44573 break;
44574 }
44575 @@ -2041,7 +2041,7 @@ repeat:
44576 ac->ac_status = AC_STATUS_CONTINUE;
44577 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44578 cr = 3;
44579 - atomic_inc(&sbi->s_mb_lost_chunks);
44580 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44581 goto repeat;
44582 }
44583 }
44584 @@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
44585 if (sbi->s_mb_stats) {
44586 ext4_msg(sb, KERN_INFO,
44587 "mballoc: %u blocks %u reqs (%u success)",
44588 - atomic_read(&sbi->s_bal_allocated),
44589 - atomic_read(&sbi->s_bal_reqs),
44590 - atomic_read(&sbi->s_bal_success));
44591 + atomic_read_unchecked(&sbi->s_bal_allocated),
44592 + atomic_read_unchecked(&sbi->s_bal_reqs),
44593 + atomic_read_unchecked(&sbi->s_bal_success));
44594 ext4_msg(sb, KERN_INFO,
44595 "mballoc: %u extents scanned, %u goal hits, "
44596 "%u 2^N hits, %u breaks, %u lost",
44597 - atomic_read(&sbi->s_bal_ex_scanned),
44598 - atomic_read(&sbi->s_bal_goals),
44599 - atomic_read(&sbi->s_bal_2orders),
44600 - atomic_read(&sbi->s_bal_breaks),
44601 - atomic_read(&sbi->s_mb_lost_chunks));
44602 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44603 + atomic_read_unchecked(&sbi->s_bal_goals),
44604 + atomic_read_unchecked(&sbi->s_bal_2orders),
44605 + atomic_read_unchecked(&sbi->s_bal_breaks),
44606 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44607 ext4_msg(sb, KERN_INFO,
44608 "mballoc: %lu generated and it took %Lu",
44609 sbi->s_mb_buddies_generated,
44610 sbi->s_mb_generation_time);
44611 ext4_msg(sb, KERN_INFO,
44612 "mballoc: %u preallocated, %u discarded",
44613 - atomic_read(&sbi->s_mb_preallocated),
44614 - atomic_read(&sbi->s_mb_discarded));
44615 + atomic_read_unchecked(&sbi->s_mb_preallocated),
44616 + atomic_read_unchecked(&sbi->s_mb_discarded));
44617 }
44618
44619 free_percpu(sbi->s_locality_groups);
44620 @@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44621 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44622
44623 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44624 - atomic_inc(&sbi->s_bal_reqs);
44625 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44626 + atomic_inc_unchecked(&sbi->s_bal_reqs);
44627 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44628 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44629 - atomic_inc(&sbi->s_bal_success);
44630 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44631 + atomic_inc_unchecked(&sbi->s_bal_success);
44632 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44633 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44634 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44635 - atomic_inc(&sbi->s_bal_goals);
44636 + atomic_inc_unchecked(&sbi->s_bal_goals);
44637 if (ac->ac_found > sbi->s_mb_max_to_scan)
44638 - atomic_inc(&sbi->s_bal_breaks);
44639 + atomic_inc_unchecked(&sbi->s_bal_breaks);
44640 }
44641
44642 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
44643 @@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44644 trace_ext4_mb_new_inode_pa(ac, pa);
44645
44646 ext4_mb_use_inode_pa(ac, pa);
44647 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44648 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44649
44650 ei = EXT4_I(ac->ac_inode);
44651 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44652 @@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44653 trace_ext4_mb_new_group_pa(ac, pa);
44654
44655 ext4_mb_use_group_pa(ac, pa);
44656 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44657 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44658
44659 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44660 lg = ac->ac_lg;
44661 @@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44662 * from the bitmap and continue.
44663 */
44664 }
44665 - atomic_add(free, &sbi->s_mb_discarded);
44666 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
44667
44668 return err;
44669 }
44670 @@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44671 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44672 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44673 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44674 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44675 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44676 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44677
44678 return 0;
44679 diff --git a/fs/fcntl.c b/fs/fcntl.c
44680 index 75e7c1f..1eb3e4d 100644
44681 --- a/fs/fcntl.c
44682 +++ b/fs/fcntl.c
44683 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44684 if (err)
44685 return err;
44686
44687 + if (gr_handle_chroot_fowner(pid, type))
44688 + return -ENOENT;
44689 + if (gr_check_protected_task_fowner(pid, type))
44690 + return -EACCES;
44691 +
44692 f_modown(filp, pid, type, force);
44693 return 0;
44694 }
44695 @@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44696
44697 static int f_setown_ex(struct file *filp, unsigned long arg)
44698 {
44699 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44700 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44701 struct f_owner_ex owner;
44702 struct pid *pid;
44703 int type;
44704 @@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44705
44706 static int f_getown_ex(struct file *filp, unsigned long arg)
44707 {
44708 - struct f_owner_ex * __user owner_p = (void * __user)arg;
44709 + struct f_owner_ex __user *owner_p = (void __user *)arg;
44710 struct f_owner_ex owner;
44711 int ret = 0;
44712
44713 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44714 switch (cmd) {
44715 case F_DUPFD:
44716 case F_DUPFD_CLOEXEC:
44717 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
44718 if (arg >= rlimit(RLIMIT_NOFILE))
44719 break;
44720 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
44721 diff --git a/fs/fifo.c b/fs/fifo.c
44722 index cf6f434..3d7942c 100644
44723 --- a/fs/fifo.c
44724 +++ b/fs/fifo.c
44725 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44726 */
44727 filp->f_op = &read_pipefifo_fops;
44728 pipe->r_counter++;
44729 - if (pipe->readers++ == 0)
44730 + if (atomic_inc_return(&pipe->readers) == 1)
44731 wake_up_partner(inode);
44732
44733 - if (!pipe->writers) {
44734 + if (!atomic_read(&pipe->writers)) {
44735 if ((filp->f_flags & O_NONBLOCK)) {
44736 /* suppress POLLHUP until we have
44737 * seen a writer */
44738 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44739 * errno=ENXIO when there is no process reading the FIFO.
44740 */
44741 ret = -ENXIO;
44742 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44743 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44744 goto err;
44745
44746 filp->f_op = &write_pipefifo_fops;
44747 pipe->w_counter++;
44748 - if (!pipe->writers++)
44749 + if (atomic_inc_return(&pipe->writers) == 1)
44750 wake_up_partner(inode);
44751
44752 - if (!pipe->readers) {
44753 + if (!atomic_read(&pipe->readers)) {
44754 if (wait_for_partner(inode, &pipe->r_counter))
44755 goto err_wr;
44756 }
44757 @@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44758 */
44759 filp->f_op = &rdwr_pipefifo_fops;
44760
44761 - pipe->readers++;
44762 - pipe->writers++;
44763 + atomic_inc(&pipe->readers);
44764 + atomic_inc(&pipe->writers);
44765 pipe->r_counter++;
44766 pipe->w_counter++;
44767 - if (pipe->readers == 1 || pipe->writers == 1)
44768 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44769 wake_up_partner(inode);
44770 break;
44771
44772 @@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44773 return 0;
44774
44775 err_rd:
44776 - if (!--pipe->readers)
44777 + if (atomic_dec_and_test(&pipe->readers))
44778 wake_up_interruptible(&pipe->wait);
44779 ret = -ERESTARTSYS;
44780 goto err;
44781
44782 err_wr:
44783 - if (!--pipe->writers)
44784 + if (atomic_dec_and_test(&pipe->writers))
44785 wake_up_interruptible(&pipe->wait);
44786 ret = -ERESTARTSYS;
44787 goto err;
44788
44789 err:
44790 - if (!pipe->readers && !pipe->writers)
44791 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44792 free_pipe_info(inode);
44793
44794 err_nocleanup:
44795 diff --git a/fs/file.c b/fs/file.c
44796 index ba3f605..fade102 100644
44797 --- a/fs/file.c
44798 +++ b/fs/file.c
44799 @@ -15,6 +15,7 @@
44800 #include <linux/slab.h>
44801 #include <linux/vmalloc.h>
44802 #include <linux/file.h>
44803 +#include <linux/security.h>
44804 #include <linux/fdtable.h>
44805 #include <linux/bitops.h>
44806 #include <linux/interrupt.h>
44807 @@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
44808 * N.B. For clone tasks sharing a files structure, this test
44809 * will limit the total number of files that can be opened.
44810 */
44811 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
44812 if (nr >= rlimit(RLIMIT_NOFILE))
44813 return -EMFILE;
44814
44815 diff --git a/fs/filesystems.c b/fs/filesystems.c
44816 index 96f2428..f5eeb8e 100644
44817 --- a/fs/filesystems.c
44818 +++ b/fs/filesystems.c
44819 @@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
44820 int len = dot ? dot - name : strlen(name);
44821
44822 fs = __get_fs_type(name, len);
44823 +
44824 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
44825 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44826 +#else
44827 if (!fs && (request_module("%.*s", len, name) == 0))
44828 +#endif
44829 fs = __get_fs_type(name, len);
44830
44831 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
44832 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
44833 index e159e68..e7d2a6f 100644
44834 --- a/fs/fs_struct.c
44835 +++ b/fs/fs_struct.c
44836 @@ -4,6 +4,7 @@
44837 #include <linux/path.h>
44838 #include <linux/slab.h>
44839 #include <linux/fs_struct.h>
44840 +#include <linux/grsecurity.h>
44841 #include "internal.h"
44842
44843 static inline void path_get_longterm(struct path *path)
44844 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
44845 write_seqcount_begin(&fs->seq);
44846 old_root = fs->root;
44847 fs->root = *path;
44848 + gr_set_chroot_entries(current, path);
44849 write_seqcount_end(&fs->seq);
44850 spin_unlock(&fs->lock);
44851 if (old_root.dentry)
44852 @@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44853 return 1;
44854 }
44855
44856 +static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44857 +{
44858 + if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44859 + return 0;
44860 + *p = *new;
44861 +
44862 + gr_set_chroot_entries(task, new);
44863 +
44864 + return 1;
44865 +}
44866 +
44867 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44868 {
44869 struct task_struct *g, *p;
44870 @@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44871 int hits = 0;
44872 spin_lock(&fs->lock);
44873 write_seqcount_begin(&fs->seq);
44874 - hits += replace_path(&fs->root, old_root, new_root);
44875 + hits += replace_root_path(p, &fs->root, old_root, new_root);
44876 hits += replace_path(&fs->pwd, old_root, new_root);
44877 write_seqcount_end(&fs->seq);
44878 while (hits--) {
44879 @@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44880 task_lock(tsk);
44881 spin_lock(&fs->lock);
44882 tsk->fs = NULL;
44883 - kill = !--fs->users;
44884 + gr_clear_chroot_entries(tsk);
44885 + kill = !atomic_dec_return(&fs->users);
44886 spin_unlock(&fs->lock);
44887 task_unlock(tsk);
44888 if (kill)
44889 @@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44890 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44891 /* We don't need to lock fs - think why ;-) */
44892 if (fs) {
44893 - fs->users = 1;
44894 + atomic_set(&fs->users, 1);
44895 fs->in_exec = 0;
44896 spin_lock_init(&fs->lock);
44897 seqcount_init(&fs->seq);
44898 @@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
44899 spin_lock(&old->lock);
44900 fs->root = old->root;
44901 path_get_longterm(&fs->root);
44902 + /* instead of calling gr_set_chroot_entries here,
44903 + we call it from every caller of this function
44904 + */
44905 fs->pwd = old->pwd;
44906 path_get_longterm(&fs->pwd);
44907 spin_unlock(&old->lock);
44908 @@ -151,8 +168,9 @@ int unshare_fs_struct(void)
44909
44910 task_lock(current);
44911 spin_lock(&fs->lock);
44912 - kill = !--fs->users;
44913 + kill = !atomic_dec_return(&fs->users);
44914 current->fs = new_fs;
44915 + gr_set_chroot_entries(current, &new_fs->root);
44916 spin_unlock(&fs->lock);
44917 task_unlock(current);
44918
44919 @@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
44920
44921 int current_umask(void)
44922 {
44923 - return current->fs->umask;
44924 + return current->fs->umask | gr_acl_umask();
44925 }
44926 EXPORT_SYMBOL(current_umask);
44927
44928 /* to be mentioned only in INIT_TASK */
44929 struct fs_struct init_fs = {
44930 - .users = 1,
44931 + .users = ATOMIC_INIT(1),
44932 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44933 .seq = SEQCNT_ZERO,
44934 .umask = 0022,
44935 @@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
44936 task_lock(current);
44937
44938 spin_lock(&init_fs.lock);
44939 - init_fs.users++;
44940 + atomic_inc(&init_fs.users);
44941 spin_unlock(&init_fs.lock);
44942
44943 spin_lock(&fs->lock);
44944 current->fs = &init_fs;
44945 - kill = !--fs->users;
44946 + gr_set_chroot_entries(current, &current->fs->root);
44947 + kill = !atomic_dec_return(&fs->users);
44948 spin_unlock(&fs->lock);
44949
44950 task_unlock(current);
44951 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44952 index 9905350..02eaec4 100644
44953 --- a/fs/fscache/cookie.c
44954 +++ b/fs/fscache/cookie.c
44955 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
44956 parent ? (char *) parent->def->name : "<no-parent>",
44957 def->name, netfs_data);
44958
44959 - fscache_stat(&fscache_n_acquires);
44960 + fscache_stat_unchecked(&fscache_n_acquires);
44961
44962 /* if there's no parent cookie, then we don't create one here either */
44963 if (!parent) {
44964 - fscache_stat(&fscache_n_acquires_null);
44965 + fscache_stat_unchecked(&fscache_n_acquires_null);
44966 _leave(" [no parent]");
44967 return NULL;
44968 }
44969 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
44970 /* allocate and initialise a cookie */
44971 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
44972 if (!cookie) {
44973 - fscache_stat(&fscache_n_acquires_oom);
44974 + fscache_stat_unchecked(&fscache_n_acquires_oom);
44975 _leave(" [ENOMEM]");
44976 return NULL;
44977 }
44978 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44979
44980 switch (cookie->def->type) {
44981 case FSCACHE_COOKIE_TYPE_INDEX:
44982 - fscache_stat(&fscache_n_cookie_index);
44983 + fscache_stat_unchecked(&fscache_n_cookie_index);
44984 break;
44985 case FSCACHE_COOKIE_TYPE_DATAFILE:
44986 - fscache_stat(&fscache_n_cookie_data);
44987 + fscache_stat_unchecked(&fscache_n_cookie_data);
44988 break;
44989 default:
44990 - fscache_stat(&fscache_n_cookie_special);
44991 + fscache_stat_unchecked(&fscache_n_cookie_special);
44992 break;
44993 }
44994
44995 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
44996 if (fscache_acquire_non_index_cookie(cookie) < 0) {
44997 atomic_dec(&parent->n_children);
44998 __fscache_cookie_put(cookie);
44999 - fscache_stat(&fscache_n_acquires_nobufs);
45000 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45001 _leave(" = NULL");
45002 return NULL;
45003 }
45004 }
45005
45006 - fscache_stat(&fscache_n_acquires_ok);
45007 + fscache_stat_unchecked(&fscache_n_acquires_ok);
45008 _leave(" = %p", cookie);
45009 return cookie;
45010 }
45011 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
45012 cache = fscache_select_cache_for_object(cookie->parent);
45013 if (!cache) {
45014 up_read(&fscache_addremove_sem);
45015 - fscache_stat(&fscache_n_acquires_no_cache);
45016 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45017 _leave(" = -ENOMEDIUM [no cache]");
45018 return -ENOMEDIUM;
45019 }
45020 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
45021 object = cache->ops->alloc_object(cache, cookie);
45022 fscache_stat_d(&fscache_n_cop_alloc_object);
45023 if (IS_ERR(object)) {
45024 - fscache_stat(&fscache_n_object_no_alloc);
45025 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
45026 ret = PTR_ERR(object);
45027 goto error;
45028 }
45029
45030 - fscache_stat(&fscache_n_object_alloc);
45031 + fscache_stat_unchecked(&fscache_n_object_alloc);
45032
45033 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45034
45035 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
45036 struct fscache_object *object;
45037 struct hlist_node *_p;
45038
45039 - fscache_stat(&fscache_n_updates);
45040 + fscache_stat_unchecked(&fscache_n_updates);
45041
45042 if (!cookie) {
45043 - fscache_stat(&fscache_n_updates_null);
45044 + fscache_stat_unchecked(&fscache_n_updates_null);
45045 _leave(" [no cookie]");
45046 return;
45047 }
45048 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45049 struct fscache_object *object;
45050 unsigned long event;
45051
45052 - fscache_stat(&fscache_n_relinquishes);
45053 + fscache_stat_unchecked(&fscache_n_relinquishes);
45054 if (retire)
45055 - fscache_stat(&fscache_n_relinquishes_retire);
45056 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45057
45058 if (!cookie) {
45059 - fscache_stat(&fscache_n_relinquishes_null);
45060 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
45061 _leave(" [no cookie]");
45062 return;
45063 }
45064 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
45065
45066 /* wait for the cookie to finish being instantiated (or to fail) */
45067 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45068 - fscache_stat(&fscache_n_relinquishes_waitcrt);
45069 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45070 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45071 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45072 }
45073 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45074 index f6aad48..88dcf26 100644
45075 --- a/fs/fscache/internal.h
45076 +++ b/fs/fscache/internal.h
45077 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45078 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45079 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45080
45081 -extern atomic_t fscache_n_op_pend;
45082 -extern atomic_t fscache_n_op_run;
45083 -extern atomic_t fscache_n_op_enqueue;
45084 -extern atomic_t fscache_n_op_deferred_release;
45085 -extern atomic_t fscache_n_op_release;
45086 -extern atomic_t fscache_n_op_gc;
45087 -extern atomic_t fscache_n_op_cancelled;
45088 -extern atomic_t fscache_n_op_rejected;
45089 +extern atomic_unchecked_t fscache_n_op_pend;
45090 +extern atomic_unchecked_t fscache_n_op_run;
45091 +extern atomic_unchecked_t fscache_n_op_enqueue;
45092 +extern atomic_unchecked_t fscache_n_op_deferred_release;
45093 +extern atomic_unchecked_t fscache_n_op_release;
45094 +extern atomic_unchecked_t fscache_n_op_gc;
45095 +extern atomic_unchecked_t fscache_n_op_cancelled;
45096 +extern atomic_unchecked_t fscache_n_op_rejected;
45097
45098 -extern atomic_t fscache_n_attr_changed;
45099 -extern atomic_t fscache_n_attr_changed_ok;
45100 -extern atomic_t fscache_n_attr_changed_nobufs;
45101 -extern atomic_t fscache_n_attr_changed_nomem;
45102 -extern atomic_t fscache_n_attr_changed_calls;
45103 +extern atomic_unchecked_t fscache_n_attr_changed;
45104 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
45105 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45106 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45107 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
45108
45109 -extern atomic_t fscache_n_allocs;
45110 -extern atomic_t fscache_n_allocs_ok;
45111 -extern atomic_t fscache_n_allocs_wait;
45112 -extern atomic_t fscache_n_allocs_nobufs;
45113 -extern atomic_t fscache_n_allocs_intr;
45114 -extern atomic_t fscache_n_allocs_object_dead;
45115 -extern atomic_t fscache_n_alloc_ops;
45116 -extern atomic_t fscache_n_alloc_op_waits;
45117 +extern atomic_unchecked_t fscache_n_allocs;
45118 +extern atomic_unchecked_t fscache_n_allocs_ok;
45119 +extern atomic_unchecked_t fscache_n_allocs_wait;
45120 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
45121 +extern atomic_unchecked_t fscache_n_allocs_intr;
45122 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
45123 +extern atomic_unchecked_t fscache_n_alloc_ops;
45124 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
45125
45126 -extern atomic_t fscache_n_retrievals;
45127 -extern atomic_t fscache_n_retrievals_ok;
45128 -extern atomic_t fscache_n_retrievals_wait;
45129 -extern atomic_t fscache_n_retrievals_nodata;
45130 -extern atomic_t fscache_n_retrievals_nobufs;
45131 -extern atomic_t fscache_n_retrievals_intr;
45132 -extern atomic_t fscache_n_retrievals_nomem;
45133 -extern atomic_t fscache_n_retrievals_object_dead;
45134 -extern atomic_t fscache_n_retrieval_ops;
45135 -extern atomic_t fscache_n_retrieval_op_waits;
45136 +extern atomic_unchecked_t fscache_n_retrievals;
45137 +extern atomic_unchecked_t fscache_n_retrievals_ok;
45138 +extern atomic_unchecked_t fscache_n_retrievals_wait;
45139 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
45140 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45141 +extern atomic_unchecked_t fscache_n_retrievals_intr;
45142 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
45143 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45144 +extern atomic_unchecked_t fscache_n_retrieval_ops;
45145 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
45146
45147 -extern atomic_t fscache_n_stores;
45148 -extern atomic_t fscache_n_stores_ok;
45149 -extern atomic_t fscache_n_stores_again;
45150 -extern atomic_t fscache_n_stores_nobufs;
45151 -extern atomic_t fscache_n_stores_oom;
45152 -extern atomic_t fscache_n_store_ops;
45153 -extern atomic_t fscache_n_store_calls;
45154 -extern atomic_t fscache_n_store_pages;
45155 -extern atomic_t fscache_n_store_radix_deletes;
45156 -extern atomic_t fscache_n_store_pages_over_limit;
45157 +extern atomic_unchecked_t fscache_n_stores;
45158 +extern atomic_unchecked_t fscache_n_stores_ok;
45159 +extern atomic_unchecked_t fscache_n_stores_again;
45160 +extern atomic_unchecked_t fscache_n_stores_nobufs;
45161 +extern atomic_unchecked_t fscache_n_stores_oom;
45162 +extern atomic_unchecked_t fscache_n_store_ops;
45163 +extern atomic_unchecked_t fscache_n_store_calls;
45164 +extern atomic_unchecked_t fscache_n_store_pages;
45165 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
45166 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
45167
45168 -extern atomic_t fscache_n_store_vmscan_not_storing;
45169 -extern atomic_t fscache_n_store_vmscan_gone;
45170 -extern atomic_t fscache_n_store_vmscan_busy;
45171 -extern atomic_t fscache_n_store_vmscan_cancelled;
45172 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45173 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45174 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45175 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45176
45177 -extern atomic_t fscache_n_marks;
45178 -extern atomic_t fscache_n_uncaches;
45179 +extern atomic_unchecked_t fscache_n_marks;
45180 +extern atomic_unchecked_t fscache_n_uncaches;
45181
45182 -extern atomic_t fscache_n_acquires;
45183 -extern atomic_t fscache_n_acquires_null;
45184 -extern atomic_t fscache_n_acquires_no_cache;
45185 -extern atomic_t fscache_n_acquires_ok;
45186 -extern atomic_t fscache_n_acquires_nobufs;
45187 -extern atomic_t fscache_n_acquires_oom;
45188 +extern atomic_unchecked_t fscache_n_acquires;
45189 +extern atomic_unchecked_t fscache_n_acquires_null;
45190 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
45191 +extern atomic_unchecked_t fscache_n_acquires_ok;
45192 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
45193 +extern atomic_unchecked_t fscache_n_acquires_oom;
45194
45195 -extern atomic_t fscache_n_updates;
45196 -extern atomic_t fscache_n_updates_null;
45197 -extern atomic_t fscache_n_updates_run;
45198 +extern atomic_unchecked_t fscache_n_updates;
45199 +extern atomic_unchecked_t fscache_n_updates_null;
45200 +extern atomic_unchecked_t fscache_n_updates_run;
45201
45202 -extern atomic_t fscache_n_relinquishes;
45203 -extern atomic_t fscache_n_relinquishes_null;
45204 -extern atomic_t fscache_n_relinquishes_waitcrt;
45205 -extern atomic_t fscache_n_relinquishes_retire;
45206 +extern atomic_unchecked_t fscache_n_relinquishes;
45207 +extern atomic_unchecked_t fscache_n_relinquishes_null;
45208 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45209 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
45210
45211 -extern atomic_t fscache_n_cookie_index;
45212 -extern atomic_t fscache_n_cookie_data;
45213 -extern atomic_t fscache_n_cookie_special;
45214 +extern atomic_unchecked_t fscache_n_cookie_index;
45215 +extern atomic_unchecked_t fscache_n_cookie_data;
45216 +extern atomic_unchecked_t fscache_n_cookie_special;
45217
45218 -extern atomic_t fscache_n_object_alloc;
45219 -extern atomic_t fscache_n_object_no_alloc;
45220 -extern atomic_t fscache_n_object_lookups;
45221 -extern atomic_t fscache_n_object_lookups_negative;
45222 -extern atomic_t fscache_n_object_lookups_positive;
45223 -extern atomic_t fscache_n_object_lookups_timed_out;
45224 -extern atomic_t fscache_n_object_created;
45225 -extern atomic_t fscache_n_object_avail;
45226 -extern atomic_t fscache_n_object_dead;
45227 +extern atomic_unchecked_t fscache_n_object_alloc;
45228 +extern atomic_unchecked_t fscache_n_object_no_alloc;
45229 +extern atomic_unchecked_t fscache_n_object_lookups;
45230 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
45231 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
45232 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45233 +extern atomic_unchecked_t fscache_n_object_created;
45234 +extern atomic_unchecked_t fscache_n_object_avail;
45235 +extern atomic_unchecked_t fscache_n_object_dead;
45236
45237 -extern atomic_t fscache_n_checkaux_none;
45238 -extern atomic_t fscache_n_checkaux_okay;
45239 -extern atomic_t fscache_n_checkaux_update;
45240 -extern atomic_t fscache_n_checkaux_obsolete;
45241 +extern atomic_unchecked_t fscache_n_checkaux_none;
45242 +extern atomic_unchecked_t fscache_n_checkaux_okay;
45243 +extern atomic_unchecked_t fscache_n_checkaux_update;
45244 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45245
45246 extern atomic_t fscache_n_cop_alloc_object;
45247 extern atomic_t fscache_n_cop_lookup_object;
45248 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
45249 atomic_inc(stat);
45250 }
45251
45252 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45253 +{
45254 + atomic_inc_unchecked(stat);
45255 +}
45256 +
45257 static inline void fscache_stat_d(atomic_t *stat)
45258 {
45259 atomic_dec(stat);
45260 @@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
45261
45262 #define __fscache_stat(stat) (NULL)
45263 #define fscache_stat(stat) do {} while (0)
45264 +#define fscache_stat_unchecked(stat) do {} while (0)
45265 #define fscache_stat_d(stat) do {} while (0)
45266 #endif
45267
45268 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45269 index b6b897c..0ffff9c 100644
45270 --- a/fs/fscache/object.c
45271 +++ b/fs/fscache/object.c
45272 @@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45273 /* update the object metadata on disk */
45274 case FSCACHE_OBJECT_UPDATING:
45275 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45276 - fscache_stat(&fscache_n_updates_run);
45277 + fscache_stat_unchecked(&fscache_n_updates_run);
45278 fscache_stat(&fscache_n_cop_update_object);
45279 object->cache->ops->update_object(object);
45280 fscache_stat_d(&fscache_n_cop_update_object);
45281 @@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45282 spin_lock(&object->lock);
45283 object->state = FSCACHE_OBJECT_DEAD;
45284 spin_unlock(&object->lock);
45285 - fscache_stat(&fscache_n_object_dead);
45286 + fscache_stat_unchecked(&fscache_n_object_dead);
45287 goto terminal_transit;
45288
45289 /* handle the parent cache of this object being withdrawn from
45290 @@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
45291 spin_lock(&object->lock);
45292 object->state = FSCACHE_OBJECT_DEAD;
45293 spin_unlock(&object->lock);
45294 - fscache_stat(&fscache_n_object_dead);
45295 + fscache_stat_unchecked(&fscache_n_object_dead);
45296 goto terminal_transit;
45297
45298 /* complain about the object being woken up once it is
45299 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45300 parent->cookie->def->name, cookie->def->name,
45301 object->cache->tag->name);
45302
45303 - fscache_stat(&fscache_n_object_lookups);
45304 + fscache_stat_unchecked(&fscache_n_object_lookups);
45305 fscache_stat(&fscache_n_cop_lookup_object);
45306 ret = object->cache->ops->lookup_object(object);
45307 fscache_stat_d(&fscache_n_cop_lookup_object);
45308 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
45309 if (ret == -ETIMEDOUT) {
45310 /* probably stuck behind another object, so move this one to
45311 * the back of the queue */
45312 - fscache_stat(&fscache_n_object_lookups_timed_out);
45313 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45314 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45315 }
45316
45317 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
45318
45319 spin_lock(&object->lock);
45320 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45321 - fscache_stat(&fscache_n_object_lookups_negative);
45322 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45323
45324 /* transit here to allow write requests to begin stacking up
45325 * and read requests to begin returning ENODATA */
45326 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
45327 * result, in which case there may be data available */
45328 spin_lock(&object->lock);
45329 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45330 - fscache_stat(&fscache_n_object_lookups_positive);
45331 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45332
45333 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45334
45335 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
45336 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45337 } else {
45338 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45339 - fscache_stat(&fscache_n_object_created);
45340 + fscache_stat_unchecked(&fscache_n_object_created);
45341
45342 object->state = FSCACHE_OBJECT_AVAILABLE;
45343 spin_unlock(&object->lock);
45344 @@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
45345 fscache_enqueue_dependents(object);
45346
45347 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45348 - fscache_stat(&fscache_n_object_avail);
45349 + fscache_stat_unchecked(&fscache_n_object_avail);
45350
45351 _leave("");
45352 }
45353 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45354 enum fscache_checkaux result;
45355
45356 if (!object->cookie->def->check_aux) {
45357 - fscache_stat(&fscache_n_checkaux_none);
45358 + fscache_stat_unchecked(&fscache_n_checkaux_none);
45359 return FSCACHE_CHECKAUX_OKAY;
45360 }
45361
45362 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
45363 switch (result) {
45364 /* entry okay as is */
45365 case FSCACHE_CHECKAUX_OKAY:
45366 - fscache_stat(&fscache_n_checkaux_okay);
45367 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
45368 break;
45369
45370 /* entry requires update */
45371 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45372 - fscache_stat(&fscache_n_checkaux_update);
45373 + fscache_stat_unchecked(&fscache_n_checkaux_update);
45374 break;
45375
45376 /* entry requires deletion */
45377 case FSCACHE_CHECKAUX_OBSOLETE:
45378 - fscache_stat(&fscache_n_checkaux_obsolete);
45379 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45380 break;
45381
45382 default:
45383 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45384 index 30afdfa..2256596 100644
45385 --- a/fs/fscache/operation.c
45386 +++ b/fs/fscache/operation.c
45387 @@ -17,7 +17,7 @@
45388 #include <linux/slab.h>
45389 #include "internal.h"
45390
45391 -atomic_t fscache_op_debug_id;
45392 +atomic_unchecked_t fscache_op_debug_id;
45393 EXPORT_SYMBOL(fscache_op_debug_id);
45394
45395 /**
45396 @@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
45397 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45398 ASSERTCMP(atomic_read(&op->usage), >, 0);
45399
45400 - fscache_stat(&fscache_n_op_enqueue);
45401 + fscache_stat_unchecked(&fscache_n_op_enqueue);
45402 switch (op->flags & FSCACHE_OP_TYPE) {
45403 case FSCACHE_OP_ASYNC:
45404 _debug("queue async");
45405 @@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
45406 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45407 if (op->processor)
45408 fscache_enqueue_operation(op);
45409 - fscache_stat(&fscache_n_op_run);
45410 + fscache_stat_unchecked(&fscache_n_op_run);
45411 }
45412
45413 /*
45414 @@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45415 if (object->n_ops > 1) {
45416 atomic_inc(&op->usage);
45417 list_add_tail(&op->pend_link, &object->pending_ops);
45418 - fscache_stat(&fscache_n_op_pend);
45419 + fscache_stat_unchecked(&fscache_n_op_pend);
45420 } else if (!list_empty(&object->pending_ops)) {
45421 atomic_inc(&op->usage);
45422 list_add_tail(&op->pend_link, &object->pending_ops);
45423 - fscache_stat(&fscache_n_op_pend);
45424 + fscache_stat_unchecked(&fscache_n_op_pend);
45425 fscache_start_operations(object);
45426 } else {
45427 ASSERTCMP(object->n_in_progress, ==, 0);
45428 @@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
45429 object->n_exclusive++; /* reads and writes must wait */
45430 atomic_inc(&op->usage);
45431 list_add_tail(&op->pend_link, &object->pending_ops);
45432 - fscache_stat(&fscache_n_op_pend);
45433 + fscache_stat_unchecked(&fscache_n_op_pend);
45434 ret = 0;
45435 } else {
45436 /* not allowed to submit ops in any other state */
45437 @@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
45438 if (object->n_exclusive > 0) {
45439 atomic_inc(&op->usage);
45440 list_add_tail(&op->pend_link, &object->pending_ops);
45441 - fscache_stat(&fscache_n_op_pend);
45442 + fscache_stat_unchecked(&fscache_n_op_pend);
45443 } else if (!list_empty(&object->pending_ops)) {
45444 atomic_inc(&op->usage);
45445 list_add_tail(&op->pend_link, &object->pending_ops);
45446 - fscache_stat(&fscache_n_op_pend);
45447 + fscache_stat_unchecked(&fscache_n_op_pend);
45448 fscache_start_operations(object);
45449 } else {
45450 ASSERTCMP(object->n_exclusive, ==, 0);
45451 @@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
45452 object->n_ops++;
45453 atomic_inc(&op->usage);
45454 list_add_tail(&op->pend_link, &object->pending_ops);
45455 - fscache_stat(&fscache_n_op_pend);
45456 + fscache_stat_unchecked(&fscache_n_op_pend);
45457 ret = 0;
45458 } else if (object->state == FSCACHE_OBJECT_DYING ||
45459 object->state == FSCACHE_OBJECT_LC_DYING ||
45460 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45461 - fscache_stat(&fscache_n_op_rejected);
45462 + fscache_stat_unchecked(&fscache_n_op_rejected);
45463 ret = -ENOBUFS;
45464 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45465 fscache_report_unexpected_submission(object, op, ostate);
45466 @@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
45467
45468 ret = -EBUSY;
45469 if (!list_empty(&op->pend_link)) {
45470 - fscache_stat(&fscache_n_op_cancelled);
45471 + fscache_stat_unchecked(&fscache_n_op_cancelled);
45472 list_del_init(&op->pend_link);
45473 object->n_ops--;
45474 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
45475 @@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
45476 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45477 BUG();
45478
45479 - fscache_stat(&fscache_n_op_release);
45480 + fscache_stat_unchecked(&fscache_n_op_release);
45481
45482 if (op->release) {
45483 op->release(op);
45484 @@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
45485 * lock, and defer it otherwise */
45486 if (!spin_trylock(&object->lock)) {
45487 _debug("defer put");
45488 - fscache_stat(&fscache_n_op_deferred_release);
45489 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
45490
45491 cache = object->cache;
45492 spin_lock(&cache->op_gc_list_lock);
45493 @@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
45494
45495 _debug("GC DEFERRED REL OBJ%x OP%x",
45496 object->debug_id, op->debug_id);
45497 - fscache_stat(&fscache_n_op_gc);
45498 + fscache_stat_unchecked(&fscache_n_op_gc);
45499
45500 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45501
45502 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45503 index 3f7a59b..cf196cc 100644
45504 --- a/fs/fscache/page.c
45505 +++ b/fs/fscache/page.c
45506 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45507 val = radix_tree_lookup(&cookie->stores, page->index);
45508 if (!val) {
45509 rcu_read_unlock();
45510 - fscache_stat(&fscache_n_store_vmscan_not_storing);
45511 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45512 __fscache_uncache_page(cookie, page);
45513 return true;
45514 }
45515 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
45516 spin_unlock(&cookie->stores_lock);
45517
45518 if (xpage) {
45519 - fscache_stat(&fscache_n_store_vmscan_cancelled);
45520 - fscache_stat(&fscache_n_store_radix_deletes);
45521 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45522 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45523 ASSERTCMP(xpage, ==, page);
45524 } else {
45525 - fscache_stat(&fscache_n_store_vmscan_gone);
45526 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45527 }
45528
45529 wake_up_bit(&cookie->flags, 0);
45530 @@ -107,7 +107,7 @@ page_busy:
45531 /* we might want to wait here, but that could deadlock the allocator as
45532 * the work threads writing to the cache may all end up sleeping
45533 * on memory allocation */
45534 - fscache_stat(&fscache_n_store_vmscan_busy);
45535 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45536 return false;
45537 }
45538 EXPORT_SYMBOL(__fscache_maybe_release_page);
45539 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
45540 FSCACHE_COOKIE_STORING_TAG);
45541 if (!radix_tree_tag_get(&cookie->stores, page->index,
45542 FSCACHE_COOKIE_PENDING_TAG)) {
45543 - fscache_stat(&fscache_n_store_radix_deletes);
45544 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45545 xpage = radix_tree_delete(&cookie->stores, page->index);
45546 }
45547 spin_unlock(&cookie->stores_lock);
45548 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
45549
45550 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45551
45552 - fscache_stat(&fscache_n_attr_changed_calls);
45553 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45554
45555 if (fscache_object_is_active(object)) {
45556 fscache_stat(&fscache_n_cop_attr_changed);
45557 @@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45558
45559 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45560
45561 - fscache_stat(&fscache_n_attr_changed);
45562 + fscache_stat_unchecked(&fscache_n_attr_changed);
45563
45564 op = kzalloc(sizeof(*op), GFP_KERNEL);
45565 if (!op) {
45566 - fscache_stat(&fscache_n_attr_changed_nomem);
45567 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45568 _leave(" = -ENOMEM");
45569 return -ENOMEM;
45570 }
45571 @@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45572 if (fscache_submit_exclusive_op(object, op) < 0)
45573 goto nobufs;
45574 spin_unlock(&cookie->lock);
45575 - fscache_stat(&fscache_n_attr_changed_ok);
45576 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45577 fscache_put_operation(op);
45578 _leave(" = 0");
45579 return 0;
45580 @@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
45581 nobufs:
45582 spin_unlock(&cookie->lock);
45583 kfree(op);
45584 - fscache_stat(&fscache_n_attr_changed_nobufs);
45585 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45586 _leave(" = %d", -ENOBUFS);
45587 return -ENOBUFS;
45588 }
45589 @@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
45590 /* allocate a retrieval operation and attempt to submit it */
45591 op = kzalloc(sizeof(*op), GFP_NOIO);
45592 if (!op) {
45593 - fscache_stat(&fscache_n_retrievals_nomem);
45594 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45595 return NULL;
45596 }
45597
45598 @@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45599 return 0;
45600 }
45601
45602 - fscache_stat(&fscache_n_retrievals_wait);
45603 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
45604
45605 jif = jiffies;
45606 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45607 fscache_wait_bit_interruptible,
45608 TASK_INTERRUPTIBLE) != 0) {
45609 - fscache_stat(&fscache_n_retrievals_intr);
45610 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45611 _leave(" = -ERESTARTSYS");
45612 return -ERESTARTSYS;
45613 }
45614 @@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
45615 */
45616 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45617 struct fscache_retrieval *op,
45618 - atomic_t *stat_op_waits,
45619 - atomic_t *stat_object_dead)
45620 + atomic_unchecked_t *stat_op_waits,
45621 + atomic_unchecked_t *stat_object_dead)
45622 {
45623 int ret;
45624
45625 @@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45626 goto check_if_dead;
45627
45628 _debug(">>> WT");
45629 - fscache_stat(stat_op_waits);
45630 + fscache_stat_unchecked(stat_op_waits);
45631 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45632 fscache_wait_bit_interruptible,
45633 TASK_INTERRUPTIBLE) < 0) {
45634 @@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45635
45636 check_if_dead:
45637 if (unlikely(fscache_object_is_dead(object))) {
45638 - fscache_stat(stat_object_dead);
45639 + fscache_stat_unchecked(stat_object_dead);
45640 return -ENOBUFS;
45641 }
45642 return 0;
45643 @@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45644
45645 _enter("%p,%p,,,", cookie, page);
45646
45647 - fscache_stat(&fscache_n_retrievals);
45648 + fscache_stat_unchecked(&fscache_n_retrievals);
45649
45650 if (hlist_empty(&cookie->backing_objects))
45651 goto nobufs;
45652 @@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45653 goto nobufs_unlock;
45654 spin_unlock(&cookie->lock);
45655
45656 - fscache_stat(&fscache_n_retrieval_ops);
45657 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45658
45659 /* pin the netfs read context in case we need to do the actual netfs
45660 * read because we've encountered a cache read failure */
45661 @@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
45662
45663 error:
45664 if (ret == -ENOMEM)
45665 - fscache_stat(&fscache_n_retrievals_nomem);
45666 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45667 else if (ret == -ERESTARTSYS)
45668 - fscache_stat(&fscache_n_retrievals_intr);
45669 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45670 else if (ret == -ENODATA)
45671 - fscache_stat(&fscache_n_retrievals_nodata);
45672 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45673 else if (ret < 0)
45674 - fscache_stat(&fscache_n_retrievals_nobufs);
45675 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45676 else
45677 - fscache_stat(&fscache_n_retrievals_ok);
45678 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45679
45680 fscache_put_retrieval(op);
45681 _leave(" = %d", ret);
45682 @@ -429,7 +429,7 @@ nobufs_unlock:
45683 spin_unlock(&cookie->lock);
45684 kfree(op);
45685 nobufs:
45686 - fscache_stat(&fscache_n_retrievals_nobufs);
45687 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45688 _leave(" = -ENOBUFS");
45689 return -ENOBUFS;
45690 }
45691 @@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45692
45693 _enter("%p,,%d,,,", cookie, *nr_pages);
45694
45695 - fscache_stat(&fscache_n_retrievals);
45696 + fscache_stat_unchecked(&fscache_n_retrievals);
45697
45698 if (hlist_empty(&cookie->backing_objects))
45699 goto nobufs;
45700 @@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45701 goto nobufs_unlock;
45702 spin_unlock(&cookie->lock);
45703
45704 - fscache_stat(&fscache_n_retrieval_ops);
45705 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
45706
45707 /* pin the netfs read context in case we need to do the actual netfs
45708 * read because we've encountered a cache read failure */
45709 @@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
45710
45711 error:
45712 if (ret == -ENOMEM)
45713 - fscache_stat(&fscache_n_retrievals_nomem);
45714 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45715 else if (ret == -ERESTARTSYS)
45716 - fscache_stat(&fscache_n_retrievals_intr);
45717 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
45718 else if (ret == -ENODATA)
45719 - fscache_stat(&fscache_n_retrievals_nodata);
45720 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45721 else if (ret < 0)
45722 - fscache_stat(&fscache_n_retrievals_nobufs);
45723 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45724 else
45725 - fscache_stat(&fscache_n_retrievals_ok);
45726 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
45727
45728 fscache_put_retrieval(op);
45729 _leave(" = %d", ret);
45730 @@ -545,7 +545,7 @@ nobufs_unlock:
45731 spin_unlock(&cookie->lock);
45732 kfree(op);
45733 nobufs:
45734 - fscache_stat(&fscache_n_retrievals_nobufs);
45735 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45736 _leave(" = -ENOBUFS");
45737 return -ENOBUFS;
45738 }
45739 @@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45740
45741 _enter("%p,%p,,,", cookie, page);
45742
45743 - fscache_stat(&fscache_n_allocs);
45744 + fscache_stat_unchecked(&fscache_n_allocs);
45745
45746 if (hlist_empty(&cookie->backing_objects))
45747 goto nobufs;
45748 @@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45749 goto nobufs_unlock;
45750 spin_unlock(&cookie->lock);
45751
45752 - fscache_stat(&fscache_n_alloc_ops);
45753 + fscache_stat_unchecked(&fscache_n_alloc_ops);
45754
45755 ret = fscache_wait_for_retrieval_activation(
45756 object, op,
45757 @@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
45758
45759 error:
45760 if (ret == -ERESTARTSYS)
45761 - fscache_stat(&fscache_n_allocs_intr);
45762 + fscache_stat_unchecked(&fscache_n_allocs_intr);
45763 else if (ret < 0)
45764 - fscache_stat(&fscache_n_allocs_nobufs);
45765 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45766 else
45767 - fscache_stat(&fscache_n_allocs_ok);
45768 + fscache_stat_unchecked(&fscache_n_allocs_ok);
45769
45770 fscache_put_retrieval(op);
45771 _leave(" = %d", ret);
45772 @@ -625,7 +625,7 @@ nobufs_unlock:
45773 spin_unlock(&cookie->lock);
45774 kfree(op);
45775 nobufs:
45776 - fscache_stat(&fscache_n_allocs_nobufs);
45777 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45778 _leave(" = -ENOBUFS");
45779 return -ENOBUFS;
45780 }
45781 @@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45782
45783 spin_lock(&cookie->stores_lock);
45784
45785 - fscache_stat(&fscache_n_store_calls);
45786 + fscache_stat_unchecked(&fscache_n_store_calls);
45787
45788 /* find a page to store */
45789 page = NULL;
45790 @@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45791 page = results[0];
45792 _debug("gang %d [%lx]", n, page->index);
45793 if (page->index > op->store_limit) {
45794 - fscache_stat(&fscache_n_store_pages_over_limit);
45795 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45796 goto superseded;
45797 }
45798
45799 @@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
45800 spin_unlock(&cookie->stores_lock);
45801 spin_unlock(&object->lock);
45802
45803 - fscache_stat(&fscache_n_store_pages);
45804 + fscache_stat_unchecked(&fscache_n_store_pages);
45805 fscache_stat(&fscache_n_cop_write_page);
45806 ret = object->cache->ops->write_page(op, page);
45807 fscache_stat_d(&fscache_n_cop_write_page);
45808 @@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45809 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45810 ASSERT(PageFsCache(page));
45811
45812 - fscache_stat(&fscache_n_stores);
45813 + fscache_stat_unchecked(&fscache_n_stores);
45814
45815 op = kzalloc(sizeof(*op), GFP_NOIO);
45816 if (!op)
45817 @@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45818 spin_unlock(&cookie->stores_lock);
45819 spin_unlock(&object->lock);
45820
45821 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45822 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45823 op->store_limit = object->store_limit;
45824
45825 if (fscache_submit_op(object, &op->op) < 0)
45826 @@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45827
45828 spin_unlock(&cookie->lock);
45829 radix_tree_preload_end();
45830 - fscache_stat(&fscache_n_store_ops);
45831 - fscache_stat(&fscache_n_stores_ok);
45832 + fscache_stat_unchecked(&fscache_n_store_ops);
45833 + fscache_stat_unchecked(&fscache_n_stores_ok);
45834
45835 /* the work queue now carries its own ref on the object */
45836 fscache_put_operation(&op->op);
45837 @@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
45838 return 0;
45839
45840 already_queued:
45841 - fscache_stat(&fscache_n_stores_again);
45842 + fscache_stat_unchecked(&fscache_n_stores_again);
45843 already_pending:
45844 spin_unlock(&cookie->stores_lock);
45845 spin_unlock(&object->lock);
45846 spin_unlock(&cookie->lock);
45847 radix_tree_preload_end();
45848 kfree(op);
45849 - fscache_stat(&fscache_n_stores_ok);
45850 + fscache_stat_unchecked(&fscache_n_stores_ok);
45851 _leave(" = 0");
45852 return 0;
45853
45854 @@ -851,14 +851,14 @@ nobufs:
45855 spin_unlock(&cookie->lock);
45856 radix_tree_preload_end();
45857 kfree(op);
45858 - fscache_stat(&fscache_n_stores_nobufs);
45859 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
45860 _leave(" = -ENOBUFS");
45861 return -ENOBUFS;
45862
45863 nomem_free:
45864 kfree(op);
45865 nomem:
45866 - fscache_stat(&fscache_n_stores_oom);
45867 + fscache_stat_unchecked(&fscache_n_stores_oom);
45868 _leave(" = -ENOMEM");
45869 return -ENOMEM;
45870 }
45871 @@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
45872 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45873 ASSERTCMP(page, !=, NULL);
45874
45875 - fscache_stat(&fscache_n_uncaches);
45876 + fscache_stat_unchecked(&fscache_n_uncaches);
45877
45878 /* cache withdrawal may beat us to it */
45879 if (!PageFsCache(page))
45880 @@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
45881 unsigned long loop;
45882
45883 #ifdef CONFIG_FSCACHE_STATS
45884 - atomic_add(pagevec->nr, &fscache_n_marks);
45885 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45886 #endif
45887
45888 for (loop = 0; loop < pagevec->nr; loop++) {
45889 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45890 index 4765190..2a067f2 100644
45891 --- a/fs/fscache/stats.c
45892 +++ b/fs/fscache/stats.c
45893 @@ -18,95 +18,95 @@
45894 /*
45895 * operation counters
45896 */
45897 -atomic_t fscache_n_op_pend;
45898 -atomic_t fscache_n_op_run;
45899 -atomic_t fscache_n_op_enqueue;
45900 -atomic_t fscache_n_op_requeue;
45901 -atomic_t fscache_n_op_deferred_release;
45902 -atomic_t fscache_n_op_release;
45903 -atomic_t fscache_n_op_gc;
45904 -atomic_t fscache_n_op_cancelled;
45905 -atomic_t fscache_n_op_rejected;
45906 +atomic_unchecked_t fscache_n_op_pend;
45907 +atomic_unchecked_t fscache_n_op_run;
45908 +atomic_unchecked_t fscache_n_op_enqueue;
45909 +atomic_unchecked_t fscache_n_op_requeue;
45910 +atomic_unchecked_t fscache_n_op_deferred_release;
45911 +atomic_unchecked_t fscache_n_op_release;
45912 +atomic_unchecked_t fscache_n_op_gc;
45913 +atomic_unchecked_t fscache_n_op_cancelled;
45914 +atomic_unchecked_t fscache_n_op_rejected;
45915
45916 -atomic_t fscache_n_attr_changed;
45917 -atomic_t fscache_n_attr_changed_ok;
45918 -atomic_t fscache_n_attr_changed_nobufs;
45919 -atomic_t fscache_n_attr_changed_nomem;
45920 -atomic_t fscache_n_attr_changed_calls;
45921 +atomic_unchecked_t fscache_n_attr_changed;
45922 +atomic_unchecked_t fscache_n_attr_changed_ok;
45923 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
45924 +atomic_unchecked_t fscache_n_attr_changed_nomem;
45925 +atomic_unchecked_t fscache_n_attr_changed_calls;
45926
45927 -atomic_t fscache_n_allocs;
45928 -atomic_t fscache_n_allocs_ok;
45929 -atomic_t fscache_n_allocs_wait;
45930 -atomic_t fscache_n_allocs_nobufs;
45931 -atomic_t fscache_n_allocs_intr;
45932 -atomic_t fscache_n_allocs_object_dead;
45933 -atomic_t fscache_n_alloc_ops;
45934 -atomic_t fscache_n_alloc_op_waits;
45935 +atomic_unchecked_t fscache_n_allocs;
45936 +atomic_unchecked_t fscache_n_allocs_ok;
45937 +atomic_unchecked_t fscache_n_allocs_wait;
45938 +atomic_unchecked_t fscache_n_allocs_nobufs;
45939 +atomic_unchecked_t fscache_n_allocs_intr;
45940 +atomic_unchecked_t fscache_n_allocs_object_dead;
45941 +atomic_unchecked_t fscache_n_alloc_ops;
45942 +atomic_unchecked_t fscache_n_alloc_op_waits;
45943
45944 -atomic_t fscache_n_retrievals;
45945 -atomic_t fscache_n_retrievals_ok;
45946 -atomic_t fscache_n_retrievals_wait;
45947 -atomic_t fscache_n_retrievals_nodata;
45948 -atomic_t fscache_n_retrievals_nobufs;
45949 -atomic_t fscache_n_retrievals_intr;
45950 -atomic_t fscache_n_retrievals_nomem;
45951 -atomic_t fscache_n_retrievals_object_dead;
45952 -atomic_t fscache_n_retrieval_ops;
45953 -atomic_t fscache_n_retrieval_op_waits;
45954 +atomic_unchecked_t fscache_n_retrievals;
45955 +atomic_unchecked_t fscache_n_retrievals_ok;
45956 +atomic_unchecked_t fscache_n_retrievals_wait;
45957 +atomic_unchecked_t fscache_n_retrievals_nodata;
45958 +atomic_unchecked_t fscache_n_retrievals_nobufs;
45959 +atomic_unchecked_t fscache_n_retrievals_intr;
45960 +atomic_unchecked_t fscache_n_retrievals_nomem;
45961 +atomic_unchecked_t fscache_n_retrievals_object_dead;
45962 +atomic_unchecked_t fscache_n_retrieval_ops;
45963 +atomic_unchecked_t fscache_n_retrieval_op_waits;
45964
45965 -atomic_t fscache_n_stores;
45966 -atomic_t fscache_n_stores_ok;
45967 -atomic_t fscache_n_stores_again;
45968 -atomic_t fscache_n_stores_nobufs;
45969 -atomic_t fscache_n_stores_oom;
45970 -atomic_t fscache_n_store_ops;
45971 -atomic_t fscache_n_store_calls;
45972 -atomic_t fscache_n_store_pages;
45973 -atomic_t fscache_n_store_radix_deletes;
45974 -atomic_t fscache_n_store_pages_over_limit;
45975 +atomic_unchecked_t fscache_n_stores;
45976 +atomic_unchecked_t fscache_n_stores_ok;
45977 +atomic_unchecked_t fscache_n_stores_again;
45978 +atomic_unchecked_t fscache_n_stores_nobufs;
45979 +atomic_unchecked_t fscache_n_stores_oom;
45980 +atomic_unchecked_t fscache_n_store_ops;
45981 +atomic_unchecked_t fscache_n_store_calls;
45982 +atomic_unchecked_t fscache_n_store_pages;
45983 +atomic_unchecked_t fscache_n_store_radix_deletes;
45984 +atomic_unchecked_t fscache_n_store_pages_over_limit;
45985
45986 -atomic_t fscache_n_store_vmscan_not_storing;
45987 -atomic_t fscache_n_store_vmscan_gone;
45988 -atomic_t fscache_n_store_vmscan_busy;
45989 -atomic_t fscache_n_store_vmscan_cancelled;
45990 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45991 +atomic_unchecked_t fscache_n_store_vmscan_gone;
45992 +atomic_unchecked_t fscache_n_store_vmscan_busy;
45993 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
45994
45995 -atomic_t fscache_n_marks;
45996 -atomic_t fscache_n_uncaches;
45997 +atomic_unchecked_t fscache_n_marks;
45998 +atomic_unchecked_t fscache_n_uncaches;
45999
46000 -atomic_t fscache_n_acquires;
46001 -atomic_t fscache_n_acquires_null;
46002 -atomic_t fscache_n_acquires_no_cache;
46003 -atomic_t fscache_n_acquires_ok;
46004 -atomic_t fscache_n_acquires_nobufs;
46005 -atomic_t fscache_n_acquires_oom;
46006 +atomic_unchecked_t fscache_n_acquires;
46007 +atomic_unchecked_t fscache_n_acquires_null;
46008 +atomic_unchecked_t fscache_n_acquires_no_cache;
46009 +atomic_unchecked_t fscache_n_acquires_ok;
46010 +atomic_unchecked_t fscache_n_acquires_nobufs;
46011 +atomic_unchecked_t fscache_n_acquires_oom;
46012
46013 -atomic_t fscache_n_updates;
46014 -atomic_t fscache_n_updates_null;
46015 -atomic_t fscache_n_updates_run;
46016 +atomic_unchecked_t fscache_n_updates;
46017 +atomic_unchecked_t fscache_n_updates_null;
46018 +atomic_unchecked_t fscache_n_updates_run;
46019
46020 -atomic_t fscache_n_relinquishes;
46021 -atomic_t fscache_n_relinquishes_null;
46022 -atomic_t fscache_n_relinquishes_waitcrt;
46023 -atomic_t fscache_n_relinquishes_retire;
46024 +atomic_unchecked_t fscache_n_relinquishes;
46025 +atomic_unchecked_t fscache_n_relinquishes_null;
46026 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46027 +atomic_unchecked_t fscache_n_relinquishes_retire;
46028
46029 -atomic_t fscache_n_cookie_index;
46030 -atomic_t fscache_n_cookie_data;
46031 -atomic_t fscache_n_cookie_special;
46032 +atomic_unchecked_t fscache_n_cookie_index;
46033 +atomic_unchecked_t fscache_n_cookie_data;
46034 +atomic_unchecked_t fscache_n_cookie_special;
46035
46036 -atomic_t fscache_n_object_alloc;
46037 -atomic_t fscache_n_object_no_alloc;
46038 -atomic_t fscache_n_object_lookups;
46039 -atomic_t fscache_n_object_lookups_negative;
46040 -atomic_t fscache_n_object_lookups_positive;
46041 -atomic_t fscache_n_object_lookups_timed_out;
46042 -atomic_t fscache_n_object_created;
46043 -atomic_t fscache_n_object_avail;
46044 -atomic_t fscache_n_object_dead;
46045 +atomic_unchecked_t fscache_n_object_alloc;
46046 +atomic_unchecked_t fscache_n_object_no_alloc;
46047 +atomic_unchecked_t fscache_n_object_lookups;
46048 +atomic_unchecked_t fscache_n_object_lookups_negative;
46049 +atomic_unchecked_t fscache_n_object_lookups_positive;
46050 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
46051 +atomic_unchecked_t fscache_n_object_created;
46052 +atomic_unchecked_t fscache_n_object_avail;
46053 +atomic_unchecked_t fscache_n_object_dead;
46054
46055 -atomic_t fscache_n_checkaux_none;
46056 -atomic_t fscache_n_checkaux_okay;
46057 -atomic_t fscache_n_checkaux_update;
46058 -atomic_t fscache_n_checkaux_obsolete;
46059 +atomic_unchecked_t fscache_n_checkaux_none;
46060 +atomic_unchecked_t fscache_n_checkaux_okay;
46061 +atomic_unchecked_t fscache_n_checkaux_update;
46062 +atomic_unchecked_t fscache_n_checkaux_obsolete;
46063
46064 atomic_t fscache_n_cop_alloc_object;
46065 atomic_t fscache_n_cop_lookup_object;
46066 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
46067 seq_puts(m, "FS-Cache statistics\n");
46068
46069 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46070 - atomic_read(&fscache_n_cookie_index),
46071 - atomic_read(&fscache_n_cookie_data),
46072 - atomic_read(&fscache_n_cookie_special));
46073 + atomic_read_unchecked(&fscache_n_cookie_index),
46074 + atomic_read_unchecked(&fscache_n_cookie_data),
46075 + atomic_read_unchecked(&fscache_n_cookie_special));
46076
46077 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46078 - atomic_read(&fscache_n_object_alloc),
46079 - atomic_read(&fscache_n_object_no_alloc),
46080 - atomic_read(&fscache_n_object_avail),
46081 - atomic_read(&fscache_n_object_dead));
46082 + atomic_read_unchecked(&fscache_n_object_alloc),
46083 + atomic_read_unchecked(&fscache_n_object_no_alloc),
46084 + atomic_read_unchecked(&fscache_n_object_avail),
46085 + atomic_read_unchecked(&fscache_n_object_dead));
46086 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46087 - atomic_read(&fscache_n_checkaux_none),
46088 - atomic_read(&fscache_n_checkaux_okay),
46089 - atomic_read(&fscache_n_checkaux_update),
46090 - atomic_read(&fscache_n_checkaux_obsolete));
46091 + atomic_read_unchecked(&fscache_n_checkaux_none),
46092 + atomic_read_unchecked(&fscache_n_checkaux_okay),
46093 + atomic_read_unchecked(&fscache_n_checkaux_update),
46094 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46095
46096 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46097 - atomic_read(&fscache_n_marks),
46098 - atomic_read(&fscache_n_uncaches));
46099 + atomic_read_unchecked(&fscache_n_marks),
46100 + atomic_read_unchecked(&fscache_n_uncaches));
46101
46102 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46103 " oom=%u\n",
46104 - atomic_read(&fscache_n_acquires),
46105 - atomic_read(&fscache_n_acquires_null),
46106 - atomic_read(&fscache_n_acquires_no_cache),
46107 - atomic_read(&fscache_n_acquires_ok),
46108 - atomic_read(&fscache_n_acquires_nobufs),
46109 - atomic_read(&fscache_n_acquires_oom));
46110 + atomic_read_unchecked(&fscache_n_acquires),
46111 + atomic_read_unchecked(&fscache_n_acquires_null),
46112 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
46113 + atomic_read_unchecked(&fscache_n_acquires_ok),
46114 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
46115 + atomic_read_unchecked(&fscache_n_acquires_oom));
46116
46117 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46118 - atomic_read(&fscache_n_object_lookups),
46119 - atomic_read(&fscache_n_object_lookups_negative),
46120 - atomic_read(&fscache_n_object_lookups_positive),
46121 - atomic_read(&fscache_n_object_created),
46122 - atomic_read(&fscache_n_object_lookups_timed_out));
46123 + atomic_read_unchecked(&fscache_n_object_lookups),
46124 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
46125 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
46126 + atomic_read_unchecked(&fscache_n_object_created),
46127 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46128
46129 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46130 - atomic_read(&fscache_n_updates),
46131 - atomic_read(&fscache_n_updates_null),
46132 - atomic_read(&fscache_n_updates_run));
46133 + atomic_read_unchecked(&fscache_n_updates),
46134 + atomic_read_unchecked(&fscache_n_updates_null),
46135 + atomic_read_unchecked(&fscache_n_updates_run));
46136
46137 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46138 - atomic_read(&fscache_n_relinquishes),
46139 - atomic_read(&fscache_n_relinquishes_null),
46140 - atomic_read(&fscache_n_relinquishes_waitcrt),
46141 - atomic_read(&fscache_n_relinquishes_retire));
46142 + atomic_read_unchecked(&fscache_n_relinquishes),
46143 + atomic_read_unchecked(&fscache_n_relinquishes_null),
46144 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46145 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
46146
46147 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46148 - atomic_read(&fscache_n_attr_changed),
46149 - atomic_read(&fscache_n_attr_changed_ok),
46150 - atomic_read(&fscache_n_attr_changed_nobufs),
46151 - atomic_read(&fscache_n_attr_changed_nomem),
46152 - atomic_read(&fscache_n_attr_changed_calls));
46153 + atomic_read_unchecked(&fscache_n_attr_changed),
46154 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
46155 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46156 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46157 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
46158
46159 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46160 - atomic_read(&fscache_n_allocs),
46161 - atomic_read(&fscache_n_allocs_ok),
46162 - atomic_read(&fscache_n_allocs_wait),
46163 - atomic_read(&fscache_n_allocs_nobufs),
46164 - atomic_read(&fscache_n_allocs_intr));
46165 + atomic_read_unchecked(&fscache_n_allocs),
46166 + atomic_read_unchecked(&fscache_n_allocs_ok),
46167 + atomic_read_unchecked(&fscache_n_allocs_wait),
46168 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
46169 + atomic_read_unchecked(&fscache_n_allocs_intr));
46170 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46171 - atomic_read(&fscache_n_alloc_ops),
46172 - atomic_read(&fscache_n_alloc_op_waits),
46173 - atomic_read(&fscache_n_allocs_object_dead));
46174 + atomic_read_unchecked(&fscache_n_alloc_ops),
46175 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
46176 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
46177
46178 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46179 " int=%u oom=%u\n",
46180 - atomic_read(&fscache_n_retrievals),
46181 - atomic_read(&fscache_n_retrievals_ok),
46182 - atomic_read(&fscache_n_retrievals_wait),
46183 - atomic_read(&fscache_n_retrievals_nodata),
46184 - atomic_read(&fscache_n_retrievals_nobufs),
46185 - atomic_read(&fscache_n_retrievals_intr),
46186 - atomic_read(&fscache_n_retrievals_nomem));
46187 + atomic_read_unchecked(&fscache_n_retrievals),
46188 + atomic_read_unchecked(&fscache_n_retrievals_ok),
46189 + atomic_read_unchecked(&fscache_n_retrievals_wait),
46190 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
46191 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46192 + atomic_read_unchecked(&fscache_n_retrievals_intr),
46193 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
46194 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46195 - atomic_read(&fscache_n_retrieval_ops),
46196 - atomic_read(&fscache_n_retrieval_op_waits),
46197 - atomic_read(&fscache_n_retrievals_object_dead));
46198 + atomic_read_unchecked(&fscache_n_retrieval_ops),
46199 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46200 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46201
46202 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46203 - atomic_read(&fscache_n_stores),
46204 - atomic_read(&fscache_n_stores_ok),
46205 - atomic_read(&fscache_n_stores_again),
46206 - atomic_read(&fscache_n_stores_nobufs),
46207 - atomic_read(&fscache_n_stores_oom));
46208 + atomic_read_unchecked(&fscache_n_stores),
46209 + atomic_read_unchecked(&fscache_n_stores_ok),
46210 + atomic_read_unchecked(&fscache_n_stores_again),
46211 + atomic_read_unchecked(&fscache_n_stores_nobufs),
46212 + atomic_read_unchecked(&fscache_n_stores_oom));
46213 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46214 - atomic_read(&fscache_n_store_ops),
46215 - atomic_read(&fscache_n_store_calls),
46216 - atomic_read(&fscache_n_store_pages),
46217 - atomic_read(&fscache_n_store_radix_deletes),
46218 - atomic_read(&fscache_n_store_pages_over_limit));
46219 + atomic_read_unchecked(&fscache_n_store_ops),
46220 + atomic_read_unchecked(&fscache_n_store_calls),
46221 + atomic_read_unchecked(&fscache_n_store_pages),
46222 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
46223 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46224
46225 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46226 - atomic_read(&fscache_n_store_vmscan_not_storing),
46227 - atomic_read(&fscache_n_store_vmscan_gone),
46228 - atomic_read(&fscache_n_store_vmscan_busy),
46229 - atomic_read(&fscache_n_store_vmscan_cancelled));
46230 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46231 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46232 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46233 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46234
46235 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46236 - atomic_read(&fscache_n_op_pend),
46237 - atomic_read(&fscache_n_op_run),
46238 - atomic_read(&fscache_n_op_enqueue),
46239 - atomic_read(&fscache_n_op_cancelled),
46240 - atomic_read(&fscache_n_op_rejected));
46241 + atomic_read_unchecked(&fscache_n_op_pend),
46242 + atomic_read_unchecked(&fscache_n_op_run),
46243 + atomic_read_unchecked(&fscache_n_op_enqueue),
46244 + atomic_read_unchecked(&fscache_n_op_cancelled),
46245 + atomic_read_unchecked(&fscache_n_op_rejected));
46246 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46247 - atomic_read(&fscache_n_op_deferred_release),
46248 - atomic_read(&fscache_n_op_release),
46249 - atomic_read(&fscache_n_op_gc));
46250 + atomic_read_unchecked(&fscache_n_op_deferred_release),
46251 + atomic_read_unchecked(&fscache_n_op_release),
46252 + atomic_read_unchecked(&fscache_n_op_gc));
46253
46254 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46255 atomic_read(&fscache_n_cop_alloc_object),
46256 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
46257 index 3426521..3b75162 100644
46258 --- a/fs/fuse/cuse.c
46259 +++ b/fs/fuse/cuse.c
46260 @@ -587,10 +587,12 @@ static int __init cuse_init(void)
46261 INIT_LIST_HEAD(&cuse_conntbl[i]);
46262
46263 /* inherit and extend fuse_dev_operations */
46264 - cuse_channel_fops = fuse_dev_operations;
46265 - cuse_channel_fops.owner = THIS_MODULE;
46266 - cuse_channel_fops.open = cuse_channel_open;
46267 - cuse_channel_fops.release = cuse_channel_release;
46268 + pax_open_kernel();
46269 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46270 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46271 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
46272 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
46273 + pax_close_kernel();
46274
46275 cuse_class = class_create(THIS_MODULE, "cuse");
46276 if (IS_ERR(cuse_class))
46277 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
46278 index 7df2b5e..5804aa7 100644
46279 --- a/fs/fuse/dev.c
46280 +++ b/fs/fuse/dev.c
46281 @@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46282 ret = 0;
46283 pipe_lock(pipe);
46284
46285 - if (!pipe->readers) {
46286 + if (!atomic_read(&pipe->readers)) {
46287 send_sig(SIGPIPE, current, 0);
46288 if (!ret)
46289 ret = -EPIPE;
46290 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
46291 index bc43832..0cfe5a6 100644
46292 --- a/fs/fuse/dir.c
46293 +++ b/fs/fuse/dir.c
46294 @@ -1181,7 +1181,7 @@ static char *read_link(struct dentry *dentry)
46295 return link;
46296 }
46297
46298 -static void free_link(char *link)
46299 +static void free_link(const char *link)
46300 {
46301 if (!IS_ERR(link))
46302 free_page((unsigned long) link);
46303 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
46304 index a9ba244..d9df391 100644
46305 --- a/fs/gfs2/inode.c
46306 +++ b/fs/gfs2/inode.c
46307 @@ -1496,7 +1496,7 @@ out:
46308
46309 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46310 {
46311 - char *s = nd_get_link(nd);
46312 + const char *s = nd_get_link(nd);
46313 if (!IS_ERR(s))
46314 kfree(s);
46315 }
46316 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
46317 index 001ef01..f7d5f07 100644
46318 --- a/fs/hugetlbfs/inode.c
46319 +++ b/fs/hugetlbfs/inode.c
46320 @@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46321 .kill_sb = kill_litter_super,
46322 };
46323
46324 -static struct vfsmount *hugetlbfs_vfsmount;
46325 +struct vfsmount *hugetlbfs_vfsmount;
46326
46327 static int can_do_hugetlb_shm(void)
46328 {
46329 diff --git a/fs/inode.c b/fs/inode.c
46330 index 9f4f5fe..6214688 100644
46331 --- a/fs/inode.c
46332 +++ b/fs/inode.c
46333 @@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
46334
46335 #ifdef CONFIG_SMP
46336 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46337 - static atomic_t shared_last_ino;
46338 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46339 + static atomic_unchecked_t shared_last_ino;
46340 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
46341
46342 res = next - LAST_INO_BATCH;
46343 }
46344 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
46345 index 4a6cf28..d3a29d3 100644
46346 --- a/fs/jffs2/erase.c
46347 +++ b/fs/jffs2/erase.c
46348 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46349 struct jffs2_unknown_node marker = {
46350 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46351 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46352 - .totlen = cpu_to_je32(c->cleanmarker_size)
46353 + .totlen = cpu_to_je32(c->cleanmarker_size),
46354 + .hdr_crc = cpu_to_je32(0)
46355 };
46356
46357 jffs2_prealloc_raw_node_refs(c, jeb, 1);
46358 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
46359 index 74d9be1..d5dd140 100644
46360 --- a/fs/jffs2/wbuf.c
46361 +++ b/fs/jffs2/wbuf.c
46362 @@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46363 {
46364 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46365 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46366 - .totlen = constant_cpu_to_je32(8)
46367 + .totlen = constant_cpu_to_je32(8),
46368 + .hdr_crc = constant_cpu_to_je32(0)
46369 };
46370
46371 /*
46372 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
46373 index 4a82950..bcaa0cb 100644
46374 --- a/fs/jfs/super.c
46375 +++ b/fs/jfs/super.c
46376 @@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46377
46378 jfs_inode_cachep =
46379 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46380 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46381 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46382 init_once);
46383 if (jfs_inode_cachep == NULL)
46384 return -ENOMEM;
46385 diff --git a/fs/libfs.c b/fs/libfs.c
46386 index 18d08f5..fe3dc64 100644
46387 --- a/fs/libfs.c
46388 +++ b/fs/libfs.c
46389 @@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46390
46391 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46392 struct dentry *next;
46393 + char d_name[sizeof(next->d_iname)];
46394 + const unsigned char *name;
46395 +
46396 next = list_entry(p, struct dentry, d_u.d_child);
46397 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46398 if (!simple_positive(next)) {
46399 @@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46400
46401 spin_unlock(&next->d_lock);
46402 spin_unlock(&dentry->d_lock);
46403 - if (filldir(dirent, next->d_name.name,
46404 + name = next->d_name.name;
46405 + if (name == next->d_iname) {
46406 + memcpy(d_name, name, next->d_name.len);
46407 + name = d_name;
46408 + }
46409 + if (filldir(dirent, name,
46410 next->d_name.len, filp->f_pos,
46411 next->d_inode->i_ino,
46412 dt_type(next->d_inode)) < 0)
46413 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
46414 index 8392cb8..80d6193 100644
46415 --- a/fs/lockd/clntproc.c
46416 +++ b/fs/lockd/clntproc.c
46417 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46418 /*
46419 * Cookie counter for NLM requests
46420 */
46421 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46422 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46423
46424 void nlmclnt_next_cookie(struct nlm_cookie *c)
46425 {
46426 - u32 cookie = atomic_inc_return(&nlm_cookie);
46427 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46428
46429 memcpy(c->data, &cookie, 4);
46430 c->len=4;
46431 diff --git a/fs/locks.c b/fs/locks.c
46432 index 6a64f15..3114738 100644
46433 --- a/fs/locks.c
46434 +++ b/fs/locks.c
46435 @@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46436 return;
46437
46438 if (filp->f_op && filp->f_op->flock) {
46439 - struct file_lock fl = {
46440 + struct file_lock flock = {
46441 .fl_pid = current->tgid,
46442 .fl_file = filp,
46443 .fl_flags = FL_FLOCK,
46444 .fl_type = F_UNLCK,
46445 .fl_end = OFFSET_MAX,
46446 };
46447 - filp->f_op->flock(filp, F_SETLKW, &fl);
46448 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
46449 - fl.fl_ops->fl_release_private(&fl);
46450 + filp->f_op->flock(filp, F_SETLKW, &flock);
46451 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
46452 + flock.fl_ops->fl_release_private(&flock);
46453 }
46454
46455 lock_flocks();
46456 diff --git a/fs/namei.c b/fs/namei.c
46457 index c427919..232326c 100644
46458 --- a/fs/namei.c
46459 +++ b/fs/namei.c
46460 @@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46461 if (ret != -EACCES)
46462 return ret;
46463
46464 +#ifdef CONFIG_GRKERNSEC
46465 + /* we'll block if we have to log due to a denied capability use */
46466 + if (mask & MAY_NOT_BLOCK)
46467 + return -ECHILD;
46468 +#endif
46469 +
46470 if (S_ISDIR(inode->i_mode)) {
46471 /* DACs are overridable for directories */
46472 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46473 - return 0;
46474 if (!(mask & MAY_WRITE))
46475 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46476 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46477 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46478 return 0;
46479 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46480 + return 0;
46481 return -EACCES;
46482 }
46483 /*
46484 + * Searching includes executable on directories, else just read.
46485 + */
46486 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46487 + if (mask == MAY_READ)
46488 + if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46489 + ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46490 + return 0;
46491 +
46492 + /*
46493 * Read/write DACs are always overridable.
46494 * Executable DACs are overridable when there is
46495 * at least one exec bit set.
46496 @@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
46497 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46498 return 0;
46499
46500 - /*
46501 - * Searching includes executable on directories, else just read.
46502 - */
46503 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
46504 - if (mask == MAY_READ)
46505 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46506 - return 0;
46507 -
46508 return -EACCES;
46509 }
46510
46511 @@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46512 return error;
46513 }
46514
46515 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
46516 + dentry->d_inode, dentry, nd->path.mnt)) {
46517 + error = -EACCES;
46518 + *p = ERR_PTR(error); /* no ->put_link(), please */
46519 + path_put(&nd->path);
46520 + return error;
46521 + }
46522 +
46523 nd->last_type = LAST_BIND;
46524 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46525 error = PTR_ERR(*p);
46526 if (!IS_ERR(*p)) {
46527 - char *s = nd_get_link(nd);
46528 + const char *s = nd_get_link(nd);
46529 error = 0;
46530 if (s)
46531 error = __vfs_follow_link(nd, s);
46532 @@ -1355,6 +1371,9 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
46533 if (!res)
46534 res = walk_component(nd, path, &nd->last,
46535 nd->last_type, LOOKUP_FOLLOW);
46536 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode)) {
46537 + res = -EACCES;
46538 + }
46539 put_link(nd, &link, cookie);
46540 } while (res > 0);
46541
46542 @@ -1746,6 +1765,9 @@ static int path_lookupat(int dfd, const char *name,
46543 err = follow_link(&link, nd, &cookie);
46544 if (!err)
46545 err = lookup_last(nd, &path);
46546 + if (!err && gr_handle_symlink_owner(&link, nd->inode)) {
46547 + err = -EACCES;
46548 + }
46549 put_link(nd, &link, cookie);
46550 }
46551 }
46552 @@ -1753,6 +1775,21 @@ static int path_lookupat(int dfd, const char *name,
46553 if (!err)
46554 err = complete_walk(nd);
46555
46556 + if (!(nd->flags & LOOKUP_PARENT)) {
46557 +#ifdef CONFIG_GRKERNSEC
46558 + if (flags & LOOKUP_RCU) {
46559 + if (!err)
46560 + path_put(&nd->path);
46561 + err = -ECHILD;
46562 + } else
46563 +#endif
46564 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46565 + if (!err)
46566 + path_put(&nd->path);
46567 + err = -ENOENT;
46568 + }
46569 + }
46570 +
46571 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46572 if (!nd->inode->i_op->lookup) {
46573 path_put(&nd->path);
46574 @@ -1780,6 +1817,15 @@ static int do_path_lookup(int dfd, const char *name,
46575 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46576
46577 if (likely(!retval)) {
46578 + if (*name != '/' && nd->path.dentry && nd->inode) {
46579 +#ifdef CONFIG_GRKERNSEC
46580 + if (flags & LOOKUP_RCU)
46581 + return -ECHILD;
46582 +#endif
46583 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46584 + return -ENOENT;
46585 + }
46586 +
46587 if (unlikely(!audit_dummy_context())) {
46588 if (nd->path.dentry && nd->inode)
46589 audit_inode(name, nd->path.dentry);
46590 @@ -2126,6 +2172,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46591 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46592 return -EPERM;
46593
46594 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46595 + return -EPERM;
46596 + if (gr_handle_rawio(inode))
46597 + return -EPERM;
46598 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46599 + return -EACCES;
46600 +
46601 return 0;
46602 }
46603
46604 @@ -2187,6 +2240,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46605 error = complete_walk(nd);
46606 if (error)
46607 return ERR_PTR(error);
46608 +#ifdef CONFIG_GRKERNSEC
46609 + if (nd->flags & LOOKUP_RCU) {
46610 + error = -ECHILD;
46611 + goto exit;
46612 + }
46613 +#endif
46614 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46615 + error = -ENOENT;
46616 + goto exit;
46617 + }
46618 audit_inode(pathname, nd->path.dentry);
46619 if (open_flag & O_CREAT) {
46620 error = -EISDIR;
46621 @@ -2197,6 +2260,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46622 error = complete_walk(nd);
46623 if (error)
46624 return ERR_PTR(error);
46625 +#ifdef CONFIG_GRKERNSEC
46626 + if (nd->flags & LOOKUP_RCU) {
46627 + error = -ECHILD;
46628 + goto exit;
46629 + }
46630 +#endif
46631 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46632 + error = -ENOENT;
46633 + goto exit;
46634 + }
46635 audit_inode(pathname, dir);
46636 goto ok;
46637 }
46638 @@ -2218,6 +2291,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46639 error = complete_walk(nd);
46640 if (error)
46641 return ERR_PTR(error);
46642 +#ifdef CONFIG_GRKERNSEC
46643 + if (nd->flags & LOOKUP_RCU) {
46644 + error = -ECHILD;
46645 + goto exit;
46646 + }
46647 +#endif
46648 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46649 + error = -ENOENT;
46650 + goto exit;
46651 + }
46652
46653 error = -ENOTDIR;
46654 if (nd->flags & LOOKUP_DIRECTORY) {
46655 @@ -2258,6 +2341,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46656 /* Negative dentry, just create the file */
46657 if (!dentry->d_inode) {
46658 umode_t mode = op->mode;
46659 +
46660 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46661 + error = -EACCES;
46662 + goto exit_mutex_unlock;
46663 + }
46664 +
46665 if (!IS_POSIXACL(dir->d_inode))
46666 mode &= ~current_umask();
46667 /*
46668 @@ -2281,6 +2370,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46669 error = vfs_create(dir->d_inode, dentry, mode, nd);
46670 if (error)
46671 goto exit_mutex_unlock;
46672 + else
46673 + gr_handle_create(path->dentry, path->mnt);
46674 mutex_unlock(&dir->d_inode->i_mutex);
46675 dput(nd->path.dentry);
46676 nd->path.dentry = dentry;
46677 @@ -2290,6 +2381,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46678 /*
46679 * It already exists.
46680 */
46681 +
46682 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46683 + error = -ENOENT;
46684 + goto exit_mutex_unlock;
46685 + }
46686 +
46687 + /* only check if O_CREAT is specified, all other checks need to go
46688 + into may_open */
46689 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46690 + error = -EACCES;
46691 + goto exit_mutex_unlock;
46692 + }
46693 +
46694 mutex_unlock(&dir->d_inode->i_mutex);
46695 audit_inode(pathname, path->dentry);
46696
46697 @@ -2407,8 +2511,14 @@ static struct file *path_openat(int dfd, const char *pathname,
46698 error = follow_link(&link, nd, &cookie);
46699 if (unlikely(error))
46700 filp = ERR_PTR(error);
46701 - else
46702 + else {
46703 filp = do_last(nd, &path, op, pathname);
46704 + if (!IS_ERR(filp) && gr_handle_symlink_owner(&link, nd->inode)) {
46705 + if (filp)
46706 + fput(filp);
46707 + filp = ERR_PTR(-EACCES);
46708 + }
46709 + }
46710 put_link(nd, &link, cookie);
46711 }
46712 out:
46713 @@ -2502,6 +2612,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46714 *path = nd.path;
46715 return dentry;
46716 eexist:
46717 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46718 + dput(dentry);
46719 + dentry = ERR_PTR(-ENOENT);
46720 + goto fail;
46721 + }
46722 dput(dentry);
46723 dentry = ERR_PTR(-EEXIST);
46724 fail:
46725 @@ -2524,6 +2639,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46726 }
46727 EXPORT_SYMBOL(user_path_create);
46728
46729 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46730 +{
46731 + char *tmp = getname(pathname);
46732 + struct dentry *res;
46733 + if (IS_ERR(tmp))
46734 + return ERR_CAST(tmp);
46735 + res = kern_path_create(dfd, tmp, path, is_dir);
46736 + if (IS_ERR(res))
46737 + putname(tmp);
46738 + else
46739 + *to = tmp;
46740 + return res;
46741 +}
46742 +
46743 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46744 {
46745 int error = may_create(dir, dentry);
46746 @@ -2591,6 +2720,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46747 error = mnt_want_write(path.mnt);
46748 if (error)
46749 goto out_dput;
46750 +
46751 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
46752 + error = -EPERM;
46753 + goto out_drop_write;
46754 + }
46755 +
46756 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
46757 + error = -EACCES;
46758 + goto out_drop_write;
46759 + }
46760 +
46761 error = security_path_mknod(&path, dentry, mode, dev);
46762 if (error)
46763 goto out_drop_write;
46764 @@ -2608,6 +2748,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46765 }
46766 out_drop_write:
46767 mnt_drop_write(path.mnt);
46768 +
46769 + if (!error)
46770 + gr_handle_create(dentry, path.mnt);
46771 out_dput:
46772 dput(dentry);
46773 mutex_unlock(&path.dentry->d_inode->i_mutex);
46774 @@ -2661,12 +2804,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46775 error = mnt_want_write(path.mnt);
46776 if (error)
46777 goto out_dput;
46778 +
46779 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
46780 + error = -EACCES;
46781 + goto out_drop_write;
46782 + }
46783 +
46784 error = security_path_mkdir(&path, dentry, mode);
46785 if (error)
46786 goto out_drop_write;
46787 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
46788 out_drop_write:
46789 mnt_drop_write(path.mnt);
46790 +
46791 + if (!error)
46792 + gr_handle_create(dentry, path.mnt);
46793 out_dput:
46794 dput(dentry);
46795 mutex_unlock(&path.dentry->d_inode->i_mutex);
46796 @@ -2746,6 +2898,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46797 char * name;
46798 struct dentry *dentry;
46799 struct nameidata nd;
46800 + ino_t saved_ino = 0;
46801 + dev_t saved_dev = 0;
46802
46803 error = user_path_parent(dfd, pathname, &nd, &name);
46804 if (error)
46805 @@ -2774,6 +2928,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46806 error = -ENOENT;
46807 goto exit3;
46808 }
46809 +
46810 + saved_ino = dentry->d_inode->i_ino;
46811 + saved_dev = gr_get_dev_from_dentry(dentry);
46812 +
46813 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46814 + error = -EACCES;
46815 + goto exit3;
46816 + }
46817 +
46818 error = mnt_want_write(nd.path.mnt);
46819 if (error)
46820 goto exit3;
46821 @@ -2781,6 +2944,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46822 if (error)
46823 goto exit4;
46824 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46825 + if (!error && (saved_dev || saved_ino))
46826 + gr_handle_delete(saved_ino, saved_dev);
46827 exit4:
46828 mnt_drop_write(nd.path.mnt);
46829 exit3:
46830 @@ -2843,6 +3008,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46831 struct dentry *dentry;
46832 struct nameidata nd;
46833 struct inode *inode = NULL;
46834 + ino_t saved_ino = 0;
46835 + dev_t saved_dev = 0;
46836
46837 error = user_path_parent(dfd, pathname, &nd, &name);
46838 if (error)
46839 @@ -2865,6 +3032,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46840 if (!inode)
46841 goto slashes;
46842 ihold(inode);
46843 +
46844 + if (inode->i_nlink <= 1) {
46845 + saved_ino = inode->i_ino;
46846 + saved_dev = gr_get_dev_from_dentry(dentry);
46847 + }
46848 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46849 + error = -EACCES;
46850 + goto exit2;
46851 + }
46852 +
46853 error = mnt_want_write(nd.path.mnt);
46854 if (error)
46855 goto exit2;
46856 @@ -2872,6 +3049,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46857 if (error)
46858 goto exit3;
46859 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46860 + if (!error && (saved_ino || saved_dev))
46861 + gr_handle_delete(saved_ino, saved_dev);
46862 exit3:
46863 mnt_drop_write(nd.path.mnt);
46864 exit2:
46865 @@ -2947,10 +3126,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46866 error = mnt_want_write(path.mnt);
46867 if (error)
46868 goto out_dput;
46869 +
46870 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
46871 + error = -EACCES;
46872 + goto out_drop_write;
46873 + }
46874 +
46875 error = security_path_symlink(&path, dentry, from);
46876 if (error)
46877 goto out_drop_write;
46878 error = vfs_symlink(path.dentry->d_inode, dentry, from);
46879 + if (!error)
46880 + gr_handle_create(dentry, path.mnt);
46881 out_drop_write:
46882 mnt_drop_write(path.mnt);
46883 out_dput:
46884 @@ -3025,6 +3212,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46885 {
46886 struct dentry *new_dentry;
46887 struct path old_path, new_path;
46888 + char *to = NULL;
46889 int how = 0;
46890 int error;
46891
46892 @@ -3048,7 +3236,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46893 if (error)
46894 return error;
46895
46896 - new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46897 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46898 error = PTR_ERR(new_dentry);
46899 if (IS_ERR(new_dentry))
46900 goto out;
46901 @@ -3059,13 +3247,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46902 error = mnt_want_write(new_path.mnt);
46903 if (error)
46904 goto out_dput;
46905 +
46906 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46907 + old_path.dentry->d_inode,
46908 + old_path.dentry->d_inode->i_mode, to)) {
46909 + error = -EACCES;
46910 + goto out_drop_write;
46911 + }
46912 +
46913 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46914 + old_path.dentry, old_path.mnt, to)) {
46915 + error = -EACCES;
46916 + goto out_drop_write;
46917 + }
46918 +
46919 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46920 if (error)
46921 goto out_drop_write;
46922 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
46923 + if (!error)
46924 + gr_handle_create(new_dentry, new_path.mnt);
46925 out_drop_write:
46926 mnt_drop_write(new_path.mnt);
46927 out_dput:
46928 + putname(to);
46929 dput(new_dentry);
46930 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46931 path_put(&new_path);
46932 @@ -3299,6 +3504,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46933 if (new_dentry == trap)
46934 goto exit5;
46935
46936 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
46937 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
46938 + to);
46939 + if (error)
46940 + goto exit5;
46941 +
46942 error = mnt_want_write(oldnd.path.mnt);
46943 if (error)
46944 goto exit5;
46945 @@ -3308,6 +3519,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
46946 goto exit6;
46947 error = vfs_rename(old_dir->d_inode, old_dentry,
46948 new_dir->d_inode, new_dentry);
46949 + if (!error)
46950 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
46951 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
46952 exit6:
46953 mnt_drop_write(oldnd.path.mnt);
46954 exit5:
46955 @@ -3333,6 +3547,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
46956
46957 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
46958 {
46959 + char tmpbuf[64];
46960 + const char *newlink;
46961 int len;
46962
46963 len = PTR_ERR(link);
46964 @@ -3342,7 +3558,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
46965 len = strlen(link);
46966 if (len > (unsigned) buflen)
46967 len = buflen;
46968 - if (copy_to_user(buffer, link, len))
46969 +
46970 + if (len < sizeof(tmpbuf)) {
46971 + memcpy(tmpbuf, link, len);
46972 + newlink = tmpbuf;
46973 + } else
46974 + newlink = link;
46975 +
46976 + if (copy_to_user(buffer, newlink, len))
46977 len = -EFAULT;
46978 out:
46979 return len;
46980 diff --git a/fs/namespace.c b/fs/namespace.c
46981 index 4e46539..b28253c 100644
46982 --- a/fs/namespace.c
46983 +++ b/fs/namespace.c
46984 @@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
46985 if (!(sb->s_flags & MS_RDONLY))
46986 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
46987 up_write(&sb->s_umount);
46988 +
46989 + gr_log_remount(mnt->mnt_devname, retval);
46990 +
46991 return retval;
46992 }
46993
46994 @@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
46995 br_write_unlock(vfsmount_lock);
46996 up_write(&namespace_sem);
46997 release_mounts(&umount_list);
46998 +
46999 + gr_log_unmount(mnt->mnt_devname, retval);
47000 +
47001 return retval;
47002 }
47003
47004 @@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47005 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47006 MS_STRICTATIME);
47007
47008 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47009 + retval = -EPERM;
47010 + goto dput_out;
47011 + }
47012 +
47013 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47014 + retval = -EPERM;
47015 + goto dput_out;
47016 + }
47017 +
47018 if (flags & MS_REMOUNT)
47019 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47020 data_page);
47021 @@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47022 dev_name, data_page);
47023 dput_out:
47024 path_put(&path);
47025 +
47026 + gr_log_mount(dev_name, dir_name, retval);
47027 +
47028 return retval;
47029 }
47030
47031 @@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47032 if (error)
47033 goto out2;
47034
47035 + if (gr_handle_chroot_pivot()) {
47036 + error = -EPERM;
47037 + goto out2;
47038 + }
47039 +
47040 get_fs_root(current->fs, &root);
47041 error = lock_mount(&old);
47042 if (error)
47043 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
47044 index e8bbfa5..864f936 100644
47045 --- a/fs/nfs/inode.c
47046 +++ b/fs/nfs/inode.c
47047 @@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47048 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47049 nfsi->attrtimeo_timestamp = jiffies;
47050
47051 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47052 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47053 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47054 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47055 else
47056 @@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47057 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47058 }
47059
47060 -static atomic_long_t nfs_attr_generation_counter;
47061 +static atomic_long_unchecked_t nfs_attr_generation_counter;
47062
47063 static unsigned long nfs_read_attr_generation_counter(void)
47064 {
47065 - return atomic_long_read(&nfs_attr_generation_counter);
47066 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47067 }
47068
47069 unsigned long nfs_inc_attr_generation_counter(void)
47070 {
47071 - return atomic_long_inc_return(&nfs_attr_generation_counter);
47072 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47073 }
47074
47075 void nfs_fattr_init(struct nfs_fattr *fattr)
47076 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
47077 index 5686661..80a9a3a 100644
47078 --- a/fs/nfsd/vfs.c
47079 +++ b/fs/nfsd/vfs.c
47080 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47081 } else {
47082 oldfs = get_fs();
47083 set_fs(KERNEL_DS);
47084 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
47085 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47086 set_fs(oldfs);
47087 }
47088
47089 @@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47090
47091 /* Write the data. */
47092 oldfs = get_fs(); set_fs(KERNEL_DS);
47093 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
47094 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47095 set_fs(oldfs);
47096 if (host_err < 0)
47097 goto out_nfserr;
47098 @@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
47099 */
47100
47101 oldfs = get_fs(); set_fs(KERNEL_DS);
47102 - host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
47103 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
47104 set_fs(oldfs);
47105
47106 if (host_err < 0)
47107 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
47108 index 3568c8a..e0240d8 100644
47109 --- a/fs/notify/fanotify/fanotify_user.c
47110 +++ b/fs/notify/fanotify/fanotify_user.c
47111 @@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
47112 goto out_close_fd;
47113
47114 ret = -EFAULT;
47115 - if (copy_to_user(buf, &fanotify_event_metadata,
47116 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47117 + copy_to_user(buf, &fanotify_event_metadata,
47118 fanotify_event_metadata.event_len))
47119 goto out_kill_access_response;
47120
47121 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
47122 index c887b13..0fdf472 100644
47123 --- a/fs/notify/notification.c
47124 +++ b/fs/notify/notification.c
47125 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47126 * get set to 0 so it will never get 'freed'
47127 */
47128 static struct fsnotify_event *q_overflow_event;
47129 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47130 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47131
47132 /**
47133 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
47134 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47135 */
47136 u32 fsnotify_get_cookie(void)
47137 {
47138 - return atomic_inc_return(&fsnotify_sync_cookie);
47139 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47140 }
47141 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47142
47143 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47144 index 99e3610..02c1068 100644
47145 --- a/fs/ntfs/dir.c
47146 +++ b/fs/ntfs/dir.c
47147 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
47148 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47149 ~(s64)(ndir->itype.index.block_size - 1)));
47150 /* Bounds checks. */
47151 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47152 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47153 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47154 "inode 0x%lx or driver bug.", vdir->i_ino);
47155 goto err_out;
47156 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
47157 index 8639169..76697aa 100644
47158 --- a/fs/ntfs/file.c
47159 +++ b/fs/ntfs/file.c
47160 @@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47161 #endif /* NTFS_RW */
47162 };
47163
47164 -const struct file_operations ntfs_empty_file_ops = {};
47165 +const struct file_operations ntfs_empty_file_ops __read_only;
47166
47167 -const struct inode_operations ntfs_empty_inode_ops = {};
47168 +const struct inode_operations ntfs_empty_inode_ops __read_only;
47169 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47170 index 210c352..a174f83 100644
47171 --- a/fs/ocfs2/localalloc.c
47172 +++ b/fs/ocfs2/localalloc.c
47173 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47174 goto bail;
47175 }
47176
47177 - atomic_inc(&osb->alloc_stats.moves);
47178 + atomic_inc_unchecked(&osb->alloc_stats.moves);
47179
47180 bail:
47181 if (handle)
47182 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
47183 index d355e6e..578d905 100644
47184 --- a/fs/ocfs2/ocfs2.h
47185 +++ b/fs/ocfs2/ocfs2.h
47186 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
47187
47188 struct ocfs2_alloc_stats
47189 {
47190 - atomic_t moves;
47191 - atomic_t local_data;
47192 - atomic_t bitmap_data;
47193 - atomic_t bg_allocs;
47194 - atomic_t bg_extends;
47195 + atomic_unchecked_t moves;
47196 + atomic_unchecked_t local_data;
47197 + atomic_unchecked_t bitmap_data;
47198 + atomic_unchecked_t bg_allocs;
47199 + atomic_unchecked_t bg_extends;
47200 };
47201
47202 enum ocfs2_local_alloc_state
47203 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
47204 index f169da4..9112253 100644
47205 --- a/fs/ocfs2/suballoc.c
47206 +++ b/fs/ocfs2/suballoc.c
47207 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47208 mlog_errno(status);
47209 goto bail;
47210 }
47211 - atomic_inc(&osb->alloc_stats.bg_extends);
47212 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47213
47214 /* You should never ask for this much metadata */
47215 BUG_ON(bits_wanted >
47216 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47217 mlog_errno(status);
47218 goto bail;
47219 }
47220 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47221 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47222
47223 *suballoc_loc = res.sr_bg_blkno;
47224 *suballoc_bit_start = res.sr_bit_offset;
47225 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47226 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47227 res->sr_bits);
47228
47229 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47230 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47231
47232 BUG_ON(res->sr_bits != 1);
47233
47234 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47235 mlog_errno(status);
47236 goto bail;
47237 }
47238 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47239 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47240
47241 BUG_ON(res.sr_bits != 1);
47242
47243 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47244 cluster_start,
47245 num_clusters);
47246 if (!status)
47247 - atomic_inc(&osb->alloc_stats.local_data);
47248 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
47249 } else {
47250 if (min_clusters > (osb->bitmap_cpg - 1)) {
47251 /* The only paths asking for contiguousness
47252 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47253 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47254 res.sr_bg_blkno,
47255 res.sr_bit_offset);
47256 - atomic_inc(&osb->alloc_stats.bitmap_data);
47257 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
47258 *num_clusters = res.sr_bits;
47259 }
47260 }
47261 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
47262 index 68f4541..89cfe6a 100644
47263 --- a/fs/ocfs2/super.c
47264 +++ b/fs/ocfs2/super.c
47265 @@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47266 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47267 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47268 "Stats",
47269 - atomic_read(&osb->alloc_stats.bitmap_data),
47270 - atomic_read(&osb->alloc_stats.local_data),
47271 - atomic_read(&osb->alloc_stats.bg_allocs),
47272 - atomic_read(&osb->alloc_stats.moves),
47273 - atomic_read(&osb->alloc_stats.bg_extends));
47274 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47275 + atomic_read_unchecked(&osb->alloc_stats.local_data),
47276 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47277 + atomic_read_unchecked(&osb->alloc_stats.moves),
47278 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47279
47280 out += snprintf(buf + out, len - out,
47281 "%10s => State: %u Descriptor: %llu Size: %u bits "
47282 @@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
47283 spin_lock_init(&osb->osb_xattr_lock);
47284 ocfs2_init_steal_slots(osb);
47285
47286 - atomic_set(&osb->alloc_stats.moves, 0);
47287 - atomic_set(&osb->alloc_stats.local_data, 0);
47288 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
47289 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
47290 - atomic_set(&osb->alloc_stats.bg_extends, 0);
47291 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47292 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47293 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47294 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47295 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47296
47297 /* Copy the blockcheck stats from the superblock probe */
47298 osb->osb_ecc_stats = *stats;
47299 diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47300 index 5d22872..523db20 100644
47301 --- a/fs/ocfs2/symlink.c
47302 +++ b/fs/ocfs2/symlink.c
47303 @@ -142,7 +142,7 @@ bail:
47304
47305 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47306 {
47307 - char *link = nd_get_link(nd);
47308 + const char *link = nd_get_link(nd);
47309 if (!IS_ERR(link))
47310 kfree(link);
47311 }
47312 diff --git a/fs/open.c b/fs/open.c
47313 index 3f1108b..822d7f7 100644
47314 --- a/fs/open.c
47315 +++ b/fs/open.c
47316 @@ -31,6 +31,8 @@
47317 #include <linux/ima.h>
47318 #include <linux/dnotify.h>
47319
47320 +#define CREATE_TRACE_POINTS
47321 +#include <trace/events/fs.h>
47322 #include "internal.h"
47323
47324 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
47325 @@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47326 error = locks_verify_truncate(inode, NULL, length);
47327 if (!error)
47328 error = security_path_truncate(&path);
47329 +
47330 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47331 + error = -EACCES;
47332 +
47333 if (!error)
47334 error = do_truncate(path.dentry, length, 0, NULL);
47335
47336 @@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47337 if (__mnt_is_readonly(path.mnt))
47338 res = -EROFS;
47339
47340 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47341 + res = -EACCES;
47342 +
47343 out_path_release:
47344 path_put(&path);
47345 out:
47346 @@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47347 if (error)
47348 goto dput_and_out;
47349
47350 + gr_log_chdir(path.dentry, path.mnt);
47351 +
47352 set_fs_pwd(current->fs, &path);
47353
47354 dput_and_out:
47355 @@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47356 goto out_putf;
47357
47358 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47359 +
47360 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47361 + error = -EPERM;
47362 +
47363 + if (!error)
47364 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47365 +
47366 if (!error)
47367 set_fs_pwd(current->fs, &file->f_path);
47368 out_putf:
47369 @@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
47370 if (error)
47371 goto dput_and_out;
47372
47373 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47374 + goto dput_and_out;
47375 +
47376 set_fs_root(current->fs, &path);
47377 +
47378 + gr_handle_chroot_chdir(&path);
47379 +
47380 error = 0;
47381 dput_and_out:
47382 path_put(&path);
47383 @@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
47384 if (error)
47385 return error;
47386 mutex_lock(&inode->i_mutex);
47387 +
47388 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
47389 + error = -EACCES;
47390 + goto out_unlock;
47391 + }
47392 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
47393 + error = -EACCES;
47394 + goto out_unlock;
47395 + }
47396 +
47397 error = security_path_chmod(path, mode);
47398 if (error)
47399 goto out_unlock;
47400 @@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47401 int error;
47402 struct iattr newattrs;
47403
47404 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
47405 + return -EACCES;
47406 +
47407 newattrs.ia_valid = ATTR_CTIME;
47408 if (user != (uid_t) -1) {
47409 newattrs.ia_valid |= ATTR_UID;
47410 @@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
47411 } else {
47412 fsnotify_open(f);
47413 fd_install(fd, f);
47414 + trace_do_sys_open(tmp, flags, mode);
47415 }
47416 }
47417 putname(tmp);
47418 diff --git a/fs/pipe.c b/fs/pipe.c
47419 index fec5e4a..f4210f9 100644
47420 --- a/fs/pipe.c
47421 +++ b/fs/pipe.c
47422 @@ -438,9 +438,9 @@ redo:
47423 }
47424 if (bufs) /* More to do? */
47425 continue;
47426 - if (!pipe->writers)
47427 + if (!atomic_read(&pipe->writers))
47428 break;
47429 - if (!pipe->waiting_writers) {
47430 + if (!atomic_read(&pipe->waiting_writers)) {
47431 /* syscall merging: Usually we must not sleep
47432 * if O_NONBLOCK is set, or if we got some data.
47433 * But if a writer sleeps in kernel space, then
47434 @@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47435 mutex_lock(&inode->i_mutex);
47436 pipe = inode->i_pipe;
47437
47438 - if (!pipe->readers) {
47439 + if (!atomic_read(&pipe->readers)) {
47440 send_sig(SIGPIPE, current, 0);
47441 ret = -EPIPE;
47442 goto out;
47443 @@ -553,7 +553,7 @@ redo1:
47444 for (;;) {
47445 int bufs;
47446
47447 - if (!pipe->readers) {
47448 + if (!atomic_read(&pipe->readers)) {
47449 send_sig(SIGPIPE, current, 0);
47450 if (!ret)
47451 ret = -EPIPE;
47452 @@ -644,9 +644,9 @@ redo2:
47453 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47454 do_wakeup = 0;
47455 }
47456 - pipe->waiting_writers++;
47457 + atomic_inc(&pipe->waiting_writers);
47458 pipe_wait(pipe);
47459 - pipe->waiting_writers--;
47460 + atomic_dec(&pipe->waiting_writers);
47461 }
47462 out:
47463 mutex_unlock(&inode->i_mutex);
47464 @@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47465 mask = 0;
47466 if (filp->f_mode & FMODE_READ) {
47467 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47468 - if (!pipe->writers && filp->f_version != pipe->w_counter)
47469 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47470 mask |= POLLHUP;
47471 }
47472
47473 @@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47474 * Most Unices do not set POLLERR for FIFOs but on Linux they
47475 * behave exactly like pipes for poll().
47476 */
47477 - if (!pipe->readers)
47478 + if (!atomic_read(&pipe->readers))
47479 mask |= POLLERR;
47480 }
47481
47482 @@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47483
47484 mutex_lock(&inode->i_mutex);
47485 pipe = inode->i_pipe;
47486 - pipe->readers -= decr;
47487 - pipe->writers -= decw;
47488 + atomic_sub(decr, &pipe->readers);
47489 + atomic_sub(decw, &pipe->writers);
47490
47491 - if (!pipe->readers && !pipe->writers) {
47492 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47493 free_pipe_info(inode);
47494 } else {
47495 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
47496 @@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47497
47498 if (inode->i_pipe) {
47499 ret = 0;
47500 - inode->i_pipe->readers++;
47501 + atomic_inc(&inode->i_pipe->readers);
47502 }
47503
47504 mutex_unlock(&inode->i_mutex);
47505 @@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47506
47507 if (inode->i_pipe) {
47508 ret = 0;
47509 - inode->i_pipe->writers++;
47510 + atomic_inc(&inode->i_pipe->writers);
47511 }
47512
47513 mutex_unlock(&inode->i_mutex);
47514 @@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47515 if (inode->i_pipe) {
47516 ret = 0;
47517 if (filp->f_mode & FMODE_READ)
47518 - inode->i_pipe->readers++;
47519 + atomic_inc(&inode->i_pipe->readers);
47520 if (filp->f_mode & FMODE_WRITE)
47521 - inode->i_pipe->writers++;
47522 + atomic_inc(&inode->i_pipe->writers);
47523 }
47524
47525 mutex_unlock(&inode->i_mutex);
47526 @@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47527 inode->i_pipe = NULL;
47528 }
47529
47530 -static struct vfsmount *pipe_mnt __read_mostly;
47531 +struct vfsmount *pipe_mnt __read_mostly;
47532
47533 /*
47534 * pipefs_dname() is called from d_path().
47535 @@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47536 goto fail_iput;
47537 inode->i_pipe = pipe;
47538
47539 - pipe->readers = pipe->writers = 1;
47540 + atomic_set(&pipe->readers, 1);
47541 + atomic_set(&pipe->writers, 1);
47542 inode->i_fop = &rdwr_pipefifo_fops;
47543
47544 /*
47545 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47546 index 15af622..0e9f4467 100644
47547 --- a/fs/proc/Kconfig
47548 +++ b/fs/proc/Kconfig
47549 @@ -30,12 +30,12 @@ config PROC_FS
47550
47551 config PROC_KCORE
47552 bool "/proc/kcore support" if !ARM
47553 - depends on PROC_FS && MMU
47554 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47555
47556 config PROC_VMCORE
47557 bool "/proc/vmcore support"
47558 - depends on PROC_FS && CRASH_DUMP
47559 - default y
47560 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47561 + default n
47562 help
47563 Exports the dump image of crashed kernel in ELF format.
47564
47565 @@ -59,8 +59,8 @@ config PROC_SYSCTL
47566 limited in memory.
47567
47568 config PROC_PAGE_MONITOR
47569 - default y
47570 - depends on PROC_FS && MMU
47571 + default n
47572 + depends on PROC_FS && MMU && !GRKERNSEC
47573 bool "Enable /proc page monitoring" if EXPERT
47574 help
47575 Various /proc files exist to monitor process memory utilization:
47576 diff --git a/fs/proc/array.c b/fs/proc/array.c
47577 index f9bd395..acb7847 100644
47578 --- a/fs/proc/array.c
47579 +++ b/fs/proc/array.c
47580 @@ -60,6 +60,7 @@
47581 #include <linux/tty.h>
47582 #include <linux/string.h>
47583 #include <linux/mman.h>
47584 +#include <linux/grsecurity.h>
47585 #include <linux/proc_fs.h>
47586 #include <linux/ioport.h>
47587 #include <linux/uaccess.h>
47588 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
47589 seq_putc(m, '\n');
47590 }
47591
47592 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47593 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
47594 +{
47595 + if (p->mm)
47596 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47597 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47598 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47599 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47600 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47601 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47602 + else
47603 + seq_printf(m, "PaX:\t-----\n");
47604 +}
47605 +#endif
47606 +
47607 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47608 struct pid *pid, struct task_struct *task)
47609 {
47610 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47611 task_cpus_allowed(m, task);
47612 cpuset_task_status_allowed(m, task);
47613 task_context_switch_counts(m, task);
47614 +
47615 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47616 + task_pax(m, task);
47617 +#endif
47618 +
47619 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47620 + task_grsec_rbac(m, task);
47621 +#endif
47622 +
47623 return 0;
47624 }
47625
47626 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47627 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47628 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47629 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47630 +#endif
47631 +
47632 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47633 struct pid *pid, struct task_struct *task, int whole)
47634 {
47635 @@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47636 char tcomm[sizeof(task->comm)];
47637 unsigned long flags;
47638
47639 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47640 + if (current->exec_id != m->exec_id) {
47641 + gr_log_badprocpid("stat");
47642 + return 0;
47643 + }
47644 +#endif
47645 +
47646 state = *get_task_state(task);
47647 vsize = eip = esp = 0;
47648 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47649 @@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47650 gtime = task->gtime;
47651 }
47652
47653 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47654 + if (PAX_RAND_FLAGS(mm)) {
47655 + eip = 0;
47656 + esp = 0;
47657 + wchan = 0;
47658 + }
47659 +#endif
47660 +#ifdef CONFIG_GRKERNSEC_HIDESYM
47661 + wchan = 0;
47662 + eip =0;
47663 + esp =0;
47664 +#endif
47665 +
47666 /* scale priority and nice values from timeslices to -20..20 */
47667 /* to make it look like a "normal" Unix priority/nice value */
47668 priority = task_prio(task);
47669 @@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47670 seq_put_decimal_ull(m, ' ', vsize);
47671 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47672 seq_put_decimal_ull(m, ' ', rsslim);
47673 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47674 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47675 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47676 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47677 +#else
47678 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47679 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47680 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47681 +#endif
47682 seq_put_decimal_ull(m, ' ', esp);
47683 seq_put_decimal_ull(m, ' ', eip);
47684 /* The signal information here is obsolete.
47685 @@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47686 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47687 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47688 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
47689 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47690 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47691 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47692 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
47693 +#else
47694 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47695 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47696 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47697 +#endif
47698 seq_putc(m, '\n');
47699 if (mm)
47700 mmput(mm);
47701 @@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47702 struct pid *pid, struct task_struct *task)
47703 {
47704 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47705 - struct mm_struct *mm = get_task_mm(task);
47706 + struct mm_struct *mm;
47707
47708 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47709 + if (current->exec_id != m->exec_id) {
47710 + gr_log_badprocpid("statm");
47711 + return 0;
47712 + }
47713 +#endif
47714 + mm = get_task_mm(task);
47715 if (mm) {
47716 size = task_statm(mm, &shared, &text, &data, &resident);
47717 mmput(mm);
47718 @@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
47719
47720 return 0;
47721 }
47722 +
47723 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47724 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47725 +{
47726 + u32 curr_ip = 0;
47727 + unsigned long flags;
47728 +
47729 + if (lock_task_sighand(task, &flags)) {
47730 + curr_ip = task->signal->curr_ip;
47731 + unlock_task_sighand(task, &flags);
47732 + }
47733 +
47734 + return sprintf(buffer, "%pI4\n", &curr_ip);
47735 +}
47736 +#endif
47737 diff --git a/fs/proc/base.c b/fs/proc/base.c
47738 index 9fc77b4..04761b8 100644
47739 --- a/fs/proc/base.c
47740 +++ b/fs/proc/base.c
47741 @@ -109,6 +109,14 @@ struct pid_entry {
47742 union proc_op op;
47743 };
47744
47745 +struct getdents_callback {
47746 + struct linux_dirent __user * current_dir;
47747 + struct linux_dirent __user * previous;
47748 + struct file * file;
47749 + int count;
47750 + int error;
47751 +};
47752 +
47753 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47754 .name = (NAME), \
47755 .len = sizeof(NAME) - 1, \
47756 @@ -213,6 +221,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47757 if (!mm->arg_end)
47758 goto out_mm; /* Shh! No looking before we're done */
47759
47760 + if (gr_acl_handle_procpidmem(task))
47761 + goto out_mm;
47762 +
47763 len = mm->arg_end - mm->arg_start;
47764
47765 if (len > PAGE_SIZE)
47766 @@ -240,12 +251,28 @@ out:
47767 return res;
47768 }
47769
47770 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47771 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47772 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
47773 + _mm->pax_flags & MF_PAX_SEGMEXEC))
47774 +#endif
47775 +
47776 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47777 {
47778 struct mm_struct *mm = mm_for_maps(task);
47779 int res = PTR_ERR(mm);
47780 if (mm && !IS_ERR(mm)) {
47781 unsigned int nwords = 0;
47782 +
47783 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47784 + /* allow if we're currently ptracing this task */
47785 + if (PAX_RAND_FLAGS(mm) &&
47786 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
47787 + mmput(mm);
47788 + return 0;
47789 + }
47790 +#endif
47791 +
47792 do {
47793 nwords += 2;
47794 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
47795 @@ -259,7 +286,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
47796 }
47797
47798
47799 -#ifdef CONFIG_KALLSYMS
47800 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47801 /*
47802 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47803 * Returns the resolved symbol. If that fails, simply return the address.
47804 @@ -298,7 +325,7 @@ static void unlock_trace(struct task_struct *task)
47805 mutex_unlock(&task->signal->cred_guard_mutex);
47806 }
47807
47808 -#ifdef CONFIG_STACKTRACE
47809 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47810
47811 #define MAX_STACK_TRACE_DEPTH 64
47812
47813 @@ -489,7 +516,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
47814 return count;
47815 }
47816
47817 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47818 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47819 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47820 {
47821 long nr;
47822 @@ -518,7 +545,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
47823 /************************************************************************/
47824
47825 /* permission checks */
47826 -static int proc_fd_access_allowed(struct inode *inode)
47827 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47828 {
47829 struct task_struct *task;
47830 int allowed = 0;
47831 @@ -528,7 +555,10 @@ static int proc_fd_access_allowed(struct inode *inode)
47832 */
47833 task = get_proc_task(inode);
47834 if (task) {
47835 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47836 + if (log)
47837 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47838 + else
47839 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
47840 put_task_struct(task);
47841 }
47842 return allowed;
47843 @@ -566,10 +596,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
47844 struct task_struct *task,
47845 int hide_pid_min)
47846 {
47847 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47848 + return false;
47849 +
47850 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47851 + rcu_read_lock();
47852 + {
47853 + const struct cred *tmpcred = current_cred();
47854 + const struct cred *cred = __task_cred(task);
47855 +
47856 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47857 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47858 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47859 +#endif
47860 + ) {
47861 + rcu_read_unlock();
47862 + return true;
47863 + }
47864 + }
47865 + rcu_read_unlock();
47866 +
47867 + if (!pid->hide_pid)
47868 + return false;
47869 +#endif
47870 +
47871 if (pid->hide_pid < hide_pid_min)
47872 return true;
47873 if (in_group_p(pid->pid_gid))
47874 return true;
47875 +
47876 return ptrace_may_access(task, PTRACE_MODE_READ);
47877 }
47878
47879 @@ -587,7 +642,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
47880 put_task_struct(task);
47881
47882 if (!has_perms) {
47883 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47884 + {
47885 +#else
47886 if (pid->hide_pid == 2) {
47887 +#endif
47888 /*
47889 * Let's make getdents(), stat(), and open()
47890 * consistent with each other. If a process
47891 @@ -702,6 +761,10 @@ static int mem_open(struct inode* inode, struct file* file)
47892 file->f_mode |= FMODE_UNSIGNED_OFFSET;
47893 file->private_data = mm;
47894
47895 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47896 + file->f_version = current->exec_id;
47897 +#endif
47898 +
47899 return 0;
47900 }
47901
47902 @@ -713,6 +776,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
47903 ssize_t copied;
47904 char *page;
47905
47906 +#ifdef CONFIG_GRKERNSEC
47907 + if (write)
47908 + return -EPERM;
47909 +#endif
47910 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47911 + if (file->f_version != current->exec_id) {
47912 + gr_log_badprocpid("mem");
47913 + return 0;
47914 + }
47915 +#endif
47916 +
47917 if (!mm)
47918 return 0;
47919
47920 @@ -813,6 +887,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
47921 if (!task)
47922 goto out_no_task;
47923
47924 + if (gr_acl_handle_procpidmem(task))
47925 + goto out;
47926 +
47927 ret = -ENOMEM;
47928 page = (char *)__get_free_page(GFP_TEMPORARY);
47929 if (!page)
47930 @@ -1433,7 +1510,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
47931 path_put(&nd->path);
47932
47933 /* Are we allowed to snoop on the tasks file descriptors? */
47934 - if (!proc_fd_access_allowed(inode))
47935 + if (!proc_fd_access_allowed(inode, 0))
47936 goto out;
47937
47938 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
47939 @@ -1472,8 +1549,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
47940 struct path path;
47941
47942 /* Are we allowed to snoop on the tasks file descriptors? */
47943 - if (!proc_fd_access_allowed(inode))
47944 - goto out;
47945 + /* logging this is needed for learning on chromium to work properly,
47946 + but we don't want to flood the logs from 'ps' which does a readlink
47947 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
47948 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
47949 + */
47950 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
47951 + if (!proc_fd_access_allowed(inode,0))
47952 + goto out;
47953 + } else {
47954 + if (!proc_fd_access_allowed(inode,1))
47955 + goto out;
47956 + }
47957
47958 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
47959 if (error)
47960 @@ -1538,7 +1625,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
47961 rcu_read_lock();
47962 cred = __task_cred(task);
47963 inode->i_uid = cred->euid;
47964 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47965 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
47966 +#else
47967 inode->i_gid = cred->egid;
47968 +#endif
47969 rcu_read_unlock();
47970 }
47971 security_task_to_inode(task, inode);
47972 @@ -1574,10 +1665,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
47973 return -ENOENT;
47974 }
47975 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47976 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47977 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47978 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47979 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
47980 +#endif
47981 task_dumpable(task)) {
47982 cred = __task_cred(task);
47983 stat->uid = cred->euid;
47984 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47985 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
47986 +#else
47987 stat->gid = cred->egid;
47988 +#endif
47989 }
47990 }
47991 rcu_read_unlock();
47992 @@ -1615,11 +1715,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
47993
47994 if (task) {
47995 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
47996 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47997 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
47998 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47999 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48000 +#endif
48001 task_dumpable(task)) {
48002 rcu_read_lock();
48003 cred = __task_cred(task);
48004 inode->i_uid = cred->euid;
48005 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48006 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48007 +#else
48008 inode->i_gid = cred->egid;
48009 +#endif
48010 rcu_read_unlock();
48011 } else {
48012 inode->i_uid = 0;
48013 @@ -1737,7 +1846,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
48014 int fd = proc_fd(inode);
48015
48016 if (task) {
48017 - files = get_files_struct(task);
48018 + if (!gr_acl_handle_procpidmem(task))
48019 + files = get_files_struct(task);
48020 put_task_struct(task);
48021 }
48022 if (files) {
48023 @@ -2338,11 +2448,21 @@ static const struct file_operations proc_map_files_operations = {
48024 */
48025 static int proc_fd_permission(struct inode *inode, int mask)
48026 {
48027 + struct task_struct *task;
48028 int rv = generic_permission(inode, mask);
48029 - if (rv == 0)
48030 - return 0;
48031 +
48032 if (task_pid(current) == proc_pid(inode))
48033 rv = 0;
48034 +
48035 + task = get_proc_task(inode);
48036 + if (task == NULL)
48037 + return rv;
48038 +
48039 + if (gr_acl_handle_procpidmem(task))
48040 + rv = -EACCES;
48041 +
48042 + put_task_struct(task);
48043 +
48044 return rv;
48045 }
48046
48047 @@ -2452,6 +2572,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
48048 if (!task)
48049 goto out_no_task;
48050
48051 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48052 + goto out;
48053 +
48054 /*
48055 * Yes, it does not scale. And it should not. Don't add
48056 * new entries into /proc/<tgid>/ without very good reasons.
48057 @@ -2496,6 +2619,9 @@ static int proc_pident_readdir(struct file *filp,
48058 if (!task)
48059 goto out_no_task;
48060
48061 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48062 + goto out;
48063 +
48064 ret = 0;
48065 i = filp->f_pos;
48066 switch (i) {
48067 @@ -2766,7 +2892,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
48068 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48069 void *cookie)
48070 {
48071 - char *s = nd_get_link(nd);
48072 + const char *s = nd_get_link(nd);
48073 if (!IS_ERR(s))
48074 __putname(s);
48075 }
48076 @@ -2967,7 +3093,7 @@ static const struct pid_entry tgid_base_stuff[] = {
48077 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
48078 #endif
48079 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48080 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48081 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48082 INF("syscall", S_IRUGO, proc_pid_syscall),
48083 #endif
48084 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48085 @@ -2992,10 +3118,10 @@ static const struct pid_entry tgid_base_stuff[] = {
48086 #ifdef CONFIG_SECURITY
48087 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48088 #endif
48089 -#ifdef CONFIG_KALLSYMS
48090 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48091 INF("wchan", S_IRUGO, proc_pid_wchan),
48092 #endif
48093 -#ifdef CONFIG_STACKTRACE
48094 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48095 ONE("stack", S_IRUGO, proc_pid_stack),
48096 #endif
48097 #ifdef CONFIG_SCHEDSTATS
48098 @@ -3029,6 +3155,9 @@ static const struct pid_entry tgid_base_stuff[] = {
48099 #ifdef CONFIG_HARDWALL
48100 INF("hardwall", S_IRUGO, proc_pid_hardwall),
48101 #endif
48102 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48103 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48104 +#endif
48105 };
48106
48107 static int proc_tgid_base_readdir(struct file * filp,
48108 @@ -3155,7 +3284,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
48109 if (!inode)
48110 goto out;
48111
48112 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48113 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48114 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48115 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48116 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48117 +#else
48118 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48119 +#endif
48120 inode->i_op = &proc_tgid_base_inode_operations;
48121 inode->i_fop = &proc_tgid_base_operations;
48122 inode->i_flags|=S_IMMUTABLE;
48123 @@ -3197,7 +3333,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
48124 if (!task)
48125 goto out;
48126
48127 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48128 + goto out_put_task;
48129 +
48130 result = proc_pid_instantiate(dir, dentry, task, NULL);
48131 +out_put_task:
48132 put_task_struct(task);
48133 out:
48134 return result;
48135 @@ -3260,6 +3400,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
48136 static int fake_filldir(void *buf, const char *name, int namelen,
48137 loff_t offset, u64 ino, unsigned d_type)
48138 {
48139 + struct getdents_callback * __buf = (struct getdents_callback *) buf;
48140 + __buf->error = -EINVAL;
48141 return 0;
48142 }
48143
48144 @@ -3326,7 +3468,7 @@ static const struct pid_entry tid_base_stuff[] = {
48145 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48146 #endif
48147 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48148 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48149 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
48150 INF("syscall", S_IRUGO, proc_pid_syscall),
48151 #endif
48152 INF("cmdline", S_IRUGO, proc_pid_cmdline),
48153 @@ -3350,10 +3492,10 @@ static const struct pid_entry tid_base_stuff[] = {
48154 #ifdef CONFIG_SECURITY
48155 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48156 #endif
48157 -#ifdef CONFIG_KALLSYMS
48158 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48159 INF("wchan", S_IRUGO, proc_pid_wchan),
48160 #endif
48161 -#ifdef CONFIG_STACKTRACE
48162 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
48163 ONE("stack", S_IRUGO, proc_pid_stack),
48164 #endif
48165 #ifdef CONFIG_SCHEDSTATS
48166 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48167 index 82676e3..5f8518a 100644
48168 --- a/fs/proc/cmdline.c
48169 +++ b/fs/proc/cmdline.c
48170 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
48171
48172 static int __init proc_cmdline_init(void)
48173 {
48174 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48175 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48176 +#else
48177 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48178 +#endif
48179 return 0;
48180 }
48181 module_init(proc_cmdline_init);
48182 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48183 index b143471..bb105e5 100644
48184 --- a/fs/proc/devices.c
48185 +++ b/fs/proc/devices.c
48186 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
48187
48188 static int __init proc_devices_init(void)
48189 {
48190 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48191 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48192 +#else
48193 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48194 +#endif
48195 return 0;
48196 }
48197 module_init(proc_devices_init);
48198 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
48199 index 205c922..2ee4c57 100644
48200 --- a/fs/proc/inode.c
48201 +++ b/fs/proc/inode.c
48202 @@ -21,11 +21,17 @@
48203 #include <linux/seq_file.h>
48204 #include <linux/slab.h>
48205 #include <linux/mount.h>
48206 +#include <linux/grsecurity.h>
48207
48208 #include <asm/uaccess.h>
48209
48210 #include "internal.h"
48211
48212 +#ifdef CONFIG_PROC_SYSCTL
48213 +extern const struct inode_operations proc_sys_inode_operations;
48214 +extern const struct inode_operations proc_sys_dir_operations;
48215 +#endif
48216 +
48217 static void proc_evict_inode(struct inode *inode)
48218 {
48219 struct proc_dir_entry *de;
48220 @@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
48221 ns_ops = PROC_I(inode)->ns_ops;
48222 if (ns_ops && ns_ops->put)
48223 ns_ops->put(PROC_I(inode)->ns);
48224 +
48225 +#ifdef CONFIG_PROC_SYSCTL
48226 + if (inode->i_op == &proc_sys_inode_operations ||
48227 + inode->i_op == &proc_sys_dir_operations)
48228 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48229 +#endif
48230 +
48231 }
48232
48233 static struct kmem_cache * proc_inode_cachep;
48234 @@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
48235 if (de->mode) {
48236 inode->i_mode = de->mode;
48237 inode->i_uid = de->uid;
48238 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48239 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48240 +#else
48241 inode->i_gid = de->gid;
48242 +#endif
48243 }
48244 if (de->size)
48245 inode->i_size = de->size;
48246 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
48247 index 5f79bb8..eeccee4 100644
48248 --- a/fs/proc/internal.h
48249 +++ b/fs/proc/internal.h
48250 @@ -54,6 +54,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
48251 struct pid *pid, struct task_struct *task);
48252 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48253 struct pid *pid, struct task_struct *task);
48254 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48255 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48256 +#endif
48257 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48258
48259 extern const struct file_operations proc_pid_maps_operations;
48260 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
48261 index 86c67ee..cdca321 100644
48262 --- a/fs/proc/kcore.c
48263 +++ b/fs/proc/kcore.c
48264 @@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48265 * the addresses in the elf_phdr on our list.
48266 */
48267 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48268 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48269 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48270 + if (tsz > buflen)
48271 tsz = buflen;
48272 -
48273 +
48274 while (buflen) {
48275 struct kcore_list *m;
48276
48277 @@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48278 kfree(elf_buf);
48279 } else {
48280 if (kern_addr_valid(start)) {
48281 - unsigned long n;
48282 + char *elf_buf;
48283 + mm_segment_t oldfs;
48284
48285 - n = copy_to_user(buffer, (char *)start, tsz);
48286 - /*
48287 - * We cannot distinguish between fault on source
48288 - * and fault on destination. When this happens
48289 - * we clear too and hope it will trigger the
48290 - * EFAULT again.
48291 - */
48292 - if (n) {
48293 - if (clear_user(buffer + tsz - n,
48294 - n))
48295 + elf_buf = kmalloc(tsz, GFP_KERNEL);
48296 + if (!elf_buf)
48297 + return -ENOMEM;
48298 + oldfs = get_fs();
48299 + set_fs(KERNEL_DS);
48300 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
48301 + set_fs(oldfs);
48302 + if (copy_to_user(buffer, elf_buf, tsz)) {
48303 + kfree(elf_buf);
48304 return -EFAULT;
48305 + }
48306 }
48307 + set_fs(oldfs);
48308 + kfree(elf_buf);
48309 } else {
48310 if (clear_user(buffer, tsz))
48311 return -EFAULT;
48312 @@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
48313
48314 static int open_kcore(struct inode *inode, struct file *filp)
48315 {
48316 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48317 + return -EPERM;
48318 +#endif
48319 if (!capable(CAP_SYS_RAWIO))
48320 return -EPERM;
48321 if (kcore_need_update)
48322 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
48323 index 80e4645..53e5fcf 100644
48324 --- a/fs/proc/meminfo.c
48325 +++ b/fs/proc/meminfo.c
48326 @@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
48327 vmi.used >> 10,
48328 vmi.largest_chunk >> 10
48329 #ifdef CONFIG_MEMORY_FAILURE
48330 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48331 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48332 #endif
48333 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48334 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
48335 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48336 index b1822dd..df622cb 100644
48337 --- a/fs/proc/nommu.c
48338 +++ b/fs/proc/nommu.c
48339 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
48340 if (len < 1)
48341 len = 1;
48342 seq_printf(m, "%*c", len, ' ');
48343 - seq_path(m, &file->f_path, "");
48344 + seq_path(m, &file->f_path, "\n\\");
48345 }
48346
48347 seq_putc(m, '\n');
48348 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
48349 index 06e1cc1..177cd98 100644
48350 --- a/fs/proc/proc_net.c
48351 +++ b/fs/proc/proc_net.c
48352 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
48353 struct task_struct *task;
48354 struct nsproxy *ns;
48355 struct net *net = NULL;
48356 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48357 + const struct cred *cred = current_cred();
48358 +#endif
48359 +
48360 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48361 + if (cred->fsuid)
48362 + return net;
48363 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48364 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48365 + return net;
48366 +#endif
48367
48368 rcu_read_lock();
48369 task = pid_task(proc_pid(dir), PIDTYPE_PID);
48370 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
48371 index 21d836f..bebf3ee 100644
48372 --- a/fs/proc/proc_sysctl.c
48373 +++ b/fs/proc/proc_sysctl.c
48374 @@ -12,11 +12,15 @@
48375 #include <linux/module.h>
48376 #include "internal.h"
48377
48378 +extern int gr_handle_chroot_sysctl(const int op);
48379 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
48380 + const int op);
48381 +
48382 static const struct dentry_operations proc_sys_dentry_operations;
48383 static const struct file_operations proc_sys_file_operations;
48384 -static const struct inode_operations proc_sys_inode_operations;
48385 +const struct inode_operations proc_sys_inode_operations;
48386 static const struct file_operations proc_sys_dir_file_operations;
48387 -static const struct inode_operations proc_sys_dir_operations;
48388 +const struct inode_operations proc_sys_dir_operations;
48389
48390 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48391 {
48392 @@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
48393
48394 err = NULL;
48395 d_set_d_op(dentry, &proc_sys_dentry_operations);
48396 +
48397 + gr_handle_proc_create(dentry, inode);
48398 +
48399 d_add(dentry, inode);
48400
48401 + if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
48402 + err = ERR_PTR(-ENOENT);
48403 +
48404 out:
48405 sysctl_head_finish(head);
48406 return err;
48407 @@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48408 struct inode *inode = filp->f_path.dentry->d_inode;
48409 struct ctl_table_header *head = grab_header(inode);
48410 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
48411 + int op = write ? MAY_WRITE : MAY_READ;
48412 ssize_t error;
48413 size_t res;
48414
48415 if (IS_ERR(head))
48416 return PTR_ERR(head);
48417
48418 +
48419 /*
48420 * At this point we know that the sysctl was not unregistered
48421 * and won't be until we finish.
48422 */
48423 error = -EPERM;
48424 - if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
48425 + if (sysctl_perm(head->root, table, op))
48426 goto out;
48427
48428 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
48429 @@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48430 if (!table->proc_handler)
48431 goto out;
48432
48433 +#ifdef CONFIG_GRKERNSEC
48434 + error = -EPERM;
48435 + if (gr_handle_chroot_sysctl(op))
48436 + goto out;
48437 + dget(filp->f_path.dentry);
48438 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
48439 + dput(filp->f_path.dentry);
48440 + goto out;
48441 + }
48442 + dput(filp->f_path.dentry);
48443 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48444 + goto out;
48445 + if (write && !capable(CAP_SYS_ADMIN))
48446 + goto out;
48447 +#endif
48448 +
48449 /* careful: calling conventions are nasty here */
48450 res = count;
48451 error = table->proc_handler(table, write, buf, &res, ppos);
48452 @@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
48453 return -ENOMEM;
48454 } else {
48455 d_set_d_op(child, &proc_sys_dentry_operations);
48456 +
48457 + gr_handle_proc_create(child, inode);
48458 +
48459 d_add(child, inode);
48460 }
48461 } else {
48462 @@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48463 if ((*pos)++ < file->f_pos)
48464 return 0;
48465
48466 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48467 + return 0;
48468 +
48469 if (unlikely(S_ISLNK(table->mode)))
48470 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48471 else
48472 @@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
48473 if (IS_ERR(head))
48474 return PTR_ERR(head);
48475
48476 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
48477 + return -ENOENT;
48478 +
48479 generic_fillattr(inode, stat);
48480 if (table)
48481 stat->mode = (stat->mode & S_IFMT) | table->mode;
48482 @@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
48483 .llseek = generic_file_llseek,
48484 };
48485
48486 -static const struct inode_operations proc_sys_inode_operations = {
48487 +const struct inode_operations proc_sys_inode_operations = {
48488 .permission = proc_sys_permission,
48489 .setattr = proc_sys_setattr,
48490 .getattr = proc_sys_getattr,
48491 };
48492
48493 -static const struct inode_operations proc_sys_dir_operations = {
48494 +const struct inode_operations proc_sys_dir_operations = {
48495 .lookup = proc_sys_lookup,
48496 .permission = proc_sys_permission,
48497 .setattr = proc_sys_setattr,
48498 diff --git a/fs/proc/root.c b/fs/proc/root.c
48499 index eed44bf..abeb499 100644
48500 --- a/fs/proc/root.c
48501 +++ b/fs/proc/root.c
48502 @@ -188,7 +188,15 @@ void __init proc_root_init(void)
48503 #ifdef CONFIG_PROC_DEVICETREE
48504 proc_device_tree_init();
48505 #endif
48506 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
48507 +#ifdef CONFIG_GRKERNSEC_PROC_USER
48508 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48509 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48510 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48511 +#endif
48512 +#else
48513 proc_mkdir("bus", NULL);
48514 +#endif
48515 proc_sys_init();
48516 }
48517
48518 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
48519 index 7faaf2a..096c28b 100644
48520 --- a/fs/proc/task_mmu.c
48521 +++ b/fs/proc/task_mmu.c
48522 @@ -11,12 +11,19 @@
48523 #include <linux/rmap.h>
48524 #include <linux/swap.h>
48525 #include <linux/swapops.h>
48526 +#include <linux/grsecurity.h>
48527
48528 #include <asm/elf.h>
48529 #include <asm/uaccess.h>
48530 #include <asm/tlbflush.h>
48531 #include "internal.h"
48532
48533 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48534 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48535 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
48536 + _mm->pax_flags & MF_PAX_SEGMEXEC))
48537 +#endif
48538 +
48539 void task_mem(struct seq_file *m, struct mm_struct *mm)
48540 {
48541 unsigned long data, text, lib, swap;
48542 @@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48543 "VmExe:\t%8lu kB\n"
48544 "VmLib:\t%8lu kB\n"
48545 "VmPTE:\t%8lu kB\n"
48546 - "VmSwap:\t%8lu kB\n",
48547 - hiwater_vm << (PAGE_SHIFT-10),
48548 + "VmSwap:\t%8lu kB\n"
48549 +
48550 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48551 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48552 +#endif
48553 +
48554 + ,hiwater_vm << (PAGE_SHIFT-10),
48555 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48556 mm->locked_vm << (PAGE_SHIFT-10),
48557 mm->pinned_vm << (PAGE_SHIFT-10),
48558 @@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48559 data << (PAGE_SHIFT-10),
48560 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
48561 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48562 - swap << (PAGE_SHIFT-10));
48563 + swap << (PAGE_SHIFT-10)
48564 +
48565 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48566 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48567 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48568 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48569 +#else
48570 + , mm->context.user_cs_base
48571 + , mm->context.user_cs_limit
48572 +#endif
48573 +#endif
48574 +
48575 + );
48576 }
48577
48578 unsigned long task_vsize(struct mm_struct *mm)
48579 @@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48580 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
48581 }
48582
48583 - /* We don't show the stack guard page in /proc/maps */
48584 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48585 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48586 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
48587 +#else
48588 start = vma->vm_start;
48589 - if (stack_guard_page_start(vma, start))
48590 - start += PAGE_SIZE;
48591 end = vma->vm_end;
48592 - if (stack_guard_page_end(vma, end))
48593 - end -= PAGE_SIZE;
48594 +#endif
48595
48596 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48597 start,
48598 @@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48599 flags & VM_WRITE ? 'w' : '-',
48600 flags & VM_EXEC ? 'x' : '-',
48601 flags & VM_MAYSHARE ? 's' : 'p',
48602 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48603 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48604 +#else
48605 pgoff,
48606 +#endif
48607 MAJOR(dev), MINOR(dev), ino, &len);
48608
48609 /*
48610 @@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48611 */
48612 if (file) {
48613 pad_len_spaces(m, len);
48614 - seq_path(m, &file->f_path, "\n");
48615 + seq_path(m, &file->f_path, "\n\\");
48616 goto done;
48617 }
48618
48619 @@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48620 * Thread stack in /proc/PID/task/TID/maps or
48621 * the main process stack.
48622 */
48623 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
48624 - vma->vm_end >= mm->start_stack)) {
48625 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48626 + (vma->vm_start <= mm->start_stack &&
48627 + vma->vm_end >= mm->start_stack)) {
48628 name = "[stack]";
48629 } else {
48630 /* Thread stack in /proc/PID/maps */
48631 @@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
48632 struct proc_maps_private *priv = m->private;
48633 struct task_struct *task = priv->task;
48634
48635 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48636 + if (current->exec_id != m->exec_id) {
48637 + gr_log_badprocpid("maps");
48638 + return 0;
48639 + }
48640 +#endif
48641 +
48642 show_map_vma(m, vma, is_pid);
48643
48644 if (m->count < m->size) /* vma is copied successfully */
48645 @@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48646 .private = &mss,
48647 };
48648
48649 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48650 + if (current->exec_id != m->exec_id) {
48651 + gr_log_badprocpid("smaps");
48652 + return 0;
48653 + }
48654 +#endif
48655 memset(&mss, 0, sizeof mss);
48656 - mss.vma = vma;
48657 - /* mmap_sem is held in m_start */
48658 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48659 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48660 -
48661 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48662 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48663 +#endif
48664 + mss.vma = vma;
48665 + /* mmap_sem is held in m_start */
48666 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48667 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48668 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48669 + }
48670 +#endif
48671 show_map_vma(m, vma, is_pid);
48672
48673 seq_printf(m,
48674 @@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
48675 "KernelPageSize: %8lu kB\n"
48676 "MMUPageSize: %8lu kB\n"
48677 "Locked: %8lu kB\n",
48678 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48679 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48680 +#else
48681 (vma->vm_end - vma->vm_start) >> 10,
48682 +#endif
48683 mss.resident >> 10,
48684 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48685 mss.shared_clean >> 10,
48686 @@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48687 int n;
48688 char buffer[50];
48689
48690 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48691 + if (current->exec_id != m->exec_id) {
48692 + gr_log_badprocpid("numa_maps");
48693 + return 0;
48694 + }
48695 +#endif
48696 +
48697 if (!mm)
48698 return 0;
48699
48700 @@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
48701 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48702 mpol_cond_put(pol);
48703
48704 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48705 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48706 +#else
48707 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48708 +#endif
48709
48710 if (file) {
48711 seq_printf(m, " file=");
48712 - seq_path(m, &file->f_path, "\n\t= ");
48713 + seq_path(m, &file->f_path, "\n\t\\= ");
48714 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48715 seq_printf(m, " heap");
48716 } else {
48717 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
48718 index 74fe164..899e77b 100644
48719 --- a/fs/proc/task_nommu.c
48720 +++ b/fs/proc/task_nommu.c
48721 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
48722 else
48723 bytes += kobjsize(mm);
48724
48725 - if (current->fs && current->fs->users > 1)
48726 + if (current->fs && atomic_read(&current->fs->users) > 1)
48727 sbytes += kobjsize(current->fs);
48728 else
48729 bytes += kobjsize(current->fs);
48730 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
48731
48732 if (file) {
48733 pad_len_spaces(m, len);
48734 - seq_path(m, &file->f_path, "");
48735 + seq_path(m, &file->f_path, "\n\\");
48736 } else if (mm) {
48737 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48738
48739 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48740 index d67908b..d13f6a6 100644
48741 --- a/fs/quota/netlink.c
48742 +++ b/fs/quota/netlink.c
48743 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
48744 void quota_send_warning(short type, unsigned int id, dev_t dev,
48745 const char warntype)
48746 {
48747 - static atomic_t seq;
48748 + static atomic_unchecked_t seq;
48749 struct sk_buff *skb;
48750 void *msg_head;
48751 int ret;
48752 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
48753 "VFS: Not enough memory to send quota warning.\n");
48754 return;
48755 }
48756 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
48757 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
48758 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
48759 if (!msg_head) {
48760 printk(KERN_ERR
48761 diff --git a/fs/readdir.c b/fs/readdir.c
48762 index cc0a822..43cb195 100644
48763 --- a/fs/readdir.c
48764 +++ b/fs/readdir.c
48765 @@ -17,6 +17,7 @@
48766 #include <linux/security.h>
48767 #include <linux/syscalls.h>
48768 #include <linux/unistd.h>
48769 +#include <linux/namei.h>
48770
48771 #include <asm/uaccess.h>
48772
48773 @@ -67,6 +68,7 @@ struct old_linux_dirent {
48774
48775 struct readdir_callback {
48776 struct old_linux_dirent __user * dirent;
48777 + struct file * file;
48778 int result;
48779 };
48780
48781 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
48782 buf->result = -EOVERFLOW;
48783 return -EOVERFLOW;
48784 }
48785 +
48786 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48787 + return 0;
48788 +
48789 buf->result++;
48790 dirent = buf->dirent;
48791 if (!access_ok(VERIFY_WRITE, dirent,
48792 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
48793
48794 buf.result = 0;
48795 buf.dirent = dirent;
48796 + buf.file = file;
48797
48798 error = vfs_readdir(file, fillonedir, &buf);
48799 if (buf.result)
48800 @@ -142,6 +149,7 @@ struct linux_dirent {
48801 struct getdents_callback {
48802 struct linux_dirent __user * current_dir;
48803 struct linux_dirent __user * previous;
48804 + struct file * file;
48805 int count;
48806 int error;
48807 };
48808 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
48809 buf->error = -EOVERFLOW;
48810 return -EOVERFLOW;
48811 }
48812 +
48813 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48814 + return 0;
48815 +
48816 dirent = buf->previous;
48817 if (dirent) {
48818 if (__put_user(offset, &dirent->d_off))
48819 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
48820 buf.previous = NULL;
48821 buf.count = count;
48822 buf.error = 0;
48823 + buf.file = file;
48824
48825 error = vfs_readdir(file, filldir, &buf);
48826 if (error >= 0)
48827 @@ -229,6 +242,7 @@ out:
48828 struct getdents_callback64 {
48829 struct linux_dirent64 __user * current_dir;
48830 struct linux_dirent64 __user * previous;
48831 + struct file *file;
48832 int count;
48833 int error;
48834 };
48835 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
48836 buf->error = -EINVAL; /* only used if we fail.. */
48837 if (reclen > buf->count)
48838 return -EINVAL;
48839 +
48840 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48841 + return 0;
48842 +
48843 dirent = buf->previous;
48844 if (dirent) {
48845 if (__put_user(offset, &dirent->d_off))
48846 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48847
48848 buf.current_dir = dirent;
48849 buf.previous = NULL;
48850 + buf.file = file;
48851 buf.count = count;
48852 buf.error = 0;
48853
48854 @@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
48855 error = buf.error;
48856 lastdirent = buf.previous;
48857 if (lastdirent) {
48858 - typeof(lastdirent->d_off) d_off = file->f_pos;
48859 + typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48860 if (__put_user(d_off, &lastdirent->d_off))
48861 error = -EFAULT;
48862 else
48863 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
48864 index 2b7882b..1c5ef48 100644
48865 --- a/fs/reiserfs/do_balan.c
48866 +++ b/fs/reiserfs/do_balan.c
48867 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
48868 return;
48869 }
48870
48871 - atomic_inc(&(fs_generation(tb->tb_sb)));
48872 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
48873 do_balance_starts(tb);
48874
48875 /* balance leaf returns 0 except if combining L R and S into
48876 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
48877 index 2c1ade6..8c59d8d 100644
48878 --- a/fs/reiserfs/procfs.c
48879 +++ b/fs/reiserfs/procfs.c
48880 @@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
48881 "SMALL_TAILS " : "NO_TAILS ",
48882 replay_only(sb) ? "REPLAY_ONLY " : "",
48883 convert_reiserfs(sb) ? "CONV " : "",
48884 - atomic_read(&r->s_generation_counter),
48885 + atomic_read_unchecked(&r->s_generation_counter),
48886 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
48887 SF(s_do_balance), SF(s_unneeded_left_neighbor),
48888 SF(s_good_search_by_key_reada), SF(s_bmaps),
48889 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
48890 index a59d271..e12d1cf 100644
48891 --- a/fs/reiserfs/reiserfs.h
48892 +++ b/fs/reiserfs/reiserfs.h
48893 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
48894 /* Comment? -Hans */
48895 wait_queue_head_t s_wait;
48896 /* To be obsoleted soon by per buffer seals.. -Hans */
48897 - atomic_t s_generation_counter; // increased by one every time the
48898 + atomic_unchecked_t s_generation_counter; // increased by one every time the
48899 // tree gets re-balanced
48900 unsigned long s_properties; /* File system properties. Currently holds
48901 on-disk FS format */
48902 @@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
48903 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
48904
48905 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
48906 -#define get_generation(s) atomic_read (&fs_generation(s))
48907 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
48908 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
48909 #define __fs_changed(gen,s) (gen != get_generation (s))
48910 #define fs_changed(gen,s) \
48911 diff --git a/fs/select.c b/fs/select.c
48912 index 17d33d0..da0bf5c 100644
48913 --- a/fs/select.c
48914 +++ b/fs/select.c
48915 @@ -20,6 +20,7 @@
48916 #include <linux/export.h>
48917 #include <linux/slab.h>
48918 #include <linux/poll.h>
48919 +#include <linux/security.h>
48920 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
48921 #include <linux/file.h>
48922 #include <linux/fdtable.h>
48923 @@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
48924 struct poll_list *walk = head;
48925 unsigned long todo = nfds;
48926
48927 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
48928 if (nfds > rlimit(RLIMIT_NOFILE))
48929 return -EINVAL;
48930
48931 diff --git a/fs/seq_file.c b/fs/seq_file.c
48932 index 0cbd049..e2773e2 100644
48933 --- a/fs/seq_file.c
48934 +++ b/fs/seq_file.c
48935 @@ -9,6 +9,7 @@
48936 #include <linux/export.h>
48937 #include <linux/seq_file.h>
48938 #include <linux/slab.h>
48939 +#include <linux/sched.h>
48940
48941 #include <asm/uaccess.h>
48942 #include <asm/page.h>
48943 @@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
48944 memset(p, 0, sizeof(*p));
48945 mutex_init(&p->lock);
48946 p->op = op;
48947 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48948 + p->exec_id = current->exec_id;
48949 +#endif
48950
48951 /*
48952 * Wrappers around seq_open(e.g. swaps_open) need to be
48953 @@ -92,7 +96,11 @@ static int traverse(struct seq_file *m, loff_t offset)
48954 return 0;
48955 }
48956 if (!m->buf) {
48957 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48958 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
48959 +#else
48960 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48961 +#endif
48962 if (!m->buf)
48963 return -ENOMEM;
48964 }
48965 @@ -132,7 +140,11 @@ static int traverse(struct seq_file *m, loff_t offset)
48966 Eoverflow:
48967 m->op->stop(m, p);
48968 kfree(m->buf);
48969 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48970 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
48971 +#else
48972 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48973 +#endif
48974 return !m->buf ? -ENOMEM : -EAGAIN;
48975 }
48976
48977 @@ -187,7 +199,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48978
48979 /* grab buffer if we didn't have one */
48980 if (!m->buf) {
48981 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48982 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
48983 +#else
48984 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
48985 +#endif
48986 if (!m->buf)
48987 goto Enomem;
48988 }
48989 @@ -228,7 +244,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
48990 goto Fill;
48991 m->op->stop(m, p);
48992 kfree(m->buf);
48993 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48994 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
48995 +#else
48996 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
48997 +#endif
48998 if (!m->buf)
48999 goto Enomem;
49000 m->count = 0;
49001 @@ -567,7 +587,7 @@ static void single_stop(struct seq_file *p, void *v)
49002 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49003 void *data)
49004 {
49005 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49006 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49007 int res = -ENOMEM;
49008
49009 if (op) {
49010 diff --git a/fs/splice.c b/fs/splice.c
49011 index 5cac690..f833a99 100644
49012 --- a/fs/splice.c
49013 +++ b/fs/splice.c
49014 @@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49015 pipe_lock(pipe);
49016
49017 for (;;) {
49018 - if (!pipe->readers) {
49019 + if (!atomic_read(&pipe->readers)) {
49020 send_sig(SIGPIPE, current, 0);
49021 if (!ret)
49022 ret = -EPIPE;
49023 @@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
49024 do_wakeup = 0;
49025 }
49026
49027 - pipe->waiting_writers++;
49028 + atomic_inc(&pipe->waiting_writers);
49029 pipe_wait(pipe);
49030 - pipe->waiting_writers--;
49031 + atomic_dec(&pipe->waiting_writers);
49032 }
49033
49034 pipe_unlock(pipe);
49035 @@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
49036 old_fs = get_fs();
49037 set_fs(get_ds());
49038 /* The cast to a user pointer is valid due to the set_fs() */
49039 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
49040 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
49041 set_fs(old_fs);
49042
49043 return res;
49044 @@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
49045 old_fs = get_fs();
49046 set_fs(get_ds());
49047 /* The cast to a user pointer is valid due to the set_fs() */
49048 - res = vfs_write(file, (const char __user *)buf, count, &pos);
49049 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
49050 set_fs(old_fs);
49051
49052 return res;
49053 @@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
49054 goto err;
49055
49056 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49057 - vec[i].iov_base = (void __user *) page_address(page);
49058 + vec[i].iov_base = (void __force_user *) page_address(page);
49059 vec[i].iov_len = this_len;
49060 spd.pages[i] = page;
49061 spd.nr_pages++;
49062 @@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
49063 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49064 {
49065 while (!pipe->nrbufs) {
49066 - if (!pipe->writers)
49067 + if (!atomic_read(&pipe->writers))
49068 return 0;
49069
49070 - if (!pipe->waiting_writers && sd->num_spliced)
49071 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49072 return 0;
49073
49074 if (sd->flags & SPLICE_F_NONBLOCK)
49075 @@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
49076 * out of the pipe right after the splice_to_pipe(). So set
49077 * PIPE_READERS appropriately.
49078 */
49079 - pipe->readers = 1;
49080 + atomic_set(&pipe->readers, 1);
49081
49082 current->splice_pipe = pipe;
49083 }
49084 @@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49085 ret = -ERESTARTSYS;
49086 break;
49087 }
49088 - if (!pipe->writers)
49089 + if (!atomic_read(&pipe->writers))
49090 break;
49091 - if (!pipe->waiting_writers) {
49092 + if (!atomic_read(&pipe->waiting_writers)) {
49093 if (flags & SPLICE_F_NONBLOCK) {
49094 ret = -EAGAIN;
49095 break;
49096 @@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49097 pipe_lock(pipe);
49098
49099 while (pipe->nrbufs >= pipe->buffers) {
49100 - if (!pipe->readers) {
49101 + if (!atomic_read(&pipe->readers)) {
49102 send_sig(SIGPIPE, current, 0);
49103 ret = -EPIPE;
49104 break;
49105 @@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
49106 ret = -ERESTARTSYS;
49107 break;
49108 }
49109 - pipe->waiting_writers++;
49110 + atomic_inc(&pipe->waiting_writers);
49111 pipe_wait(pipe);
49112 - pipe->waiting_writers--;
49113 + atomic_dec(&pipe->waiting_writers);
49114 }
49115
49116 pipe_unlock(pipe);
49117 @@ -1823,14 +1823,14 @@ retry:
49118 pipe_double_lock(ipipe, opipe);
49119
49120 do {
49121 - if (!opipe->readers) {
49122 + if (!atomic_read(&opipe->readers)) {
49123 send_sig(SIGPIPE, current, 0);
49124 if (!ret)
49125 ret = -EPIPE;
49126 break;
49127 }
49128
49129 - if (!ipipe->nrbufs && !ipipe->writers)
49130 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49131 break;
49132
49133 /*
49134 @@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49135 pipe_double_lock(ipipe, opipe);
49136
49137 do {
49138 - if (!opipe->readers) {
49139 + if (!atomic_read(&opipe->readers)) {
49140 send_sig(SIGPIPE, current, 0);
49141 if (!ret)
49142 ret = -EPIPE;
49143 @@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
49144 * return EAGAIN if we have the potential of some data in the
49145 * future, otherwise just return 0
49146 */
49147 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49148 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49149 ret = -EAGAIN;
49150
49151 pipe_unlock(ipipe);
49152 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
49153 index 35a36d3..23424b2 100644
49154 --- a/fs/sysfs/dir.c
49155 +++ b/fs/sysfs/dir.c
49156 @@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
49157 struct sysfs_dirent *sd;
49158 int rc;
49159
49160 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49161 + const char *parent_name = parent_sd->s_name;
49162 +
49163 + mode = S_IFDIR | S_IRWXU;
49164 +
49165 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49166 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49167 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49168 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49169 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49170 +#endif
49171 +
49172 /* allocate */
49173 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49174 if (!sd)
49175 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
49176 index 00012e3..8392349 100644
49177 --- a/fs/sysfs/file.c
49178 +++ b/fs/sysfs/file.c
49179 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
49180
49181 struct sysfs_open_dirent {
49182 atomic_t refcnt;
49183 - atomic_t event;
49184 + atomic_unchecked_t event;
49185 wait_queue_head_t poll;
49186 struct list_head buffers; /* goes through sysfs_buffer.list */
49187 };
49188 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
49189 if (!sysfs_get_active(attr_sd))
49190 return -ENODEV;
49191
49192 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49193 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49194 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49195
49196 sysfs_put_active(attr_sd);
49197 @@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
49198 return -ENOMEM;
49199
49200 atomic_set(&new_od->refcnt, 0);
49201 - atomic_set(&new_od->event, 1);
49202 + atomic_set_unchecked(&new_od->event, 1);
49203 init_waitqueue_head(&new_od->poll);
49204 INIT_LIST_HEAD(&new_od->buffers);
49205 goto retry;
49206 @@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
49207
49208 sysfs_put_active(attr_sd);
49209
49210 - if (buffer->event != atomic_read(&od->event))
49211 + if (buffer->event != atomic_read_unchecked(&od->event))
49212 goto trigger;
49213
49214 return DEFAULT_POLLMASK;
49215 @@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
49216
49217 od = sd->s_attr.open;
49218 if (od) {
49219 - atomic_inc(&od->event);
49220 + atomic_inc_unchecked(&od->event);
49221 wake_up_interruptible(&od->poll);
49222 }
49223
49224 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49225 index a7ac78f..02158e1 100644
49226 --- a/fs/sysfs/symlink.c
49227 +++ b/fs/sysfs/symlink.c
49228 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
49229
49230 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49231 {
49232 - char *page = nd_get_link(nd);
49233 + const char *page = nd_get_link(nd);
49234 if (!IS_ERR(page))
49235 free_page((unsigned long)page);
49236 }
49237 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
49238 index c175b4d..8f36a16 100644
49239 --- a/fs/udf/misc.c
49240 +++ b/fs/udf/misc.c
49241 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
49242
49243 u8 udf_tag_checksum(const struct tag *t)
49244 {
49245 - u8 *data = (u8 *)t;
49246 + const u8 *data = (const u8 *)t;
49247 u8 checksum = 0;
49248 int i;
49249 for (i = 0; i < sizeof(struct tag); ++i)
49250 diff --git a/fs/utimes.c b/fs/utimes.c
49251 index ba653f3..06ea4b1 100644
49252 --- a/fs/utimes.c
49253 +++ b/fs/utimes.c
49254 @@ -1,6 +1,7 @@
49255 #include <linux/compiler.h>
49256 #include <linux/file.h>
49257 #include <linux/fs.h>
49258 +#include <linux/security.h>
49259 #include <linux/linkage.h>
49260 #include <linux/mount.h>
49261 #include <linux/namei.h>
49262 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
49263 goto mnt_drop_write_and_out;
49264 }
49265 }
49266 +
49267 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49268 + error = -EACCES;
49269 + goto mnt_drop_write_and_out;
49270 + }
49271 +
49272 mutex_lock(&inode->i_mutex);
49273 error = notify_change(path->dentry, &newattrs);
49274 mutex_unlock(&inode->i_mutex);
49275 diff --git a/fs/xattr.c b/fs/xattr.c
49276 index 3c8c1cc..a83c398 100644
49277 --- a/fs/xattr.c
49278 +++ b/fs/xattr.c
49279 @@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
49280 * Extended attribute SET operations
49281 */
49282 static long
49283 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
49284 +setxattr(struct path *path, const char __user *name, const void __user *value,
49285 size_t size, int flags)
49286 {
49287 int error;
49288 @@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49289 }
49290 }
49291
49292 - error = vfs_setxattr(d, kname, kvalue, size, flags);
49293 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49294 + error = -EACCES;
49295 + goto out;
49296 + }
49297 +
49298 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
49299 out:
49300 if (vvalue)
49301 vfree(vvalue);
49302 @@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
49303 return error;
49304 error = mnt_want_write(path.mnt);
49305 if (!error) {
49306 - error = setxattr(path.dentry, name, value, size, flags);
49307 + error = setxattr(&path, name, value, size, flags);
49308 mnt_drop_write(path.mnt);
49309 }
49310 path_put(&path);
49311 @@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
49312 return error;
49313 error = mnt_want_write(path.mnt);
49314 if (!error) {
49315 - error = setxattr(path.dentry, name, value, size, flags);
49316 + error = setxattr(&path, name, value, size, flags);
49317 mnt_drop_write(path.mnt);
49318 }
49319 path_put(&path);
49320 @@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
49321 const void __user *,value, size_t, size, int, flags)
49322 {
49323 struct file *f;
49324 - struct dentry *dentry;
49325 int error = -EBADF;
49326
49327 f = fget(fd);
49328 if (!f)
49329 return error;
49330 - dentry = f->f_path.dentry;
49331 - audit_inode(NULL, dentry);
49332 + audit_inode(NULL, f->f_path.dentry);
49333 error = mnt_want_write_file(f);
49334 if (!error) {
49335 - error = setxattr(dentry, name, value, size, flags);
49336 + error = setxattr(&f->f_path, name, value, size, flags);
49337 mnt_drop_write_file(f);
49338 }
49339 fput(f);
49340 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
49341 index 69d06b0..c0996e5 100644
49342 --- a/fs/xattr_acl.c
49343 +++ b/fs/xattr_acl.c
49344 @@ -17,8 +17,8 @@
49345 struct posix_acl *
49346 posix_acl_from_xattr(const void *value, size_t size)
49347 {
49348 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49349 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49350 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49351 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49352 int count;
49353 struct posix_acl *acl;
49354 struct posix_acl_entry *acl_e;
49355 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
49356 index 85e7e32..5344e52 100644
49357 --- a/fs/xfs/xfs_bmap.c
49358 +++ b/fs/xfs/xfs_bmap.c
49359 @@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
49360 int nmap,
49361 int ret_nmap);
49362 #else
49363 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49364 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49365 #endif /* DEBUG */
49366
49367 STATIC int
49368 diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49369 index 79d05e8..e3e5861 100644
49370 --- a/fs/xfs/xfs_dir2_sf.c
49371 +++ b/fs/xfs/xfs_dir2_sf.c
49372 @@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
49373 }
49374
49375 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
49376 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49377 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49378 + char name[sfep->namelen];
49379 + memcpy(name, sfep->name, sfep->namelen);
49380 + if (filldir(dirent, name, sfep->namelen,
49381 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
49382 + *offset = off & 0x7fffffff;
49383 + return 0;
49384 + }
49385 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49386 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49387 *offset = off & 0x7fffffff;
49388 return 0;
49389 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
49390 index 91f8ff5..0ce68f9 100644
49391 --- a/fs/xfs/xfs_ioctl.c
49392 +++ b/fs/xfs/xfs_ioctl.c
49393 @@ -128,7 +128,7 @@ xfs_find_handle(
49394 }
49395
49396 error = -EFAULT;
49397 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49398 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49399 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49400 goto out_put;
49401
49402 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
49403 index 3011b87..1ab03e9 100644
49404 --- a/fs/xfs/xfs_iops.c
49405 +++ b/fs/xfs/xfs_iops.c
49406 @@ -397,7 +397,7 @@ xfs_vn_put_link(
49407 struct nameidata *nd,
49408 void *p)
49409 {
49410 - char *s = nd_get_link(nd);
49411 + const char *s = nd_get_link(nd);
49412
49413 if (!IS_ERR(s))
49414 kfree(s);
49415 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49416 new file mode 100644
49417 index 0000000..b9e7d6f
49418 --- /dev/null
49419 +++ b/grsecurity/Kconfig
49420 @@ -0,0 +1,940 @@
49421 +#
49422 +# grsecurity configuration
49423 +#
49424 +menu "Memory Protections"
49425 +depends on GRKERNSEC
49426 +
49427 +config GRKERNSEC_KMEM
49428 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
49429 + default y if GRKERNSEC_CONFIG_AUTO
49430 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49431 + help
49432 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49433 + be written to or read from to modify or leak the contents of the running
49434 + kernel. /dev/port will also not be allowed to be opened. If you have module
49435 + support disabled, enabling this will close up four ways that are
49436 + currently used to insert malicious code into the running kernel.
49437 + Even with all these features enabled, we still highly recommend that
49438 + you use the RBAC system, as it is still possible for an attacker to
49439 + modify the running kernel through privileged I/O granted by ioperm/iopl.
49440 + If you are not using XFree86, you may be able to stop this additional
49441 + case by enabling the 'Disable privileged I/O' option. Though nothing
49442 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49443 + but only to video memory, which is the only writing we allow in this
49444 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
49445 + not be allowed to mprotect it with PROT_WRITE later.
49446 + It is highly recommended that you say Y here if you meet all the
49447 + conditions above.
49448 +
49449 +config GRKERNSEC_VM86
49450 + bool "Restrict VM86 mode"
49451 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49452 + depends on X86_32
49453 +
49454 + help
49455 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49456 + make use of a special execution mode on 32bit x86 processors called
49457 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49458 + video cards and will still work with this option enabled. The purpose
49459 + of the option is to prevent exploitation of emulation errors in
49460 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
49461 + Nearly all users should be able to enable this option.
49462 +
49463 +config GRKERNSEC_IO
49464 + bool "Disable privileged I/O"
49465 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49466 + depends on X86
49467 + select RTC_CLASS
49468 + select RTC_INTF_DEV
49469 + select RTC_DRV_CMOS
49470 +
49471 + help
49472 + If you say Y here, all ioperm and iopl calls will return an error.
49473 + Ioperm and iopl can be used to modify the running kernel.
49474 + Unfortunately, some programs need this access to operate properly,
49475 + the most notable of which are XFree86 and hwclock. hwclock can be
49476 + remedied by having RTC support in the kernel, so real-time
49477 + clock support is enabled if this option is enabled, to ensure
49478 + that hwclock operates correctly. XFree86 still will not
49479 + operate correctly with this option enabled, so DO NOT CHOOSE Y
49480 + IF YOU USE XFree86. If you use XFree86 and you still want to
49481 + protect your kernel against modification, use the RBAC system.
49482 +
49483 +config GRKERNSEC_PROC_MEMMAP
49484 + bool "Harden ASLR against information leaks and entropy reduction"
49485 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
49486 + depends on PAX_NOEXEC || PAX_ASLR
49487 + help
49488 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49489 + give no information about the addresses of its mappings if
49490 + PaX features that rely on random addresses are enabled on the task.
49491 + In addition to sanitizing this information and disabling other
49492 + dangerous sources of information, this option causes reads of sensitive
49493 + /proc/<pid> entries to be denied where the file descriptor was opened
49494 + in a different task than the one performing the read. Such attempts are logged.
49495 + This option also limits argv/env strings for suid/sgid binaries
49496 + to 512KB to prevent a complete exhaustion of the stack entropy provided
49497 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49498 + binaries to prevent alternative mmap layouts from being abused.
49499 +
49500 + If you use PaX it is essential that you say Y here as it closes up
49501 + several holes that make full ASLR useless locally.
49502 +
49503 +config GRKERNSEC_BRUTE
49504 + bool "Deter exploit bruteforcing"
49505 + default y if GRKERNSEC_CONFIG_AUTO
49506 + help
49507 + If you say Y here, attempts to bruteforce exploits against forking
49508 + daemons such as apache or sshd, as well as against suid/sgid binaries
49509 + will be deterred. When a child of a forking daemon is killed by PaX
49510 + or crashes due to an illegal instruction or other suspicious signal,
49511 + the parent process will be delayed 30 seconds upon every subsequent
49512 + fork until the administrator is able to assess the situation and
49513 + restart the daemon.
49514 + In the suid/sgid case, the attempt is logged, the user has all their
49515 + processes terminated, and they are prevented from executing any further
49516 + processes for 15 minutes.
49517 + It is recommended that you also enable signal logging in the auditing
49518 + section so that logs are generated when a process triggers a suspicious
49519 + signal.
49520 + If the sysctl option is enabled, a sysctl option with name
49521 + "deter_bruteforce" is created.
49522 +
49523 +
49524 +config GRKERNSEC_MODHARDEN
49525 + bool "Harden module auto-loading"
49526 + default y if GRKERNSEC_CONFIG_AUTO
49527 + depends on MODULES
49528 + help
49529 + If you say Y here, module auto-loading in response to use of some
49530 + feature implemented by an unloaded module will be restricted to
49531 + root users. Enabling this option helps defend against attacks
49532 + by unprivileged users who abuse the auto-loading behavior to
49533 + cause a vulnerable module to load that is then exploited.
49534 +
49535 + If this option prevents a legitimate use of auto-loading for a
49536 + non-root user, the administrator can execute modprobe manually
49537 + with the exact name of the module mentioned in the alert log.
49538 + Alternatively, the administrator can add the module to the list
49539 + of modules loaded at boot by modifying init scripts.
49540 +
49541 + Modification of init scripts will most likely be needed on
49542 + Ubuntu servers with encrypted home directory support enabled,
49543 + as the first non-root user logging in will cause the ecb(aes),
49544 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
49545 +
49546 +config GRKERNSEC_HIDESYM
49547 + bool "Hide kernel symbols"
49548 + default y if GRKERNSEC_CONFIG_AUTO
49549 + help
49550 + If you say Y here, getting information on loaded modules, and
49551 + displaying all kernel symbols through a syscall will be restricted
49552 + to users with CAP_SYS_MODULE. For software compatibility reasons,
49553 + /proc/kallsyms will be restricted to the root user. The RBAC
49554 + system can hide that entry even from root.
49555 +
49556 + This option also prevents leaking of kernel addresses through
49557 + several /proc entries.
49558 +
49559 + Note that this option is only effective provided the following
49560 + conditions are met:
49561 + 1) The kernel using grsecurity is not precompiled by some distribution
49562 + 2) You have also enabled GRKERNSEC_DMESG
49563 + 3) You are using the RBAC system and hiding other files such as your
49564 + kernel image and System.map. Alternatively, enabling this option
49565 + causes the permissions on /boot, /lib/modules, and the kernel
49566 + source directory to change at compile time to prevent
49567 + reading by non-root users.
49568 + If the above conditions are met, this option will aid in providing a
49569 + useful protection against local kernel exploitation of overflows
49570 + and arbitrary read/write vulnerabilities.
49571 +
49572 +config GRKERNSEC_KERN_LOCKOUT
49573 + bool "Active kernel exploit response"
49574 + default y if GRKERNSEC_CONFIG_AUTO
49575 + depends on X86 || ARM || PPC || SPARC
49576 + help
49577 + If you say Y here, when a PaX alert is triggered due to suspicious
49578 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
49579 + or an OOPS occurs due to bad memory accesses, instead of just
49580 + terminating the offending process (and potentially allowing
49581 + a subsequent exploit from the same user), we will take one of two
49582 + actions:
49583 + If the user was root, we will panic the system
49584 + If the user was non-root, we will log the attempt, terminate
49585 + all processes owned by the user, then prevent them from creating
49586 + any new processes until the system is restarted
49587 + This deters repeated kernel exploitation/bruteforcing attempts
49588 + and is useful for later forensics.
49589 +
49590 +endmenu
49591 +menu "Role Based Access Control Options"
49592 +depends on GRKERNSEC
49593 +
49594 +config GRKERNSEC_RBAC_DEBUG
49595 + bool
49596 +
49597 +config GRKERNSEC_NO_RBAC
49598 + bool "Disable RBAC system"
49599 + help
49600 + If you say Y here, the /dev/grsec device will be removed from the kernel,
49601 + preventing the RBAC system from being enabled. You should only say Y
49602 + here if you have no intention of using the RBAC system, so as to prevent
49603 + an attacker with root access from misusing the RBAC system to hide files
49604 + and processes when loadable module support and /dev/[k]mem have been
49605 + locked down.
49606 +
49607 +config GRKERNSEC_ACL_HIDEKERN
49608 + bool "Hide kernel processes"
49609 + help
49610 + If you say Y here, all kernel threads will be hidden to all
49611 + processes but those whose subject has the "view hidden processes"
49612 + flag.
49613 +
49614 +config GRKERNSEC_ACL_MAXTRIES
49615 + int "Maximum tries before password lockout"
49616 + default 3
49617 + help
49618 + This option enforces the maximum number of times a user can attempt
49619 + to authorize themselves with the grsecurity RBAC system before being
49620 + denied the ability to attempt authorization again for a specified time.
49621 + The lower the number, the harder it will be to brute-force a password.
49622 +
49623 +config GRKERNSEC_ACL_TIMEOUT
49624 + int "Time to wait after max password tries, in seconds"
49625 + default 30
49626 + help
49627 + This option specifies the time the user must wait after attempting to
49628 + authorize to the RBAC system with the maximum number of invalid
49629 + passwords. The higher the number, the harder it will be to brute-force
49630 + a password.
49631 +
49632 +endmenu
49633 +menu "Filesystem Protections"
49634 +depends on GRKERNSEC
49635 +
49636 +config GRKERNSEC_PROC
49637 + bool "Proc restrictions"
49638 + default y if GRKERNSEC_CONFIG_AUTO
49639 + help
49640 + If you say Y here, the permissions of the /proc filesystem
49641 + will be altered to enhance system security and privacy. You MUST
49642 + choose either a user only restriction or a user and group restriction.
49643 + Depending upon the option you choose, you can either restrict users to
49644 + see only the processes they themselves run, or choose a group that can
49645 + view all processes and files normally restricted to root if you choose
49646 + the "restrict to user only" option. NOTE: If you're running identd or
49647 + ntpd as a non-root user, you will have to run it as the group you
49648 + specify here.
49649 +
49650 +config GRKERNSEC_PROC_USER
49651 + bool "Restrict /proc to user only"
49652 + depends on GRKERNSEC_PROC
49653 + help
49654 + If you say Y here, non-root users will only be able to view their own
49655 + processes, and restricts them from viewing network-related information,
49656 + and viewing kernel symbol and module information.
49657 +
49658 +config GRKERNSEC_PROC_USERGROUP
49659 + bool "Allow special group"
49660 + default y if GRKERNSEC_CONFIG_AUTO
49661 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49662 + help
49663 + If you say Y here, you will be able to select a group that will be
49664 + able to view all processes and network-related information. If you've
49665 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49666 + remain hidden. This option is useful if you want to run identd as
49667 + a non-root user.
49668 +
49669 +config GRKERNSEC_PROC_GID
49670 + int "GID for special group"
49671 + depends on GRKERNSEC_PROC_USERGROUP
49672 + default 1001
49673 +
49674 +config GRKERNSEC_PROC_ADD
49675 + bool "Additional restrictions"
49676 + default y if GRKERNSEC_CONFIG_AUTO
49677 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49678 + help
49679 + If you say Y here, additional restrictions will be placed on
49680 + /proc that keep normal users from viewing device information and
49681 + slabinfo information that could be useful for exploits.
49682 +
49683 +config GRKERNSEC_LINK
49684 + bool "Linking restrictions"
49685 + default y if GRKERNSEC_CONFIG_AUTO
49686 + help
49687 + If you say Y here, /tmp race exploits will be prevented, since users
49688 + will no longer be able to follow symlinks owned by other users in
49689 + world-writable +t directories (e.g. /tmp), unless the owner of the
49690 + symlink is the owner of the directory. users will also not be
49691 + able to hardlink to files they do not own. If the sysctl option is
49692 + enabled, a sysctl option with name "linking_restrictions" is created.
49693 +
49694 +config GRKERNSEC_SYMLINKOWN
49695 + bool "Kernel-enforced SymlinksIfOwnerMatch"
49696 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
49697 + help
49698 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
49699 + that prevents it from being used as a security feature. As Apache
49700 + verifies the symlink by performing a stat() against the target of
49701 + the symlink before it is followed, an attacker can setup a symlink
49702 + to point to a same-owned file, then replace the symlink with one
49703 + that targets another user's file just after Apache "validates" the
49704 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
49705 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
49706 + will be in place for the group you specify. If the sysctl option
49707 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
49708 + created.
49709 +
49710 +config GRKERNSEC_SYMLINKOWN_GID
49711 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
49712 + depends on GRKERNSEC_SYMLINKOWN
49713 + default 1006
49714 + help
49715 + Setting this GID determines what group kernel-enforced
49716 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
49717 + is enabled, a sysctl option with name "symlinkown_gid" is created.
49718 +
49719 +config GRKERNSEC_FIFO
49720 + bool "FIFO restrictions"
49721 + default y if GRKERNSEC_CONFIG_AUTO
49722 + help
49723 + If you say Y here, users will not be able to write to FIFOs they don't
49724 + own in world-writable +t directories (e.g. /tmp), unless the owner of
49725 + the FIFO is the same owner of the directory it's held in. If the sysctl
49726 + option is enabled, a sysctl option with name "fifo_restrictions" is
49727 + created.
49728 +
49729 +config GRKERNSEC_SYSFS_RESTRICT
49730 + bool "Sysfs/debugfs restriction"
49731 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
49732 + depends on SYSFS
49733 + help
49734 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
49735 + any filesystem normally mounted under it (e.g. debugfs) will be
49736 + mostly accessible only by root. These filesystems generally provide access
49737 + to hardware and debug information that isn't appropriate for unprivileged
49738 + users of the system. Sysfs and debugfs have also become a large source
49739 + of new vulnerabilities, ranging from infoleaks to local compromise.
49740 + There has been very little oversight with an eye toward security involved
49741 + in adding new exporters of information to these filesystems, so their
49742 + use is discouraged.
49743 + For reasons of compatibility, a few directories have been whitelisted
49744 + for access by non-root users:
49745 + /sys/fs/selinux
49746 + /sys/fs/fuse
49747 + /sys/devices/system/cpu
49748 +
49749 +config GRKERNSEC_ROFS
49750 + bool "Runtime read-only mount protection"
49751 + help
49752 + If you say Y here, a sysctl option with name "romount_protect" will
49753 + be created. By setting this option to 1 at runtime, filesystems
49754 + will be protected in the following ways:
49755 + * No new writable mounts will be allowed
49756 + * Existing read-only mounts won't be able to be remounted read/write
49757 + * Write operations will be denied on all block devices
49758 + This option acts independently of grsec_lock: once it is set to 1,
49759 + it cannot be turned off. Therefore, please be mindful of the resulting
49760 + behavior if this option is enabled in an init script on a read-only
49761 + filesystem. This feature is mainly intended for secure embedded systems.
49762 +
49763 +config GRKERNSEC_CHROOT
49764 + bool "Chroot jail restrictions"
49765 + default y if GRKERNSEC_CONFIG_AUTO
49766 + help
49767 + If you say Y here, you will be able to choose several options that will
49768 + make breaking out of a chrooted jail much more difficult. If you
49769 + encounter no software incompatibilities with the following options, it
49770 + is recommended that you enable each one.
49771 +
49772 +config GRKERNSEC_CHROOT_MOUNT
49773 + bool "Deny mounts"
49774 + default y if GRKERNSEC_CONFIG_AUTO
49775 + depends on GRKERNSEC_CHROOT
49776 + help
49777 + If you say Y here, processes inside a chroot will not be able to
49778 + mount or remount filesystems. If the sysctl option is enabled, a
49779 + sysctl option with name "chroot_deny_mount" is created.
49780 +
49781 +config GRKERNSEC_CHROOT_DOUBLE
49782 + bool "Deny double-chroots"
49783 + default y if GRKERNSEC_CONFIG_AUTO
49784 + depends on GRKERNSEC_CHROOT
49785 + help
49786 + If you say Y here, processes inside a chroot will not be able to chroot
49787 + again outside the chroot. This is a widely used method of breaking
49788 + out of a chroot jail and should not be allowed. If the sysctl
49789 + option is enabled, a sysctl option with name
49790 + "chroot_deny_chroot" is created.
49791 +
49792 +config GRKERNSEC_CHROOT_PIVOT
49793 + bool "Deny pivot_root in chroot"
49794 + default y if GRKERNSEC_CONFIG_AUTO
49795 + depends on GRKERNSEC_CHROOT
49796 + help
49797 + If you say Y here, processes inside a chroot will not be able to use
49798 + a function called pivot_root() that was introduced in Linux 2.3.41. It
49799 + works similar to chroot in that it changes the root filesystem. This
49800 + function could be misused in a chrooted process to attempt to break out
49801 + of the chroot, and therefore should not be allowed. If the sysctl
49802 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
49803 + created.
49804 +
49805 +config GRKERNSEC_CHROOT_CHDIR
49806 + bool "Enforce chdir(\"/\") on all chroots"
49807 + default y if GRKERNSEC_CONFIG_AUTO
49808 + depends on GRKERNSEC_CHROOT
49809 + help
49810 + If you say Y here, the current working directory of all newly-chrooted
49811 + applications will be set to the root directory of the chroot.
49812 + The man page on chroot(2) states:
49813 + Note that this call does not change the current working
49814 + directory, so that `.' can be outside the tree rooted at
49815 + `/'. In particular, the super-user can escape from a
49816 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
49817 +
49818 + It is recommended that you say Y here, since it's not known to break
49819 + any software. If the sysctl option is enabled, a sysctl option with
49820 + name "chroot_enforce_chdir" is created.
49821 +
49822 +config GRKERNSEC_CHROOT_CHMOD
49823 + bool "Deny (f)chmod +s"
49824 + default y if GRKERNSEC_CONFIG_AUTO
49825 + depends on GRKERNSEC_CHROOT
49826 + help
49827 + If you say Y here, processes inside a chroot will not be able to chmod
49828 + or fchmod files to make them have suid or sgid bits. This protects
49829 + against another published method of breaking a chroot. If the sysctl
49830 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
49831 + created.
49832 +
49833 +config GRKERNSEC_CHROOT_FCHDIR
49834 + bool "Deny fchdir out of chroot"
49835 + default y if GRKERNSEC_CONFIG_AUTO
49836 + depends on GRKERNSEC_CHROOT
49837 + help
49838 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
49839 + to a file descriptor of the chrooting process that points to a directory
49840 + outside the filesystem will be stopped. If the sysctl option
49841 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
49842 +
49843 +config GRKERNSEC_CHROOT_MKNOD
49844 + bool "Deny mknod"
49845 + default y if GRKERNSEC_CONFIG_AUTO
49846 + depends on GRKERNSEC_CHROOT
49847 + help
49848 + If you say Y here, processes inside a chroot will not be allowed to
49849 + mknod. The problem with using mknod inside a chroot is that it
49850 + would allow an attacker to create a device entry that is the same
49851 + as one on the physical root of your system, which could range from
49852 + anything from the console device to a device for your harddrive (which
49853 + they could then use to wipe the drive or steal data). It is recommended
49854 + that you say Y here, unless you run into software incompatibilities.
49855 + If the sysctl option is enabled, a sysctl option with name
49856 + "chroot_deny_mknod" is created.
49857 +
49858 +config GRKERNSEC_CHROOT_SHMAT
49859 + bool "Deny shmat() out of chroot"
49860 + default y if GRKERNSEC_CONFIG_AUTO
49861 + depends on GRKERNSEC_CHROOT
49862 + help
49863 + If you say Y here, processes inside a chroot will not be able to attach
49864 + to shared memory segments that were created outside of the chroot jail.
49865 + It is recommended that you say Y here. If the sysctl option is enabled,
49866 + a sysctl option with name "chroot_deny_shmat" is created.
49867 +
49868 +config GRKERNSEC_CHROOT_UNIX
49869 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
49870 + default y if GRKERNSEC_CONFIG_AUTO
49871 + depends on GRKERNSEC_CHROOT
49872 + help
49873 + If you say Y here, processes inside a chroot will not be able to
49874 + connect to abstract (meaning not belonging to a filesystem) Unix
49875 + domain sockets that were bound outside of a chroot. It is recommended
49876 + that you say Y here. If the sysctl option is enabled, a sysctl option
49877 + with name "chroot_deny_unix" is created.
49878 +
49879 +config GRKERNSEC_CHROOT_FINDTASK
49880 + bool "Protect outside processes"
49881 + default y if GRKERNSEC_CONFIG_AUTO
49882 + depends on GRKERNSEC_CHROOT
49883 + help
49884 + If you say Y here, processes inside a chroot will not be able to
49885 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
49886 + getsid, or view any process outside of the chroot. If the sysctl
49887 + option is enabled, a sysctl option with name "chroot_findtask" is
49888 + created.
49889 +
49890 +config GRKERNSEC_CHROOT_NICE
49891 + bool "Restrict priority changes"
49892 + default y if GRKERNSEC_CONFIG_AUTO
49893 + depends on GRKERNSEC_CHROOT
49894 + help
49895 + If you say Y here, processes inside a chroot will not be able to raise
49896 + the priority of processes in the chroot, or alter the priority of
49897 + processes outside the chroot. This provides more security than simply
49898 + removing CAP_SYS_NICE from the process' capability set. If the
49899 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
49900 + is created.
49901 +
49902 +config GRKERNSEC_CHROOT_SYSCTL
49903 + bool "Deny sysctl writes"
49904 + default y if GRKERNSEC_CONFIG_AUTO
49905 + depends on GRKERNSEC_CHROOT
49906 + help
49907 + If you say Y here, an attacker in a chroot will not be able to
49908 + write to sysctl entries, either by sysctl(2) or through a /proc
49909 + interface. It is strongly recommended that you say Y here. If the
49910 + sysctl option is enabled, a sysctl option with name
49911 + "chroot_deny_sysctl" is created.
49912 +
49913 +config GRKERNSEC_CHROOT_CAPS
49914 + bool "Capability restrictions"
49915 + default y if GRKERNSEC_CONFIG_AUTO
49916 + depends on GRKERNSEC_CHROOT
49917 + help
49918 + If you say Y here, the capabilities on all processes within a
49919 + chroot jail will be lowered to stop module insertion, raw i/o,
49920 + system and net admin tasks, rebooting the system, modifying immutable
49921 + files, modifying IPC owned by another, and changing the system time.
49922 + This is left an option because it can break some apps. Disable this
49923 + if your chrooted apps are having problems performing those kinds of
49924 + tasks. If the sysctl option is enabled, a sysctl option with
49925 + name "chroot_caps" is created.
49926 +
49927 +endmenu
49928 +menu "Kernel Auditing"
49929 +depends on GRKERNSEC
49930 +
49931 +config GRKERNSEC_AUDIT_GROUP
49932 + bool "Single group for auditing"
49933 + help
49934 + If you say Y here, the exec, chdir, and (un)mount logging features
49935 + will only operate on a group you specify. This option is recommended
49936 + if you only want to watch certain users instead of having a large
49937 + amount of logs from the entire system. If the sysctl option is enabled,
49938 + a sysctl option with name "audit_group" is created.
49939 +
49940 +config GRKERNSEC_AUDIT_GID
49941 + int "GID for auditing"
49942 + depends on GRKERNSEC_AUDIT_GROUP
49943 + default 1007
49944 +
49945 +config GRKERNSEC_EXECLOG
49946 + bool "Exec logging"
49947 + help
49948 + If you say Y here, all execve() calls will be logged (since the
49949 + other exec*() calls are frontends to execve(), all execution
49950 + will be logged). Useful for shell-servers that like to keep track
49951 + of their users. If the sysctl option is enabled, a sysctl option with
49952 + name "exec_logging" is created.
49953 + WARNING: This option when enabled will produce a LOT of logs, especially
49954 + on an active system.
49955 +
49956 +config GRKERNSEC_RESLOG
49957 + bool "Resource logging"
49958 + default y if GRKERNSEC_CONFIG_AUTO
49959 + help
49960 + If you say Y here, all attempts to overstep resource limits will
49961 + be logged with the resource name, the requested size, and the current
49962 + limit. It is highly recommended that you say Y here. If the sysctl
49963 + option is enabled, a sysctl option with name "resource_logging" is
49964 + created. If the RBAC system is enabled, the sysctl value is ignored.
49965 +
49966 +config GRKERNSEC_CHROOT_EXECLOG
49967 + bool "Log execs within chroot"
49968 + help
49969 + If you say Y here, all executions inside a chroot jail will be logged
49970 + to syslog. This can cause a large amount of logs if certain
49971 + applications (eg. djb's daemontools) are installed on the system, and
49972 + is therefore left as an option. If the sysctl option is enabled, a
49973 + sysctl option with name "chroot_execlog" is created.
49974 +
49975 +config GRKERNSEC_AUDIT_PTRACE
49976 + bool "Ptrace logging"
49977 + help
49978 + If you say Y here, all attempts to attach to a process via ptrace
49979 + will be logged. If the sysctl option is enabled, a sysctl option
49980 + with name "audit_ptrace" is created.
49981 +
49982 +config GRKERNSEC_AUDIT_CHDIR
49983 + bool "Chdir logging"
49984 + help
49985 + If you say Y here, all chdir() calls will be logged. If the sysctl
49986 + option is enabled, a sysctl option with name "audit_chdir" is created.
49987 +
49988 +config GRKERNSEC_AUDIT_MOUNT
49989 + bool "(Un)Mount logging"
49990 + help
49991 + If you say Y here, all mounts and unmounts will be logged. If the
49992 + sysctl option is enabled, a sysctl option with name "audit_mount" is
49993 + created.
49994 +
49995 +config GRKERNSEC_SIGNAL
49996 + bool "Signal logging"
49997 + default y if GRKERNSEC_CONFIG_AUTO
49998 + help
49999 + If you say Y here, certain important signals will be logged, such as
50000 + SIGSEGV, which will as a result inform you of when an error in a program
50001 + occurred, which in some cases could mean a possible exploit attempt.
50002 + If the sysctl option is enabled, a sysctl option with name
50003 + "signal_logging" is created.
50004 +
50005 +config GRKERNSEC_FORKFAIL
50006 + bool "Fork failure logging"
50007 + help
50008 + If you say Y here, all failed fork() attempts will be logged.
50009 + This could suggest a fork bomb, or someone attempting to overstep
50010 + their process limit. If the sysctl option is enabled, a sysctl option
50011 + with name "forkfail_logging" is created.
50012 +
50013 +config GRKERNSEC_TIME
50014 + bool "Time change logging"
50015 + default y if GRKERNSEC_CONFIG_AUTO
50016 + help
50017 + If you say Y here, any changes of the system clock will be logged.
50018 + If the sysctl option is enabled, a sysctl option with name
50019 + "timechange_logging" is created.
50020 +
50021 +config GRKERNSEC_PROC_IPADDR
50022 + bool "/proc/<pid>/ipaddr support"
50023 + default y if GRKERNSEC_CONFIG_AUTO
50024 + help
50025 + If you say Y here, a new entry will be added to each /proc/<pid>
50026 + directory that contains the IP address of the person using the task.
50027 + The IP is carried across local TCP and AF_UNIX stream sockets.
50028 + This information can be useful for IDS/IPSes to perform remote response
50029 + to a local attack. The entry is readable by only the owner of the
50030 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50031 + the RBAC system), and thus does not create privacy concerns.
50032 +
50033 +config GRKERNSEC_RWXMAP_LOG
50034 + bool 'Denied RWX mmap/mprotect logging'
50035 + default y if GRKERNSEC_CONFIG_AUTO
50036 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50037 + help
50038 + If you say Y here, calls to mmap() and mprotect() with explicit
50039 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50040 + denied by the PAX_MPROTECT feature. If the sysctl option is
50041 + enabled, a sysctl option with name "rwxmap_logging" is created.
50042 +
50043 +config GRKERNSEC_AUDIT_TEXTREL
50044 + bool 'ELF text relocations logging (READ HELP)'
50045 + depends on PAX_MPROTECT
50046 + help
50047 + If you say Y here, text relocations will be logged with the filename
50048 + of the offending library or binary. The purpose of the feature is
50049 + to help Linux distribution developers get rid of libraries and
50050 + binaries that need text relocations which hinder the future progress
50051 + of PaX. Only Linux distribution developers should say Y here, and
50052 + never on a production machine, as this option creates an information
50053 + leak that could aid an attacker in defeating the randomization of
50054 + a single memory region. If the sysctl option is enabled, a sysctl
50055 + option with name "audit_textrel" is created.
50056 +
50057 +endmenu
50058 +
50059 +menu "Executable Protections"
50060 +depends on GRKERNSEC
50061 +
50062 +config GRKERNSEC_DMESG
50063 + bool "Dmesg(8) restriction"
50064 + default y if GRKERNSEC_CONFIG_AUTO
50065 + help
50066 + If you say Y here, non-root users will not be able to use dmesg(8)
50067 + to view up to the last 4kb of messages in the kernel's log buffer.
50068 + The kernel's log buffer often contains kernel addresses and other
50069 + identifying information useful to an attacker in fingerprinting a
50070 + system for a targeted exploit.
50071 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50072 + created.
50073 +
50074 +config GRKERNSEC_HARDEN_PTRACE
50075 + bool "Deter ptrace-based process snooping"
50076 + default y if GRKERNSEC_CONFIG_AUTO
50077 + help
50078 + If you say Y here, TTY sniffers and other malicious monitoring
50079 + programs implemented through ptrace will be defeated. If you
50080 + have been using the RBAC system, this option has already been
50081 + enabled for several years for all users, with the ability to make
50082 + fine-grained exceptions.
50083 +
50084 + This option only affects the ability of non-root users to ptrace
50085 + processes that are not a descendent of the ptracing process.
50086 + This means that strace ./binary and gdb ./binary will still work,
50087 + but attaching to arbitrary processes will not. If the sysctl
50088 + option is enabled, a sysctl option with name "harden_ptrace" is
50089 + created.
50090 +
50091 +config GRKERNSEC_PTRACE_READEXEC
50092 + bool "Require read access to ptrace sensitive binaries"
50093 + default y if GRKERNSEC_CONFIG_AUTO
50094 + help
50095 + If you say Y here, unprivileged users will not be able to ptrace unreadable
50096 + binaries. This option is useful in environments that
50097 + remove the read bits (e.g. file mode 4711) from suid binaries to
50098 + prevent infoleaking of their contents. This option adds
50099 + consistency to the use of that file mode, as the binary could normally
50100 + be read out when run without privileges while ptracing.
50101 +
50102 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50103 + is created.
50104 +
50105 +config GRKERNSEC_SETXID
50106 + bool "Enforce consistent multithreaded privileges"
50107 + default y if GRKERNSEC_CONFIG_AUTO
50108 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
50109 + help
50110 + If you say Y here, a change from a root uid to a non-root uid
50111 + in a multithreaded application will cause the resulting uids,
50112 + gids, supplementary groups, and capabilities in that thread
50113 + to be propagated to the other threads of the process. In most
50114 + cases this is unnecessary, as glibc will emulate this behavior
50115 + on behalf of the application. Other libcs do not act in the
50116 + same way, allowing the other threads of the process to continue
50117 + running with root privileges. If the sysctl option is enabled,
50118 + a sysctl option with name "consistent_setxid" is created.
50119 +
50120 +config GRKERNSEC_TPE
50121 + bool "Trusted Path Execution (TPE)"
50122 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
50123 + help
50124 + If you say Y here, you will be able to choose a gid to add to the
50125 + supplementary groups of users you want to mark as "untrusted."
50126 + These users will not be able to execute any files that are not in
50127 + root-owned directories writable only by root. If the sysctl option
50128 + is enabled, a sysctl option with name "tpe" is created.
50129 +
50130 +config GRKERNSEC_TPE_ALL
50131 + bool "Partially restrict all non-root users"
50132 + depends on GRKERNSEC_TPE
50133 + help
50134 + If you say Y here, all non-root users will be covered under
50135 + a weaker TPE restriction. This is separate from, and in addition to,
50136 + the main TPE options that you have selected elsewhere. Thus, if a
50137 + "trusted" GID is chosen, this restriction applies to even that GID.
50138 + Under this restriction, all non-root users will only be allowed to
50139 + execute files in directories they own that are not group or
50140 + world-writable, or in directories owned by root and writable only by
50141 + root. If the sysctl option is enabled, a sysctl option with name
50142 + "tpe_restrict_all" is created.
50143 +
50144 +config GRKERNSEC_TPE_INVERT
50145 + bool "Invert GID option"
50146 + depends on GRKERNSEC_TPE
50147 + help
50148 + If you say Y here, the group you specify in the TPE configuration will
50149 + decide what group TPE restrictions will be *disabled* for. This
50150 + option is useful if you want TPE restrictions to be applied to most
50151 + users on the system. If the sysctl option is enabled, a sysctl option
50152 + with name "tpe_invert" is created. Unlike other sysctl options, this
50153 + entry will default to on for backward-compatibility.
50154 +
50155 +config GRKERNSEC_TPE_GID
50156 + int "GID for untrusted users"
50157 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50158 + default 1005
50159 + help
50160 + Setting this GID determines what group TPE restrictions will be
50161 + *enabled* for. If the sysctl option is enabled, a sysctl option
50162 + with name "tpe_gid" is created.
50163 +
50164 +config GRKERNSEC_TPE_GID
50165 + int "GID for trusted users"
50166 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50167 + default 1005
50168 + help
50169 + Setting this GID determines what group TPE restrictions will be
50170 + *disabled* for. If the sysctl option is enabled, a sysctl option
50171 + with name "tpe_gid" is created.
50172 +
50173 +endmenu
50174 +menu "Network Protections"
50175 +depends on GRKERNSEC
50176 +
50177 +config GRKERNSEC_RANDNET
50178 + bool "Larger entropy pools"
50179 + default y if GRKERNSEC_CONFIG_AUTO
50180 + help
50181 + If you say Y here, the entropy pools used for many features of Linux
50182 + and grsecurity will be doubled in size. Since several grsecurity
50183 + features use additional randomness, it is recommended that you say Y
50184 + here. Saying Y here has a similar effect as modifying
50185 + /proc/sys/kernel/random/poolsize.
50186 +
50187 +config GRKERNSEC_BLACKHOLE
50188 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50189 + default y if GRKERNSEC_CONFIG_AUTO
50190 + depends on NET
50191 + help
50192 + If you say Y here, neither TCP resets nor ICMP
50193 + destination-unreachable packets will be sent in response to packets
50194 + sent to ports for which no associated listening process exists.
50195 + This feature supports both IPV4 and IPV6 and exempts the
50196 + loopback interface from blackholing. Enabling this feature
50197 + makes a host more resilient to DoS attacks and reduces network
50198 + visibility against scanners.
50199 +
50200 + The blackhole feature as-implemented is equivalent to the FreeBSD
50201 + blackhole feature, as it prevents RST responses to all packets, not
50202 + just SYNs. Under most application behavior this causes no
50203 + problems, but applications (like haproxy) may not close certain
50204 + connections in a way that cleanly terminates them on the remote
50205 + end, leaving the remote host in LAST_ACK state. Because of this
50206 + side-effect and to prevent intentional LAST_ACK DoSes, this
50207 + feature also adds automatic mitigation against such attacks.
50208 + The mitigation drastically reduces the amount of time a socket
50209 + can spend in LAST_ACK state. If you're using haproxy and not
50210 + all servers it connects to have this option enabled, consider
50211 + disabling this feature on the haproxy host.
50212 +
50213 + If the sysctl option is enabled, two sysctl options with names
50214 + "ip_blackhole" and "lastack_retries" will be created.
50215 + While "ip_blackhole" takes the standard zero/non-zero on/off
50216 + toggle, "lastack_retries" uses the same kinds of values as
50217 + "tcp_retries1" and "tcp_retries2". The default value of 4
50218 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50219 + state.
50220 +
50221 +config GRKERNSEC_SOCKET
50222 + bool "Socket restrictions"
50223 + depends on NET
50224 + help
50225 + If you say Y here, you will be able to choose from several options.
50226 + If you assign a GID on your system and add it to the supplementary
50227 + groups of users you want to restrict socket access to, this patch
50228 + will perform up to three things, based on the option(s) you choose.
50229 +
50230 +config GRKERNSEC_SOCKET_ALL
50231 + bool "Deny any sockets to group"
50232 + depends on GRKERNSEC_SOCKET
50233 + help
50234 + If you say Y here, you will be able to choose a GID of whose users will
50235 + be unable to connect to other hosts from your machine or run server
50236 + applications from your machine. If the sysctl option is enabled, a
50237 + sysctl option with name "socket_all" is created.
50238 +
50239 +config GRKERNSEC_SOCKET_ALL_GID
50240 + int "GID to deny all sockets for"
50241 + depends on GRKERNSEC_SOCKET_ALL
50242 + default 1004
50243 + help
50244 + Here you can choose the GID to disable socket access for. Remember to
50245 + add the users you want socket access disabled for to the GID
50246 + specified here. If the sysctl option is enabled, a sysctl option
50247 + with name "socket_all_gid" is created.
50248 +
50249 +config GRKERNSEC_SOCKET_CLIENT
50250 + bool "Deny client sockets to group"
50251 + depends on GRKERNSEC_SOCKET
50252 + help
50253 + If you say Y here, you will be able to choose a GID of whose users will
50254 + be unable to connect to other hosts from your machine, but will be
50255 + able to run servers. If this option is enabled, all users in the group
50256 + you specify will have to use passive mode when initiating ftp transfers
50257 + from the shell on your machine. If the sysctl option is enabled, a
50258 + sysctl option with name "socket_client" is created.
50259 +
50260 +config GRKERNSEC_SOCKET_CLIENT_GID
50261 + int "GID to deny client sockets for"
50262 + depends on GRKERNSEC_SOCKET_CLIENT
50263 + default 1003
50264 + help
50265 + Here you can choose the GID to disable client socket access for.
50266 + Remember to add the users you want client socket access disabled for to
50267 + the GID specified here. If the sysctl option is enabled, a sysctl
50268 + option with name "socket_client_gid" is created.
50269 +
50270 +config GRKERNSEC_SOCKET_SERVER
50271 + bool "Deny server sockets to group"
50272 + depends on GRKERNSEC_SOCKET
50273 + help
50274 + If you say Y here, you will be able to choose a GID of whose users will
50275 + be unable to run server applications from your machine. If the sysctl
50276 + option is enabled, a sysctl option with name "socket_server" is created.
50277 +
50278 +config GRKERNSEC_SOCKET_SERVER_GID
50279 + int "GID to deny server sockets for"
50280 + depends on GRKERNSEC_SOCKET_SERVER
50281 + default 1002
50282 + help
50283 + Here you can choose the GID to disable server socket access for.
50284 + Remember to add the users you want server socket access disabled for to
50285 + the GID specified here. If the sysctl option is enabled, a sysctl
50286 + option with name "socket_server_gid" is created.
50287 +
50288 +endmenu
50289 +menu "Sysctl Support"
50290 +depends on GRKERNSEC && SYSCTL
50291 +
50292 +config GRKERNSEC_SYSCTL
50293 + bool "Sysctl support"
50294 + default y if GRKERNSEC_CONFIG_AUTO
50295 + help
50296 + If you say Y here, you will be able to change the options that
50297 + grsecurity runs with at bootup, without having to recompile your
50298 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50299 + to enable (1) or disable (0) various features. All the sysctl entries
50300 + are mutable until the "grsec_lock" entry is set to a non-zero value.
50301 + All features enabled in the kernel configuration are disabled at boot
50302 + if you do not say Y to the "Turn on features by default" option.
50303 + All options should be set at startup, and the grsec_lock entry should
50304 + be set to a non-zero value after all the options are set.
50305 + *THIS IS EXTREMELY IMPORTANT*
50306 +
50307 +config GRKERNSEC_SYSCTL_DISTRO
50308 + bool "Extra sysctl support for distro makers (READ HELP)"
50309 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50310 + help
50311 + If you say Y here, additional sysctl options will be created
50312 + for features that affect processes running as root. Therefore,
50313 + it is critical when using this option that the grsec_lock entry be
50314 + enabled after boot. Only distros with prebuilt kernel packages
50315 + with this option enabled that can ensure grsec_lock is enabled
50316 + after boot should use this option.
50317 + *Failure to set grsec_lock after boot makes all grsec features
50318 + this option covers useless*
50319 +
50320 + Currently this option creates the following sysctl entries:
50321 + "Disable Privileged I/O": "disable_priv_io"
50322 +
50323 +config GRKERNSEC_SYSCTL_ON
50324 + bool "Turn on features by default"
50325 + default y if GRKERNSEC_CONFIG_AUTO
50326 + depends on GRKERNSEC_SYSCTL
50327 + help
50328 + If you say Y here, instead of having all features enabled in the
50329 + kernel configuration disabled at boot time, the features will be
50330 + enabled at boot time. It is recommended you say Y here unless
50331 + there is some reason you would want all sysctl-tunable features to
50332 + be disabled by default. As mentioned elsewhere, it is important
50333 + to enable the grsec_lock entry once you have finished modifying
50334 + the sysctl entries.
50335 +
50336 +endmenu
50337 +menu "Logging Options"
50338 +depends on GRKERNSEC
50339 +
50340 +config GRKERNSEC_FLOODTIME
50341 + int "Seconds in between log messages (minimum)"
50342 + default 10
50343 + help
50344 + This option allows you to enforce the number of seconds between
50345 + grsecurity log messages. The default should be suitable for most
50346 + people, however, if you choose to change it, choose a value small enough
50347 + to allow informative logs to be produced, but large enough to
50348 + prevent flooding.
50349 +
50350 +config GRKERNSEC_FLOODBURST
50351 + int "Number of messages in a burst (maximum)"
50352 + default 6
50353 + help
50354 + This option allows you to choose the maximum number of messages allowed
50355 + within the flood time interval you chose in a separate option. The
50356 + default should be suitable for most people, however if you find that
50357 + many of your logs are being interpreted as flooding, you may want to
50358 + raise this value.
50359 +
50360 +endmenu
50361 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50362 new file mode 100644
50363 index 0000000..1b9afa9
50364 --- /dev/null
50365 +++ b/grsecurity/Makefile
50366 @@ -0,0 +1,38 @@
50367 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50368 +# during 2001-2009 it has been completely redesigned by Brad Spengler
50369 +# into an RBAC system
50370 +#
50371 +# All code in this directory and various hooks inserted throughout the kernel
50372 +# are copyright Brad Spengler - Open Source Security, Inc., and released
50373 +# under the GPL v2 or higher
50374 +
50375 +KBUILD_CFLAGS += -Werror
50376 +
50377 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50378 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
50379 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
50380 +
50381 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50382 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50383 + gracl_learn.o grsec_log.o
50384 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
50385 +
50386 +ifdef CONFIG_NET
50387 +obj-y += grsec_sock.o
50388 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50389 +endif
50390 +
50391 +ifndef CONFIG_GRKERNSEC
50392 +obj-y += grsec_disabled.o
50393 +endif
50394 +
50395 +ifdef CONFIG_GRKERNSEC_HIDESYM
50396 +extra-y := grsec_hidesym.o
50397 +$(obj)/grsec_hidesym.o:
50398 + @-chmod -f 500 /boot
50399 + @-chmod -f 500 /lib/modules
50400 + @-chmod -f 500 /lib64/modules
50401 + @-chmod -f 500 /lib32/modules
50402 + @-chmod -f 700 .
50403 + @echo ' grsec: protected kernel image paths'
50404 +endif
50405 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50406 new file mode 100644
50407 index 0000000..7a5922f
50408 --- /dev/null
50409 +++ b/grsecurity/gracl.c
50410 @@ -0,0 +1,4016 @@
50411 +#include <linux/kernel.h>
50412 +#include <linux/module.h>
50413 +#include <linux/sched.h>
50414 +#include <linux/mm.h>
50415 +#include <linux/file.h>
50416 +#include <linux/fs.h>
50417 +#include <linux/namei.h>
50418 +#include <linux/mount.h>
50419 +#include <linux/tty.h>
50420 +#include <linux/proc_fs.h>
50421 +#include <linux/lglock.h>
50422 +#include <linux/slab.h>
50423 +#include <linux/vmalloc.h>
50424 +#include <linux/types.h>
50425 +#include <linux/sysctl.h>
50426 +#include <linux/netdevice.h>
50427 +#include <linux/ptrace.h>
50428 +#include <linux/gracl.h>
50429 +#include <linux/gralloc.h>
50430 +#include <linux/security.h>
50431 +#include <linux/grinternal.h>
50432 +#include <linux/pid_namespace.h>
50433 +#include <linux/stop_machine.h>
50434 +#include <linux/fdtable.h>
50435 +#include <linux/percpu.h>
50436 +#include "../fs/mount.h"
50437 +
50438 +#include <asm/uaccess.h>
50439 +#include <asm/errno.h>
50440 +#include <asm/mman.h>
50441 +
50442 +static struct acl_role_db acl_role_set;
50443 +static struct name_db name_set;
50444 +static struct inodev_db inodev_set;
50445 +
50446 +/* for keeping track of userspace pointers used for subjects, so we
50447 + can share references in the kernel as well
50448 +*/
50449 +
50450 +static struct path real_root;
50451 +
50452 +static struct acl_subj_map_db subj_map_set;
50453 +
50454 +static struct acl_role_label *default_role;
50455 +
50456 +static struct acl_role_label *role_list;
50457 +
50458 +static u16 acl_sp_role_value;
50459 +
50460 +extern char *gr_shared_page[4];
50461 +static DEFINE_MUTEX(gr_dev_mutex);
50462 +DEFINE_RWLOCK(gr_inode_lock);
50463 +
50464 +struct gr_arg *gr_usermode;
50465 +
50466 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
50467 +
50468 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50469 +extern void gr_clear_learn_entries(void);
50470 +
50471 +#ifdef CONFIG_GRKERNSEC_RESLOG
50472 +extern void gr_log_resource(const struct task_struct *task,
50473 + const int res, const unsigned long wanted, const int gt);
50474 +#endif
50475 +
50476 +unsigned char *gr_system_salt;
50477 +unsigned char *gr_system_sum;
50478 +
50479 +static struct sprole_pw **acl_special_roles = NULL;
50480 +static __u16 num_sprole_pws = 0;
50481 +
50482 +static struct acl_role_label *kernel_role = NULL;
50483 +
50484 +static unsigned int gr_auth_attempts = 0;
50485 +static unsigned long gr_auth_expires = 0UL;
50486 +
50487 +#ifdef CONFIG_NET
50488 +extern struct vfsmount *sock_mnt;
50489 +#endif
50490 +
50491 +extern struct vfsmount *pipe_mnt;
50492 +extern struct vfsmount *shm_mnt;
50493 +#ifdef CONFIG_HUGETLBFS
50494 +extern struct vfsmount *hugetlbfs_vfsmount;
50495 +#endif
50496 +
50497 +static struct acl_object_label *fakefs_obj_rw;
50498 +static struct acl_object_label *fakefs_obj_rwx;
50499 +
50500 +extern int gr_init_uidset(void);
50501 +extern void gr_free_uidset(void);
50502 +extern void gr_remove_uid(uid_t uid);
50503 +extern int gr_find_uid(uid_t uid);
50504 +
50505 +DECLARE_BRLOCK(vfsmount_lock);
50506 +
50507 +__inline__ int
50508 +gr_acl_is_enabled(void)
50509 +{
50510 + return (gr_status & GR_READY);
50511 +}
50512 +
50513 +#ifdef CONFIG_BTRFS_FS
50514 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50515 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50516 +#endif
50517 +
50518 +static inline dev_t __get_dev(const struct dentry *dentry)
50519 +{
50520 +#ifdef CONFIG_BTRFS_FS
50521 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50522 + return get_btrfs_dev_from_inode(dentry->d_inode);
50523 + else
50524 +#endif
50525 + return dentry->d_inode->i_sb->s_dev;
50526 +}
50527 +
50528 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50529 +{
50530 + return __get_dev(dentry);
50531 +}
50532 +
50533 +static char gr_task_roletype_to_char(struct task_struct *task)
50534 +{
50535 + switch (task->role->roletype &
50536 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50537 + GR_ROLE_SPECIAL)) {
50538 + case GR_ROLE_DEFAULT:
50539 + return 'D';
50540 + case GR_ROLE_USER:
50541 + return 'U';
50542 + case GR_ROLE_GROUP:
50543 + return 'G';
50544 + case GR_ROLE_SPECIAL:
50545 + return 'S';
50546 + }
50547 +
50548 + return 'X';
50549 +}
50550 +
50551 +char gr_roletype_to_char(void)
50552 +{
50553 + return gr_task_roletype_to_char(current);
50554 +}
50555 +
50556 +__inline__ int
50557 +gr_acl_tpe_check(void)
50558 +{
50559 + if (unlikely(!(gr_status & GR_READY)))
50560 + return 0;
50561 + if (current->role->roletype & GR_ROLE_TPE)
50562 + return 1;
50563 + else
50564 + return 0;
50565 +}
50566 +
50567 +int
50568 +gr_handle_rawio(const struct inode *inode)
50569 +{
50570 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50571 + if (inode && S_ISBLK(inode->i_mode) &&
50572 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50573 + !capable(CAP_SYS_RAWIO))
50574 + return 1;
50575 +#endif
50576 + return 0;
50577 +}
50578 +
50579 +static int
50580 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50581 +{
50582 + if (likely(lena != lenb))
50583 + return 0;
50584 +
50585 + return !memcmp(a, b, lena);
50586 +}
50587 +
50588 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
50589 +{
50590 + *buflen -= namelen;
50591 + if (*buflen < 0)
50592 + return -ENAMETOOLONG;
50593 + *buffer -= namelen;
50594 + memcpy(*buffer, str, namelen);
50595 + return 0;
50596 +}
50597 +
50598 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50599 +{
50600 + return prepend(buffer, buflen, name->name, name->len);
50601 +}
50602 +
50603 +static int prepend_path(const struct path *path, struct path *root,
50604 + char **buffer, int *buflen)
50605 +{
50606 + struct dentry *dentry = path->dentry;
50607 + struct vfsmount *vfsmnt = path->mnt;
50608 + struct mount *mnt = real_mount(vfsmnt);
50609 + bool slash = false;
50610 + int error = 0;
50611 +
50612 + while (dentry != root->dentry || vfsmnt != root->mnt) {
50613 + struct dentry * parent;
50614 +
50615 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50616 + /* Global root? */
50617 + if (!mnt_has_parent(mnt)) {
50618 + goto out;
50619 + }
50620 + dentry = mnt->mnt_mountpoint;
50621 + mnt = mnt->mnt_parent;
50622 + vfsmnt = &mnt->mnt;
50623 + continue;
50624 + }
50625 + parent = dentry->d_parent;
50626 + prefetch(parent);
50627 + spin_lock(&dentry->d_lock);
50628 + error = prepend_name(buffer, buflen, &dentry->d_name);
50629 + spin_unlock(&dentry->d_lock);
50630 + if (!error)
50631 + error = prepend(buffer, buflen, "/", 1);
50632 + if (error)
50633 + break;
50634 +
50635 + slash = true;
50636 + dentry = parent;
50637 + }
50638 +
50639 +out:
50640 + if (!error && !slash)
50641 + error = prepend(buffer, buflen, "/", 1);
50642 +
50643 + return error;
50644 +}
50645 +
50646 +/* this must be called with vfsmount_lock and rename_lock held */
50647 +
50648 +static char *__our_d_path(const struct path *path, struct path *root,
50649 + char *buf, int buflen)
50650 +{
50651 + char *res = buf + buflen;
50652 + int error;
50653 +
50654 + prepend(&res, &buflen, "\0", 1);
50655 + error = prepend_path(path, root, &res, &buflen);
50656 + if (error)
50657 + return ERR_PTR(error);
50658 +
50659 + return res;
50660 +}
50661 +
50662 +static char *
50663 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
50664 +{
50665 + char *retval;
50666 +
50667 + retval = __our_d_path(path, root, buf, buflen);
50668 + if (unlikely(IS_ERR(retval)))
50669 + retval = strcpy(buf, "<path too long>");
50670 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50671 + retval[1] = '\0';
50672 +
50673 + return retval;
50674 +}
50675 +
50676 +static char *
50677 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50678 + char *buf, int buflen)
50679 +{
50680 + struct path path;
50681 + char *res;
50682 +
50683 + path.dentry = (struct dentry *)dentry;
50684 + path.mnt = (struct vfsmount *)vfsmnt;
50685 +
50686 + /* we can use real_root.dentry, real_root.mnt, because this is only called
50687 + by the RBAC system */
50688 + res = gen_full_path(&path, &real_root, buf, buflen);
50689 +
50690 + return res;
50691 +}
50692 +
50693 +static char *
50694 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50695 + char *buf, int buflen)
50696 +{
50697 + char *res;
50698 + struct path path;
50699 + struct path root;
50700 + struct task_struct *reaper = init_pid_ns.child_reaper;
50701 +
50702 + path.dentry = (struct dentry *)dentry;
50703 + path.mnt = (struct vfsmount *)vfsmnt;
50704 +
50705 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50706 + get_fs_root(reaper->fs, &root);
50707 +
50708 + write_seqlock(&rename_lock);
50709 + br_read_lock(vfsmount_lock);
50710 + res = gen_full_path(&path, &root, buf, buflen);
50711 + br_read_unlock(vfsmount_lock);
50712 + write_sequnlock(&rename_lock);
50713 +
50714 + path_put(&root);
50715 + return res;
50716 +}
50717 +
50718 +static char *
50719 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50720 +{
50721 + char *ret;
50722 + write_seqlock(&rename_lock);
50723 + br_read_lock(vfsmount_lock);
50724 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50725 + PAGE_SIZE);
50726 + br_read_unlock(vfsmount_lock);
50727 + write_sequnlock(&rename_lock);
50728 + return ret;
50729 +}
50730 +
50731 +static char *
50732 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50733 +{
50734 + char *ret;
50735 + char *buf;
50736 + int buflen;
50737 +
50738 + write_seqlock(&rename_lock);
50739 + br_read_lock(vfsmount_lock);
50740 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50741 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50742 + buflen = (int)(ret - buf);
50743 + if (buflen >= 5)
50744 + prepend(&ret, &buflen, "/proc", 5);
50745 + else
50746 + ret = strcpy(buf, "<path too long>");
50747 + br_read_unlock(vfsmount_lock);
50748 + write_sequnlock(&rename_lock);
50749 + return ret;
50750 +}
50751 +
50752 +char *
50753 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50754 +{
50755 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50756 + PAGE_SIZE);
50757 +}
50758 +
50759 +char *
50760 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
50761 +{
50762 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
50763 + PAGE_SIZE);
50764 +}
50765 +
50766 +char *
50767 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
50768 +{
50769 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
50770 + PAGE_SIZE);
50771 +}
50772 +
50773 +char *
50774 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
50775 +{
50776 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
50777 + PAGE_SIZE);
50778 +}
50779 +
50780 +char *
50781 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
50782 +{
50783 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
50784 + PAGE_SIZE);
50785 +}
50786 +
50787 +__inline__ __u32
50788 +to_gr_audit(const __u32 reqmode)
50789 +{
50790 + /* masks off auditable permission flags, then shifts them to create
50791 + auditing flags, and adds the special case of append auditing if
50792 + we're requesting write */
50793 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
50794 +}
50795 +
50796 +struct acl_subject_label *
50797 +lookup_subject_map(const struct acl_subject_label *userp)
50798 +{
50799 + unsigned int index = shash(userp, subj_map_set.s_size);
50800 + struct subject_map *match;
50801 +
50802 + match = subj_map_set.s_hash[index];
50803 +
50804 + while (match && match->user != userp)
50805 + match = match->next;
50806 +
50807 + if (match != NULL)
50808 + return match->kernel;
50809 + else
50810 + return NULL;
50811 +}
50812 +
50813 +static void
50814 +insert_subj_map_entry(struct subject_map *subjmap)
50815 +{
50816 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
50817 + struct subject_map **curr;
50818 +
50819 + subjmap->prev = NULL;
50820 +
50821 + curr = &subj_map_set.s_hash[index];
50822 + if (*curr != NULL)
50823 + (*curr)->prev = subjmap;
50824 +
50825 + subjmap->next = *curr;
50826 + *curr = subjmap;
50827 +
50828 + return;
50829 +}
50830 +
50831 +static struct acl_role_label *
50832 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
50833 + const gid_t gid)
50834 +{
50835 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
50836 + struct acl_role_label *match;
50837 + struct role_allowed_ip *ipp;
50838 + unsigned int x;
50839 + u32 curr_ip = task->signal->curr_ip;
50840 +
50841 + task->signal->saved_ip = curr_ip;
50842 +
50843 + match = acl_role_set.r_hash[index];
50844 +
50845 + while (match) {
50846 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
50847 + for (x = 0; x < match->domain_child_num; x++) {
50848 + if (match->domain_children[x] == uid)
50849 + goto found;
50850 + }
50851 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
50852 + break;
50853 + match = match->next;
50854 + }
50855 +found:
50856 + if (match == NULL) {
50857 + try_group:
50858 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
50859 + match = acl_role_set.r_hash[index];
50860 +
50861 + while (match) {
50862 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
50863 + for (x = 0; x < match->domain_child_num; x++) {
50864 + if (match->domain_children[x] == gid)
50865 + goto found2;
50866 + }
50867 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
50868 + break;
50869 + match = match->next;
50870 + }
50871 +found2:
50872 + if (match == NULL)
50873 + match = default_role;
50874 + if (match->allowed_ips == NULL)
50875 + return match;
50876 + else {
50877 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50878 + if (likely
50879 + ((ntohl(curr_ip) & ipp->netmask) ==
50880 + (ntohl(ipp->addr) & ipp->netmask)))
50881 + return match;
50882 + }
50883 + match = default_role;
50884 + }
50885 + } else if (match->allowed_ips == NULL) {
50886 + return match;
50887 + } else {
50888 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
50889 + if (likely
50890 + ((ntohl(curr_ip) & ipp->netmask) ==
50891 + (ntohl(ipp->addr) & ipp->netmask)))
50892 + return match;
50893 + }
50894 + goto try_group;
50895 + }
50896 +
50897 + return match;
50898 +}
50899 +
50900 +struct acl_subject_label *
50901 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
50902 + const struct acl_role_label *role)
50903 +{
50904 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50905 + struct acl_subject_label *match;
50906 +
50907 + match = role->subj_hash[index];
50908 +
50909 + while (match && (match->inode != ino || match->device != dev ||
50910 + (match->mode & GR_DELETED))) {
50911 + match = match->next;
50912 + }
50913 +
50914 + if (match && !(match->mode & GR_DELETED))
50915 + return match;
50916 + else
50917 + return NULL;
50918 +}
50919 +
50920 +struct acl_subject_label *
50921 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
50922 + const struct acl_role_label *role)
50923 +{
50924 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
50925 + struct acl_subject_label *match;
50926 +
50927 + match = role->subj_hash[index];
50928 +
50929 + while (match && (match->inode != ino || match->device != dev ||
50930 + !(match->mode & GR_DELETED))) {
50931 + match = match->next;
50932 + }
50933 +
50934 + if (match && (match->mode & GR_DELETED))
50935 + return match;
50936 + else
50937 + return NULL;
50938 +}
50939 +
50940 +static struct acl_object_label *
50941 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
50942 + const struct acl_subject_label *subj)
50943 +{
50944 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50945 + struct acl_object_label *match;
50946 +
50947 + match = subj->obj_hash[index];
50948 +
50949 + while (match && (match->inode != ino || match->device != dev ||
50950 + (match->mode & GR_DELETED))) {
50951 + match = match->next;
50952 + }
50953 +
50954 + if (match && !(match->mode & GR_DELETED))
50955 + return match;
50956 + else
50957 + return NULL;
50958 +}
50959 +
50960 +static struct acl_object_label *
50961 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
50962 + const struct acl_subject_label *subj)
50963 +{
50964 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
50965 + struct acl_object_label *match;
50966 +
50967 + match = subj->obj_hash[index];
50968 +
50969 + while (match && (match->inode != ino || match->device != dev ||
50970 + !(match->mode & GR_DELETED))) {
50971 + match = match->next;
50972 + }
50973 +
50974 + if (match && (match->mode & GR_DELETED))
50975 + return match;
50976 +
50977 + match = subj->obj_hash[index];
50978 +
50979 + while (match && (match->inode != ino || match->device != dev ||
50980 + (match->mode & GR_DELETED))) {
50981 + match = match->next;
50982 + }
50983 +
50984 + if (match && !(match->mode & GR_DELETED))
50985 + return match;
50986 + else
50987 + return NULL;
50988 +}
50989 +
50990 +static struct name_entry *
50991 +lookup_name_entry(const char *name)
50992 +{
50993 + unsigned int len = strlen(name);
50994 + unsigned int key = full_name_hash(name, len);
50995 + unsigned int index = key % name_set.n_size;
50996 + struct name_entry *match;
50997 +
50998 + match = name_set.n_hash[index];
50999 +
51000 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51001 + match = match->next;
51002 +
51003 + return match;
51004 +}
51005 +
51006 +static struct name_entry *
51007 +lookup_name_entry_create(const char *name)
51008 +{
51009 + unsigned int len = strlen(name);
51010 + unsigned int key = full_name_hash(name, len);
51011 + unsigned int index = key % name_set.n_size;
51012 + struct name_entry *match;
51013 +
51014 + match = name_set.n_hash[index];
51015 +
51016 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51017 + !match->deleted))
51018 + match = match->next;
51019 +
51020 + if (match && match->deleted)
51021 + return match;
51022 +
51023 + match = name_set.n_hash[index];
51024 +
51025 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51026 + match->deleted))
51027 + match = match->next;
51028 +
51029 + if (match && !match->deleted)
51030 + return match;
51031 + else
51032 + return NULL;
51033 +}
51034 +
51035 +static struct inodev_entry *
51036 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
51037 +{
51038 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
51039 + struct inodev_entry *match;
51040 +
51041 + match = inodev_set.i_hash[index];
51042 +
51043 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51044 + match = match->next;
51045 +
51046 + return match;
51047 +}
51048 +
51049 +static void
51050 +insert_inodev_entry(struct inodev_entry *entry)
51051 +{
51052 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51053 + inodev_set.i_size);
51054 + struct inodev_entry **curr;
51055 +
51056 + entry->prev = NULL;
51057 +
51058 + curr = &inodev_set.i_hash[index];
51059 + if (*curr != NULL)
51060 + (*curr)->prev = entry;
51061 +
51062 + entry->next = *curr;
51063 + *curr = entry;
51064 +
51065 + return;
51066 +}
51067 +
51068 +static void
51069 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51070 +{
51071 + unsigned int index =
51072 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51073 + struct acl_role_label **curr;
51074 + struct acl_role_label *tmp, *tmp2;
51075 +
51076 + curr = &acl_role_set.r_hash[index];
51077 +
51078 + /* simple case, slot is empty, just set it to our role */
51079 + if (*curr == NULL) {
51080 + *curr = role;
51081 + } else {
51082 + /* example:
51083 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
51084 + 2 -> 3
51085 + */
51086 + /* first check to see if we can already be reached via this slot */
51087 + tmp = *curr;
51088 + while (tmp && tmp != role)
51089 + tmp = tmp->next;
51090 + if (tmp == role) {
51091 + /* we don't need to add ourselves to this slot's chain */
51092 + return;
51093 + }
51094 + /* we need to add ourselves to this chain, two cases */
51095 + if (role->next == NULL) {
51096 + /* simple case, append the current chain to our role */
51097 + role->next = *curr;
51098 + *curr = role;
51099 + } else {
51100 + /* 1 -> 2 -> 3 -> 4
51101 + 2 -> 3 -> 4
51102 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51103 + */
51104 + /* trickier case: walk our role's chain until we find
51105 + the role for the start of the current slot's chain */
51106 + tmp = role;
51107 + tmp2 = *curr;
51108 + while (tmp->next && tmp->next != tmp2)
51109 + tmp = tmp->next;
51110 + if (tmp->next == tmp2) {
51111 + /* from example above, we found 3, so just
51112 + replace this slot's chain with ours */
51113 + *curr = role;
51114 + } else {
51115 + /* we didn't find a subset of our role's chain
51116 + in the current slot's chain, so append their
51117 + chain to ours, and set us as the first role in
51118 + the slot's chain
51119 +
51120 + we could fold this case with the case above,
51121 + but making it explicit for clarity
51122 + */
51123 + tmp->next = tmp2;
51124 + *curr = role;
51125 + }
51126 + }
51127 + }
51128 +
51129 + return;
51130 +}
51131 +
51132 +static void
51133 +insert_acl_role_label(struct acl_role_label *role)
51134 +{
51135 + int i;
51136 +
51137 + if (role_list == NULL) {
51138 + role_list = role;
51139 + role->prev = NULL;
51140 + } else {
51141 + role->prev = role_list;
51142 + role_list = role;
51143 + }
51144 +
51145 + /* used for hash chains */
51146 + role->next = NULL;
51147 +
51148 + if (role->roletype & GR_ROLE_DOMAIN) {
51149 + for (i = 0; i < role->domain_child_num; i++)
51150 + __insert_acl_role_label(role, role->domain_children[i]);
51151 + } else
51152 + __insert_acl_role_label(role, role->uidgid);
51153 +}
51154 +
51155 +static int
51156 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51157 +{
51158 + struct name_entry **curr, *nentry;
51159 + struct inodev_entry *ientry;
51160 + unsigned int len = strlen(name);
51161 + unsigned int key = full_name_hash(name, len);
51162 + unsigned int index = key % name_set.n_size;
51163 +
51164 + curr = &name_set.n_hash[index];
51165 +
51166 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51167 + curr = &((*curr)->next);
51168 +
51169 + if (*curr != NULL)
51170 + return 1;
51171 +
51172 + nentry = acl_alloc(sizeof (struct name_entry));
51173 + if (nentry == NULL)
51174 + return 0;
51175 + ientry = acl_alloc(sizeof (struct inodev_entry));
51176 + if (ientry == NULL)
51177 + return 0;
51178 + ientry->nentry = nentry;
51179 +
51180 + nentry->key = key;
51181 + nentry->name = name;
51182 + nentry->inode = inode;
51183 + nentry->device = device;
51184 + nentry->len = len;
51185 + nentry->deleted = deleted;
51186 +
51187 + nentry->prev = NULL;
51188 + curr = &name_set.n_hash[index];
51189 + if (*curr != NULL)
51190 + (*curr)->prev = nentry;
51191 + nentry->next = *curr;
51192 + *curr = nentry;
51193 +
51194 + /* insert us into the table searchable by inode/dev */
51195 + insert_inodev_entry(ientry);
51196 +
51197 + return 1;
51198 +}
51199 +
51200 +static void
51201 +insert_acl_obj_label(struct acl_object_label *obj,
51202 + struct acl_subject_label *subj)
51203 +{
51204 + unsigned int index =
51205 + fhash(obj->inode, obj->device, subj->obj_hash_size);
51206 + struct acl_object_label **curr;
51207 +
51208 +
51209 + obj->prev = NULL;
51210 +
51211 + curr = &subj->obj_hash[index];
51212 + if (*curr != NULL)
51213 + (*curr)->prev = obj;
51214 +
51215 + obj->next = *curr;
51216 + *curr = obj;
51217 +
51218 + return;
51219 +}
51220 +
51221 +static void
51222 +insert_acl_subj_label(struct acl_subject_label *obj,
51223 + struct acl_role_label *role)
51224 +{
51225 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51226 + struct acl_subject_label **curr;
51227 +
51228 + obj->prev = NULL;
51229 +
51230 + curr = &role->subj_hash[index];
51231 + if (*curr != NULL)
51232 + (*curr)->prev = obj;
51233 +
51234 + obj->next = *curr;
51235 + *curr = obj;
51236 +
51237 + return;
51238 +}
51239 +
51240 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51241 +
51242 +static void *
51243 +create_table(__u32 * len, int elementsize)
51244 +{
51245 + unsigned int table_sizes[] = {
51246 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51247 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51248 + 4194301, 8388593, 16777213, 33554393, 67108859
51249 + };
51250 + void *newtable = NULL;
51251 + unsigned int pwr = 0;
51252 +
51253 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51254 + table_sizes[pwr] <= *len)
51255 + pwr++;
51256 +
51257 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51258 + return newtable;
51259 +
51260 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51261 + newtable =
51262 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51263 + else
51264 + newtable = vmalloc(table_sizes[pwr] * elementsize);
51265 +
51266 + *len = table_sizes[pwr];
51267 +
51268 + return newtable;
51269 +}
51270 +
51271 +static int
51272 +init_variables(const struct gr_arg *arg)
51273 +{
51274 + struct task_struct *reaper = init_pid_ns.child_reaper;
51275 + unsigned int stacksize;
51276 +
51277 + subj_map_set.s_size = arg->role_db.num_subjects;
51278 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51279 + name_set.n_size = arg->role_db.num_objects;
51280 + inodev_set.i_size = arg->role_db.num_objects;
51281 +
51282 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
51283 + !name_set.n_size || !inodev_set.i_size)
51284 + return 1;
51285 +
51286 + if (!gr_init_uidset())
51287 + return 1;
51288 +
51289 + /* set up the stack that holds allocation info */
51290 +
51291 + stacksize = arg->role_db.num_pointers + 5;
51292 +
51293 + if (!acl_alloc_stack_init(stacksize))
51294 + return 1;
51295 +
51296 + /* grab reference for the real root dentry and vfsmount */
51297 + get_fs_root(reaper->fs, &real_root);
51298 +
51299 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51300 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51301 +#endif
51302 +
51303 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51304 + if (fakefs_obj_rw == NULL)
51305 + return 1;
51306 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51307 +
51308 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51309 + if (fakefs_obj_rwx == NULL)
51310 + return 1;
51311 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
51312 +
51313 + subj_map_set.s_hash =
51314 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51315 + acl_role_set.r_hash =
51316 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51317 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51318 + inodev_set.i_hash =
51319 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51320 +
51321 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51322 + !name_set.n_hash || !inodev_set.i_hash)
51323 + return 1;
51324 +
51325 + memset(subj_map_set.s_hash, 0,
51326 + sizeof(struct subject_map *) * subj_map_set.s_size);
51327 + memset(acl_role_set.r_hash, 0,
51328 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
51329 + memset(name_set.n_hash, 0,
51330 + sizeof (struct name_entry *) * name_set.n_size);
51331 + memset(inodev_set.i_hash, 0,
51332 + sizeof (struct inodev_entry *) * inodev_set.i_size);
51333 +
51334 + return 0;
51335 +}
51336 +
51337 +/* free information not needed after startup
51338 + currently contains user->kernel pointer mappings for subjects
51339 +*/
51340 +
51341 +static void
51342 +free_init_variables(void)
51343 +{
51344 + __u32 i;
51345 +
51346 + if (subj_map_set.s_hash) {
51347 + for (i = 0; i < subj_map_set.s_size; i++) {
51348 + if (subj_map_set.s_hash[i]) {
51349 + kfree(subj_map_set.s_hash[i]);
51350 + subj_map_set.s_hash[i] = NULL;
51351 + }
51352 + }
51353 +
51354 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51355 + PAGE_SIZE)
51356 + kfree(subj_map_set.s_hash);
51357 + else
51358 + vfree(subj_map_set.s_hash);
51359 + }
51360 +
51361 + return;
51362 +}
51363 +
51364 +static void
51365 +free_variables(void)
51366 +{
51367 + struct acl_subject_label *s;
51368 + struct acl_role_label *r;
51369 + struct task_struct *task, *task2;
51370 + unsigned int x;
51371 +
51372 + gr_clear_learn_entries();
51373 +
51374 + read_lock(&tasklist_lock);
51375 + do_each_thread(task2, task) {
51376 + task->acl_sp_role = 0;
51377 + task->acl_role_id = 0;
51378 + task->acl = NULL;
51379 + task->role = NULL;
51380 + } while_each_thread(task2, task);
51381 + read_unlock(&tasklist_lock);
51382 +
51383 + /* release the reference to the real root dentry and vfsmount */
51384 + path_put(&real_root);
51385 + memset(&real_root, 0, sizeof(real_root));
51386 +
51387 + /* free all object hash tables */
51388 +
51389 + FOR_EACH_ROLE_START(r)
51390 + if (r->subj_hash == NULL)
51391 + goto next_role;
51392 + FOR_EACH_SUBJECT_START(r, s, x)
51393 + if (s->obj_hash == NULL)
51394 + break;
51395 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51396 + kfree(s->obj_hash);
51397 + else
51398 + vfree(s->obj_hash);
51399 + FOR_EACH_SUBJECT_END(s, x)
51400 + FOR_EACH_NESTED_SUBJECT_START(r, s)
51401 + if (s->obj_hash == NULL)
51402 + break;
51403 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51404 + kfree(s->obj_hash);
51405 + else
51406 + vfree(s->obj_hash);
51407 + FOR_EACH_NESTED_SUBJECT_END(s)
51408 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51409 + kfree(r->subj_hash);
51410 + else
51411 + vfree(r->subj_hash);
51412 + r->subj_hash = NULL;
51413 +next_role:
51414 + FOR_EACH_ROLE_END(r)
51415 +
51416 + acl_free_all();
51417 +
51418 + if (acl_role_set.r_hash) {
51419 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51420 + PAGE_SIZE)
51421 + kfree(acl_role_set.r_hash);
51422 + else
51423 + vfree(acl_role_set.r_hash);
51424 + }
51425 + if (name_set.n_hash) {
51426 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
51427 + PAGE_SIZE)
51428 + kfree(name_set.n_hash);
51429 + else
51430 + vfree(name_set.n_hash);
51431 + }
51432 +
51433 + if (inodev_set.i_hash) {
51434 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51435 + PAGE_SIZE)
51436 + kfree(inodev_set.i_hash);
51437 + else
51438 + vfree(inodev_set.i_hash);
51439 + }
51440 +
51441 + gr_free_uidset();
51442 +
51443 + memset(&name_set, 0, sizeof (struct name_db));
51444 + memset(&inodev_set, 0, sizeof (struct inodev_db));
51445 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51446 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51447 +
51448 + default_role = NULL;
51449 + kernel_role = NULL;
51450 + role_list = NULL;
51451 +
51452 + return;
51453 +}
51454 +
51455 +static __u32
51456 +count_user_objs(struct acl_object_label *userp)
51457 +{
51458 + struct acl_object_label o_tmp;
51459 + __u32 num = 0;
51460 +
51461 + while (userp) {
51462 + if (copy_from_user(&o_tmp, userp,
51463 + sizeof (struct acl_object_label)))
51464 + break;
51465 +
51466 + userp = o_tmp.prev;
51467 + num++;
51468 + }
51469 +
51470 + return num;
51471 +}
51472 +
51473 +static struct acl_subject_label *
51474 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51475 +
51476 +static int
51477 +copy_user_glob(struct acl_object_label *obj)
51478 +{
51479 + struct acl_object_label *g_tmp, **guser;
51480 + unsigned int len;
51481 + char *tmp;
51482 +
51483 + if (obj->globbed == NULL)
51484 + return 0;
51485 +
51486 + guser = &obj->globbed;
51487 + while (*guser) {
51488 + g_tmp = (struct acl_object_label *)
51489 + acl_alloc(sizeof (struct acl_object_label));
51490 + if (g_tmp == NULL)
51491 + return -ENOMEM;
51492 +
51493 + if (copy_from_user(g_tmp, *guser,
51494 + sizeof (struct acl_object_label)))
51495 + return -EFAULT;
51496 +
51497 + len = strnlen_user(g_tmp->filename, PATH_MAX);
51498 +
51499 + if (!len || len >= PATH_MAX)
51500 + return -EINVAL;
51501 +
51502 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51503 + return -ENOMEM;
51504 +
51505 + if (copy_from_user(tmp, g_tmp->filename, len))
51506 + return -EFAULT;
51507 + tmp[len-1] = '\0';
51508 + g_tmp->filename = tmp;
51509 +
51510 + *guser = g_tmp;
51511 + guser = &(g_tmp->next);
51512 + }
51513 +
51514 + return 0;
51515 +}
51516 +
51517 +static int
51518 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51519 + struct acl_role_label *role)
51520 +{
51521 + struct acl_object_label *o_tmp;
51522 + unsigned int len;
51523 + int ret;
51524 + char *tmp;
51525 +
51526 + while (userp) {
51527 + if ((o_tmp = (struct acl_object_label *)
51528 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
51529 + return -ENOMEM;
51530 +
51531 + if (copy_from_user(o_tmp, userp,
51532 + sizeof (struct acl_object_label)))
51533 + return -EFAULT;
51534 +
51535 + userp = o_tmp->prev;
51536 +
51537 + len = strnlen_user(o_tmp->filename, PATH_MAX);
51538 +
51539 + if (!len || len >= PATH_MAX)
51540 + return -EINVAL;
51541 +
51542 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51543 + return -ENOMEM;
51544 +
51545 + if (copy_from_user(tmp, o_tmp->filename, len))
51546 + return -EFAULT;
51547 + tmp[len-1] = '\0';
51548 + o_tmp->filename = tmp;
51549 +
51550 + insert_acl_obj_label(o_tmp, subj);
51551 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51552 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51553 + return -ENOMEM;
51554 +
51555 + ret = copy_user_glob(o_tmp);
51556 + if (ret)
51557 + return ret;
51558 +
51559 + if (o_tmp->nested) {
51560 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51561 + if (IS_ERR(o_tmp->nested))
51562 + return PTR_ERR(o_tmp->nested);
51563 +
51564 + /* insert into nested subject list */
51565 + o_tmp->nested->next = role->hash->first;
51566 + role->hash->first = o_tmp->nested;
51567 + }
51568 + }
51569 +
51570 + return 0;
51571 +}
51572 +
51573 +static __u32
51574 +count_user_subjs(struct acl_subject_label *userp)
51575 +{
51576 + struct acl_subject_label s_tmp;
51577 + __u32 num = 0;
51578 +
51579 + while (userp) {
51580 + if (copy_from_user(&s_tmp, userp,
51581 + sizeof (struct acl_subject_label)))
51582 + break;
51583 +
51584 + userp = s_tmp.prev;
51585 + /* do not count nested subjects against this count, since
51586 + they are not included in the hash table, but are
51587 + attached to objects. We have already counted
51588 + the subjects in userspace for the allocation
51589 + stack
51590 + */
51591 + if (!(s_tmp.mode & GR_NESTED))
51592 + num++;
51593 + }
51594 +
51595 + return num;
51596 +}
51597 +
51598 +static int
51599 +copy_user_allowedips(struct acl_role_label *rolep)
51600 +{
51601 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51602 +
51603 + ruserip = rolep->allowed_ips;
51604 +
51605 + while (ruserip) {
51606 + rlast = rtmp;
51607 +
51608 + if ((rtmp = (struct role_allowed_ip *)
51609 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51610 + return -ENOMEM;
51611 +
51612 + if (copy_from_user(rtmp, ruserip,
51613 + sizeof (struct role_allowed_ip)))
51614 + return -EFAULT;
51615 +
51616 + ruserip = rtmp->prev;
51617 +
51618 + if (!rlast) {
51619 + rtmp->prev = NULL;
51620 + rolep->allowed_ips = rtmp;
51621 + } else {
51622 + rlast->next = rtmp;
51623 + rtmp->prev = rlast;
51624 + }
51625 +
51626 + if (!ruserip)
51627 + rtmp->next = NULL;
51628 + }
51629 +
51630 + return 0;
51631 +}
51632 +
51633 +static int
51634 +copy_user_transitions(struct acl_role_label *rolep)
51635 +{
51636 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
51637 +
51638 + unsigned int len;
51639 + char *tmp;
51640 +
51641 + rusertp = rolep->transitions;
51642 +
51643 + while (rusertp) {
51644 + rlast = rtmp;
51645 +
51646 + if ((rtmp = (struct role_transition *)
51647 + acl_alloc(sizeof (struct role_transition))) == NULL)
51648 + return -ENOMEM;
51649 +
51650 + if (copy_from_user(rtmp, rusertp,
51651 + sizeof (struct role_transition)))
51652 + return -EFAULT;
51653 +
51654 + rusertp = rtmp->prev;
51655 +
51656 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51657 +
51658 + if (!len || len >= GR_SPROLE_LEN)
51659 + return -EINVAL;
51660 +
51661 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51662 + return -ENOMEM;
51663 +
51664 + if (copy_from_user(tmp, rtmp->rolename, len))
51665 + return -EFAULT;
51666 + tmp[len-1] = '\0';
51667 + rtmp->rolename = tmp;
51668 +
51669 + if (!rlast) {
51670 + rtmp->prev = NULL;
51671 + rolep->transitions = rtmp;
51672 + } else {
51673 + rlast->next = rtmp;
51674 + rtmp->prev = rlast;
51675 + }
51676 +
51677 + if (!rusertp)
51678 + rtmp->next = NULL;
51679 + }
51680 +
51681 + return 0;
51682 +}
51683 +
51684 +static struct acl_subject_label *
51685 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51686 +{
51687 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51688 + unsigned int len;
51689 + char *tmp;
51690 + __u32 num_objs;
51691 + struct acl_ip_label **i_tmp, *i_utmp2;
51692 + struct gr_hash_struct ghash;
51693 + struct subject_map *subjmap;
51694 + unsigned int i_num;
51695 + int err;
51696 +
51697 + s_tmp = lookup_subject_map(userp);
51698 +
51699 + /* we've already copied this subject into the kernel, just return
51700 + the reference to it, and don't copy it over again
51701 + */
51702 + if (s_tmp)
51703 + return(s_tmp);
51704 +
51705 + if ((s_tmp = (struct acl_subject_label *)
51706 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51707 + return ERR_PTR(-ENOMEM);
51708 +
51709 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51710 + if (subjmap == NULL)
51711 + return ERR_PTR(-ENOMEM);
51712 +
51713 + subjmap->user = userp;
51714 + subjmap->kernel = s_tmp;
51715 + insert_subj_map_entry(subjmap);
51716 +
51717 + if (copy_from_user(s_tmp, userp,
51718 + sizeof (struct acl_subject_label)))
51719 + return ERR_PTR(-EFAULT);
51720 +
51721 + len = strnlen_user(s_tmp->filename, PATH_MAX);
51722 +
51723 + if (!len || len >= PATH_MAX)
51724 + return ERR_PTR(-EINVAL);
51725 +
51726 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51727 + return ERR_PTR(-ENOMEM);
51728 +
51729 + if (copy_from_user(tmp, s_tmp->filename, len))
51730 + return ERR_PTR(-EFAULT);
51731 + tmp[len-1] = '\0';
51732 + s_tmp->filename = tmp;
51733 +
51734 + if (!strcmp(s_tmp->filename, "/"))
51735 + role->root_label = s_tmp;
51736 +
51737 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51738 + return ERR_PTR(-EFAULT);
51739 +
51740 + /* copy user and group transition tables */
51741 +
51742 + if (s_tmp->user_trans_num) {
51743 + uid_t *uidlist;
51744 +
51745 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51746 + if (uidlist == NULL)
51747 + return ERR_PTR(-ENOMEM);
51748 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51749 + return ERR_PTR(-EFAULT);
51750 +
51751 + s_tmp->user_transitions = uidlist;
51752 + }
51753 +
51754 + if (s_tmp->group_trans_num) {
51755 + gid_t *gidlist;
51756 +
51757 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
51758 + if (gidlist == NULL)
51759 + return ERR_PTR(-ENOMEM);
51760 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
51761 + return ERR_PTR(-EFAULT);
51762 +
51763 + s_tmp->group_transitions = gidlist;
51764 + }
51765 +
51766 + /* set up object hash table */
51767 + num_objs = count_user_objs(ghash.first);
51768 +
51769 + s_tmp->obj_hash_size = num_objs;
51770 + s_tmp->obj_hash =
51771 + (struct acl_object_label **)
51772 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
51773 +
51774 + if (!s_tmp->obj_hash)
51775 + return ERR_PTR(-ENOMEM);
51776 +
51777 + memset(s_tmp->obj_hash, 0,
51778 + s_tmp->obj_hash_size *
51779 + sizeof (struct acl_object_label *));
51780 +
51781 + /* add in objects */
51782 + err = copy_user_objs(ghash.first, s_tmp, role);
51783 +
51784 + if (err)
51785 + return ERR_PTR(err);
51786 +
51787 + /* set pointer for parent subject */
51788 + if (s_tmp->parent_subject) {
51789 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
51790 +
51791 + if (IS_ERR(s_tmp2))
51792 + return s_tmp2;
51793 +
51794 + s_tmp->parent_subject = s_tmp2;
51795 + }
51796 +
51797 + /* add in ip acls */
51798 +
51799 + if (!s_tmp->ip_num) {
51800 + s_tmp->ips = NULL;
51801 + goto insert;
51802 + }
51803 +
51804 + i_tmp =
51805 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
51806 + sizeof (struct acl_ip_label *));
51807 +
51808 + if (!i_tmp)
51809 + return ERR_PTR(-ENOMEM);
51810 +
51811 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
51812 + *(i_tmp + i_num) =
51813 + (struct acl_ip_label *)
51814 + acl_alloc(sizeof (struct acl_ip_label));
51815 + if (!*(i_tmp + i_num))
51816 + return ERR_PTR(-ENOMEM);
51817 +
51818 + if (copy_from_user
51819 + (&i_utmp2, s_tmp->ips + i_num,
51820 + sizeof (struct acl_ip_label *)))
51821 + return ERR_PTR(-EFAULT);
51822 +
51823 + if (copy_from_user
51824 + (*(i_tmp + i_num), i_utmp2,
51825 + sizeof (struct acl_ip_label)))
51826 + return ERR_PTR(-EFAULT);
51827 +
51828 + if ((*(i_tmp + i_num))->iface == NULL)
51829 + continue;
51830 +
51831 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
51832 + if (!len || len >= IFNAMSIZ)
51833 + return ERR_PTR(-EINVAL);
51834 + tmp = acl_alloc(len);
51835 + if (tmp == NULL)
51836 + return ERR_PTR(-ENOMEM);
51837 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
51838 + return ERR_PTR(-EFAULT);
51839 + (*(i_tmp + i_num))->iface = tmp;
51840 + }
51841 +
51842 + s_tmp->ips = i_tmp;
51843 +
51844 +insert:
51845 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
51846 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
51847 + return ERR_PTR(-ENOMEM);
51848 +
51849 + return s_tmp;
51850 +}
51851 +
51852 +static int
51853 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
51854 +{
51855 + struct acl_subject_label s_pre;
51856 + struct acl_subject_label * ret;
51857 + int err;
51858 +
51859 + while (userp) {
51860 + if (copy_from_user(&s_pre, userp,
51861 + sizeof (struct acl_subject_label)))
51862 + return -EFAULT;
51863 +
51864 + /* do not add nested subjects here, add
51865 + while parsing objects
51866 + */
51867 +
51868 + if (s_pre.mode & GR_NESTED) {
51869 + userp = s_pre.prev;
51870 + continue;
51871 + }
51872 +
51873 + ret = do_copy_user_subj(userp, role);
51874 +
51875 + err = PTR_ERR(ret);
51876 + if (IS_ERR(ret))
51877 + return err;
51878 +
51879 + insert_acl_subj_label(ret, role);
51880 +
51881 + userp = s_pre.prev;
51882 + }
51883 +
51884 + return 0;
51885 +}
51886 +
51887 +static int
51888 +copy_user_acl(struct gr_arg *arg)
51889 +{
51890 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
51891 + struct sprole_pw *sptmp;
51892 + struct gr_hash_struct *ghash;
51893 + uid_t *domainlist;
51894 + unsigned int r_num;
51895 + unsigned int len;
51896 + char *tmp;
51897 + int err = 0;
51898 + __u16 i;
51899 + __u32 num_subjs;
51900 +
51901 + /* we need a default and kernel role */
51902 + if (arg->role_db.num_roles < 2)
51903 + return -EINVAL;
51904 +
51905 + /* copy special role authentication info from userspace */
51906 +
51907 + num_sprole_pws = arg->num_sprole_pws;
51908 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
51909 +
51910 + if (!acl_special_roles && num_sprole_pws)
51911 + return -ENOMEM;
51912 +
51913 + for (i = 0; i < num_sprole_pws; i++) {
51914 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
51915 + if (!sptmp)
51916 + return -ENOMEM;
51917 + if (copy_from_user(sptmp, arg->sprole_pws + i,
51918 + sizeof (struct sprole_pw)))
51919 + return -EFAULT;
51920 +
51921 + len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
51922 +
51923 + if (!len || len >= GR_SPROLE_LEN)
51924 + return -EINVAL;
51925 +
51926 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51927 + return -ENOMEM;
51928 +
51929 + if (copy_from_user(tmp, sptmp->rolename, len))
51930 + return -EFAULT;
51931 +
51932 + tmp[len-1] = '\0';
51933 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51934 + printk(KERN_ALERT "Copying special role %s\n", tmp);
51935 +#endif
51936 + sptmp->rolename = tmp;
51937 + acl_special_roles[i] = sptmp;
51938 + }
51939 +
51940 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
51941 +
51942 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
51943 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
51944 +
51945 + if (!r_tmp)
51946 + return -ENOMEM;
51947 +
51948 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
51949 + sizeof (struct acl_role_label *)))
51950 + return -EFAULT;
51951 +
51952 + if (copy_from_user(r_tmp, r_utmp2,
51953 + sizeof (struct acl_role_label)))
51954 + return -EFAULT;
51955 +
51956 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
51957 +
51958 + if (!len || len >= PATH_MAX)
51959 + return -EINVAL;
51960 +
51961 + if ((tmp = (char *) acl_alloc(len)) == NULL)
51962 + return -ENOMEM;
51963 +
51964 + if (copy_from_user(tmp, r_tmp->rolename, len))
51965 + return -EFAULT;
51966 +
51967 + tmp[len-1] = '\0';
51968 + r_tmp->rolename = tmp;
51969 +
51970 + if (!strcmp(r_tmp->rolename, "default")
51971 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
51972 + default_role = r_tmp;
51973 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
51974 + kernel_role = r_tmp;
51975 + }
51976 +
51977 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
51978 + return -ENOMEM;
51979 +
51980 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
51981 + return -EFAULT;
51982 +
51983 + r_tmp->hash = ghash;
51984 +
51985 + num_subjs = count_user_subjs(r_tmp->hash->first);
51986 +
51987 + r_tmp->subj_hash_size = num_subjs;
51988 + r_tmp->subj_hash =
51989 + (struct acl_subject_label **)
51990 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
51991 +
51992 + if (!r_tmp->subj_hash)
51993 + return -ENOMEM;
51994 +
51995 + err = copy_user_allowedips(r_tmp);
51996 + if (err)
51997 + return err;
51998 +
51999 + /* copy domain info */
52000 + if (r_tmp->domain_children != NULL) {
52001 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52002 + if (domainlist == NULL)
52003 + return -ENOMEM;
52004 +
52005 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52006 + return -EFAULT;
52007 +
52008 + r_tmp->domain_children = domainlist;
52009 + }
52010 +
52011 + err = copy_user_transitions(r_tmp);
52012 + if (err)
52013 + return err;
52014 +
52015 + memset(r_tmp->subj_hash, 0,
52016 + r_tmp->subj_hash_size *
52017 + sizeof (struct acl_subject_label *));
52018 +
52019 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52020 +
52021 + if (err)
52022 + return err;
52023 +
52024 + /* set nested subject list to null */
52025 + r_tmp->hash->first = NULL;
52026 +
52027 + insert_acl_role_label(r_tmp);
52028 + }
52029 +
52030 + if (default_role == NULL || kernel_role == NULL)
52031 + return -EINVAL;
52032 +
52033 + return err;
52034 +}
52035 +
52036 +static int
52037 +gracl_init(struct gr_arg *args)
52038 +{
52039 + int error = 0;
52040 +
52041 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52042 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52043 +
52044 + if (init_variables(args)) {
52045 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52046 + error = -ENOMEM;
52047 + free_variables();
52048 + goto out;
52049 + }
52050 +
52051 + error = copy_user_acl(args);
52052 + free_init_variables();
52053 + if (error) {
52054 + free_variables();
52055 + goto out;
52056 + }
52057 +
52058 + if ((error = gr_set_acls(0))) {
52059 + free_variables();
52060 + goto out;
52061 + }
52062 +
52063 + pax_open_kernel();
52064 + gr_status |= GR_READY;
52065 + pax_close_kernel();
52066 +
52067 + out:
52068 + return error;
52069 +}
52070 +
52071 +/* derived from glibc fnmatch() 0: match, 1: no match*/
52072 +
52073 +static int
52074 +glob_match(const char *p, const char *n)
52075 +{
52076 + char c;
52077 +
52078 + while ((c = *p++) != '\0') {
52079 + switch (c) {
52080 + case '?':
52081 + if (*n == '\0')
52082 + return 1;
52083 + else if (*n == '/')
52084 + return 1;
52085 + break;
52086 + case '\\':
52087 + if (*n != c)
52088 + return 1;
52089 + break;
52090 + case '*':
52091 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
52092 + if (*n == '/')
52093 + return 1;
52094 + else if (c == '?') {
52095 + if (*n == '\0')
52096 + return 1;
52097 + else
52098 + ++n;
52099 + }
52100 + }
52101 + if (c == '\0') {
52102 + return 0;
52103 + } else {
52104 + const char *endp;
52105 +
52106 + if ((endp = strchr(n, '/')) == NULL)
52107 + endp = n + strlen(n);
52108 +
52109 + if (c == '[') {
52110 + for (--p; n < endp; ++n)
52111 + if (!glob_match(p, n))
52112 + return 0;
52113 + } else if (c == '/') {
52114 + while (*n != '\0' && *n != '/')
52115 + ++n;
52116 + if (*n == '/' && !glob_match(p, n + 1))
52117 + return 0;
52118 + } else {
52119 + for (--p; n < endp; ++n)
52120 + if (*n == c && !glob_match(p, n))
52121 + return 0;
52122 + }
52123 +
52124 + return 1;
52125 + }
52126 + case '[':
52127 + {
52128 + int not;
52129 + char cold;
52130 +
52131 + if (*n == '\0' || *n == '/')
52132 + return 1;
52133 +
52134 + not = (*p == '!' || *p == '^');
52135 + if (not)
52136 + ++p;
52137 +
52138 + c = *p++;
52139 + for (;;) {
52140 + unsigned char fn = (unsigned char)*n;
52141 +
52142 + if (c == '\0')
52143 + return 1;
52144 + else {
52145 + if (c == fn)
52146 + goto matched;
52147 + cold = c;
52148 + c = *p++;
52149 +
52150 + if (c == '-' && *p != ']') {
52151 + unsigned char cend = *p++;
52152 +
52153 + if (cend == '\0')
52154 + return 1;
52155 +
52156 + if (cold <= fn && fn <= cend)
52157 + goto matched;
52158 +
52159 + c = *p++;
52160 + }
52161 + }
52162 +
52163 + if (c == ']')
52164 + break;
52165 + }
52166 + if (!not)
52167 + return 1;
52168 + break;
52169 + matched:
52170 + while (c != ']') {
52171 + if (c == '\0')
52172 + return 1;
52173 +
52174 + c = *p++;
52175 + }
52176 + if (not)
52177 + return 1;
52178 + }
52179 + break;
52180 + default:
52181 + if (c != *n)
52182 + return 1;
52183 + }
52184 +
52185 + ++n;
52186 + }
52187 +
52188 + if (*n == '\0')
52189 + return 0;
52190 +
52191 + if (*n == '/')
52192 + return 0;
52193 +
52194 + return 1;
52195 +}
52196 +
52197 +static struct acl_object_label *
52198 +chk_glob_label(struct acl_object_label *globbed,
52199 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52200 +{
52201 + struct acl_object_label *tmp;
52202 +
52203 + if (*path == NULL)
52204 + *path = gr_to_filename_nolock(dentry, mnt);
52205 +
52206 + tmp = globbed;
52207 +
52208 + while (tmp) {
52209 + if (!glob_match(tmp->filename, *path))
52210 + return tmp;
52211 + tmp = tmp->next;
52212 + }
52213 +
52214 + return NULL;
52215 +}
52216 +
52217 +static struct acl_object_label *
52218 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52219 + const ino_t curr_ino, const dev_t curr_dev,
52220 + const struct acl_subject_label *subj, char **path, const int checkglob)
52221 +{
52222 + struct acl_subject_label *tmpsubj;
52223 + struct acl_object_label *retval;
52224 + struct acl_object_label *retval2;
52225 +
52226 + tmpsubj = (struct acl_subject_label *) subj;
52227 + read_lock(&gr_inode_lock);
52228 + do {
52229 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52230 + if (retval) {
52231 + if (checkglob && retval->globbed) {
52232 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52233 + if (retval2)
52234 + retval = retval2;
52235 + }
52236 + break;
52237 + }
52238 + } while ((tmpsubj = tmpsubj->parent_subject));
52239 + read_unlock(&gr_inode_lock);
52240 +
52241 + return retval;
52242 +}
52243 +
52244 +static __inline__ struct acl_object_label *
52245 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52246 + struct dentry *curr_dentry,
52247 + const struct acl_subject_label *subj, char **path, const int checkglob)
52248 +{
52249 + int newglob = checkglob;
52250 + ino_t inode;
52251 + dev_t device;
52252 +
52253 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52254 + as we don't want a / * rule to match instead of the / object
52255 + don't do this for create lookups that call this function though, since they're looking up
52256 + on the parent and thus need globbing checks on all paths
52257 + */
52258 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52259 + newglob = GR_NO_GLOB;
52260 +
52261 + spin_lock(&curr_dentry->d_lock);
52262 + inode = curr_dentry->d_inode->i_ino;
52263 + device = __get_dev(curr_dentry);
52264 + spin_unlock(&curr_dentry->d_lock);
52265 +
52266 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52267 +}
52268 +
52269 +static struct acl_object_label *
52270 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52271 + const struct acl_subject_label *subj, char *path, const int checkglob)
52272 +{
52273 + struct dentry *dentry = (struct dentry *) l_dentry;
52274 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52275 + struct mount *real_mnt = real_mount(mnt);
52276 + struct acl_object_label *retval;
52277 + struct dentry *parent;
52278 +
52279 + write_seqlock(&rename_lock);
52280 + br_read_lock(vfsmount_lock);
52281 +
52282 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52283 +#ifdef CONFIG_NET
52284 + mnt == sock_mnt ||
52285 +#endif
52286 +#ifdef CONFIG_HUGETLBFS
52287 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
52288 +#endif
52289 + /* ignore Eric Biederman */
52290 + IS_PRIVATE(l_dentry->d_inode))) {
52291 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52292 + goto out;
52293 + }
52294 +
52295 + for (;;) {
52296 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52297 + break;
52298 +
52299 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52300 + if (!mnt_has_parent(real_mnt))
52301 + break;
52302 +
52303 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52304 + if (retval != NULL)
52305 + goto out;
52306 +
52307 + dentry = real_mnt->mnt_mountpoint;
52308 + real_mnt = real_mnt->mnt_parent;
52309 + mnt = &real_mnt->mnt;
52310 + continue;
52311 + }
52312 +
52313 + parent = dentry->d_parent;
52314 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52315 + if (retval != NULL)
52316 + goto out;
52317 +
52318 + dentry = parent;
52319 + }
52320 +
52321 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52322 +
52323 + /* real_root is pinned so we don't have to hold a reference */
52324 + if (retval == NULL)
52325 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
52326 +out:
52327 + br_read_unlock(vfsmount_lock);
52328 + write_sequnlock(&rename_lock);
52329 +
52330 + BUG_ON(retval == NULL);
52331 +
52332 + return retval;
52333 +}
52334 +
52335 +static __inline__ struct acl_object_label *
52336 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52337 + const struct acl_subject_label *subj)
52338 +{
52339 + char *path = NULL;
52340 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52341 +}
52342 +
52343 +static __inline__ struct acl_object_label *
52344 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52345 + const struct acl_subject_label *subj)
52346 +{
52347 + char *path = NULL;
52348 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52349 +}
52350 +
52351 +static __inline__ struct acl_object_label *
52352 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52353 + const struct acl_subject_label *subj, char *path)
52354 +{
52355 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52356 +}
52357 +
52358 +static struct acl_subject_label *
52359 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52360 + const struct acl_role_label *role)
52361 +{
52362 + struct dentry *dentry = (struct dentry *) l_dentry;
52363 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
52364 + struct mount *real_mnt = real_mount(mnt);
52365 + struct acl_subject_label *retval;
52366 + struct dentry *parent;
52367 +
52368 + write_seqlock(&rename_lock);
52369 + br_read_lock(vfsmount_lock);
52370 +
52371 + for (;;) {
52372 + if (dentry == real_root.dentry && mnt == real_root.mnt)
52373 + break;
52374 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
52375 + if (!mnt_has_parent(real_mnt))
52376 + break;
52377 +
52378 + spin_lock(&dentry->d_lock);
52379 + read_lock(&gr_inode_lock);
52380 + retval =
52381 + lookup_acl_subj_label(dentry->d_inode->i_ino,
52382 + __get_dev(dentry), role);
52383 + read_unlock(&gr_inode_lock);
52384 + spin_unlock(&dentry->d_lock);
52385 + if (retval != NULL)
52386 + goto out;
52387 +
52388 + dentry = real_mnt->mnt_mountpoint;
52389 + real_mnt = real_mnt->mnt_parent;
52390 + mnt = &real_mnt->mnt;
52391 + continue;
52392 + }
52393 +
52394 + spin_lock(&dentry->d_lock);
52395 + read_lock(&gr_inode_lock);
52396 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52397 + __get_dev(dentry), role);
52398 + read_unlock(&gr_inode_lock);
52399 + parent = dentry->d_parent;
52400 + spin_unlock(&dentry->d_lock);
52401 +
52402 + if (retval != NULL)
52403 + goto out;
52404 +
52405 + dentry = parent;
52406 + }
52407 +
52408 + spin_lock(&dentry->d_lock);
52409 + read_lock(&gr_inode_lock);
52410 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
52411 + __get_dev(dentry), role);
52412 + read_unlock(&gr_inode_lock);
52413 + spin_unlock(&dentry->d_lock);
52414 +
52415 + if (unlikely(retval == NULL)) {
52416 + /* real_root is pinned, we don't need to hold a reference */
52417 + read_lock(&gr_inode_lock);
52418 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
52419 + __get_dev(real_root.dentry), role);
52420 + read_unlock(&gr_inode_lock);
52421 + }
52422 +out:
52423 + br_read_unlock(vfsmount_lock);
52424 + write_sequnlock(&rename_lock);
52425 +
52426 + BUG_ON(retval == NULL);
52427 +
52428 + return retval;
52429 +}
52430 +
52431 +static void
52432 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52433 +{
52434 + struct task_struct *task = current;
52435 + const struct cred *cred = current_cred();
52436 +
52437 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52438 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52439 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52440 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52441 +
52442 + return;
52443 +}
52444 +
52445 +static void
52446 +gr_log_learn_id_change(const char type, const unsigned int real,
52447 + const unsigned int effective, const unsigned int fs)
52448 +{
52449 + struct task_struct *task = current;
52450 + const struct cred *cred = current_cred();
52451 +
52452 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52453 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52454 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
52455 + type, real, effective, fs, &task->signal->saved_ip);
52456 +
52457 + return;
52458 +}
52459 +
52460 +__u32
52461 +gr_search_file(const struct dentry * dentry, const __u32 mode,
52462 + const struct vfsmount * mnt)
52463 +{
52464 + __u32 retval = mode;
52465 + struct acl_subject_label *curracl;
52466 + struct acl_object_label *currobj;
52467 +
52468 + if (unlikely(!(gr_status & GR_READY)))
52469 + return (mode & ~GR_AUDITS);
52470 +
52471 + curracl = current->acl;
52472 +
52473 + currobj = chk_obj_label(dentry, mnt, curracl);
52474 + retval = currobj->mode & mode;
52475 +
52476 + /* if we're opening a specified transfer file for writing
52477 + (e.g. /dev/initctl), then transfer our role to init
52478 + */
52479 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52480 + current->role->roletype & GR_ROLE_PERSIST)) {
52481 + struct task_struct *task = init_pid_ns.child_reaper;
52482 +
52483 + if (task->role != current->role) {
52484 + task->acl_sp_role = 0;
52485 + task->acl_role_id = current->acl_role_id;
52486 + task->role = current->role;
52487 + rcu_read_lock();
52488 + read_lock(&grsec_exec_file_lock);
52489 + gr_apply_subject_to_task(task);
52490 + read_unlock(&grsec_exec_file_lock);
52491 + rcu_read_unlock();
52492 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52493 + }
52494 + }
52495 +
52496 + if (unlikely
52497 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52498 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52499 + __u32 new_mode = mode;
52500 +
52501 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52502 +
52503 + retval = new_mode;
52504 +
52505 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52506 + new_mode |= GR_INHERIT;
52507 +
52508 + if (!(mode & GR_NOLEARN))
52509 + gr_log_learn(dentry, mnt, new_mode);
52510 + }
52511 +
52512 + return retval;
52513 +}
52514 +
52515 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52516 + const struct dentry *parent,
52517 + const struct vfsmount *mnt)
52518 +{
52519 + struct name_entry *match;
52520 + struct acl_object_label *matchpo;
52521 + struct acl_subject_label *curracl;
52522 + char *path;
52523 +
52524 + if (unlikely(!(gr_status & GR_READY)))
52525 + return NULL;
52526 +
52527 + preempt_disable();
52528 + path = gr_to_filename_rbac(new_dentry, mnt);
52529 + match = lookup_name_entry_create(path);
52530 +
52531 + curracl = current->acl;
52532 +
52533 + if (match) {
52534 + read_lock(&gr_inode_lock);
52535 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52536 + read_unlock(&gr_inode_lock);
52537 +
52538 + if (matchpo) {
52539 + preempt_enable();
52540 + return matchpo;
52541 + }
52542 + }
52543 +
52544 + // lookup parent
52545 +
52546 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52547 +
52548 + preempt_enable();
52549 + return matchpo;
52550 +}
52551 +
52552 +__u32
52553 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52554 + const struct vfsmount * mnt, const __u32 mode)
52555 +{
52556 + struct acl_object_label *matchpo;
52557 + __u32 retval;
52558 +
52559 + if (unlikely(!(gr_status & GR_READY)))
52560 + return (mode & ~GR_AUDITS);
52561 +
52562 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
52563 +
52564 + retval = matchpo->mode & mode;
52565 +
52566 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
52567 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52568 + __u32 new_mode = mode;
52569 +
52570 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52571 +
52572 + gr_log_learn(new_dentry, mnt, new_mode);
52573 + return new_mode;
52574 + }
52575 +
52576 + return retval;
52577 +}
52578 +
52579 +__u32
52580 +gr_check_link(const struct dentry * new_dentry,
52581 + const struct dentry * parent_dentry,
52582 + const struct vfsmount * parent_mnt,
52583 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52584 +{
52585 + struct acl_object_label *obj;
52586 + __u32 oldmode, newmode;
52587 + __u32 needmode;
52588 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52589 + GR_DELETE | GR_INHERIT;
52590 +
52591 + if (unlikely(!(gr_status & GR_READY)))
52592 + return (GR_CREATE | GR_LINK);
52593 +
52594 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52595 + oldmode = obj->mode;
52596 +
52597 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52598 + newmode = obj->mode;
52599 +
52600 + needmode = newmode & checkmodes;
52601 +
52602 + // old name for hardlink must have at least the permissions of the new name
52603 + if ((oldmode & needmode) != needmode)
52604 + goto bad;
52605 +
52606 + // if old name had restrictions/auditing, make sure the new name does as well
52607 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52608 +
52609 + // don't allow hardlinking of suid/sgid files without permission
52610 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52611 + needmode |= GR_SETID;
52612 +
52613 + if ((newmode & needmode) != needmode)
52614 + goto bad;
52615 +
52616 + // enforce minimum permissions
52617 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52618 + return newmode;
52619 +bad:
52620 + needmode = oldmode;
52621 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52622 + needmode |= GR_SETID;
52623 +
52624 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52625 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52626 + return (GR_CREATE | GR_LINK);
52627 + } else if (newmode & GR_SUPPRESS)
52628 + return GR_SUPPRESS;
52629 + else
52630 + return 0;
52631 +}
52632 +
52633 +int
52634 +gr_check_hidden_task(const struct task_struct *task)
52635 +{
52636 + if (unlikely(!(gr_status & GR_READY)))
52637 + return 0;
52638 +
52639 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52640 + return 1;
52641 +
52642 + return 0;
52643 +}
52644 +
52645 +int
52646 +gr_check_protected_task(const struct task_struct *task)
52647 +{
52648 + if (unlikely(!(gr_status & GR_READY) || !task))
52649 + return 0;
52650 +
52651 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52652 + task->acl != current->acl)
52653 + return 1;
52654 +
52655 + return 0;
52656 +}
52657 +
52658 +int
52659 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52660 +{
52661 + struct task_struct *p;
52662 + int ret = 0;
52663 +
52664 + if (unlikely(!(gr_status & GR_READY) || !pid))
52665 + return ret;
52666 +
52667 + read_lock(&tasklist_lock);
52668 + do_each_pid_task(pid, type, p) {
52669 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52670 + p->acl != current->acl) {
52671 + ret = 1;
52672 + goto out;
52673 + }
52674 + } while_each_pid_task(pid, type, p);
52675 +out:
52676 + read_unlock(&tasklist_lock);
52677 +
52678 + return ret;
52679 +}
52680 +
52681 +void
52682 +gr_copy_label(struct task_struct *tsk)
52683 +{
52684 + tsk->signal->used_accept = 0;
52685 + tsk->acl_sp_role = 0;
52686 + tsk->acl_role_id = current->acl_role_id;
52687 + tsk->acl = current->acl;
52688 + tsk->role = current->role;
52689 + tsk->signal->curr_ip = current->signal->curr_ip;
52690 + tsk->signal->saved_ip = current->signal->saved_ip;
52691 + if (current->exec_file)
52692 + get_file(current->exec_file);
52693 + tsk->exec_file = current->exec_file;
52694 + tsk->is_writable = current->is_writable;
52695 + if (unlikely(current->signal->used_accept)) {
52696 + current->signal->curr_ip = 0;
52697 + current->signal->saved_ip = 0;
52698 + }
52699 +
52700 + return;
52701 +}
52702 +
52703 +static void
52704 +gr_set_proc_res(struct task_struct *task)
52705 +{
52706 + struct acl_subject_label *proc;
52707 + unsigned short i;
52708 +
52709 + proc = task->acl;
52710 +
52711 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52712 + return;
52713 +
52714 + for (i = 0; i < RLIM_NLIMITS; i++) {
52715 + if (!(proc->resmask & (1 << i)))
52716 + continue;
52717 +
52718 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52719 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52720 + }
52721 +
52722 + return;
52723 +}
52724 +
52725 +extern int __gr_process_user_ban(struct user_struct *user);
52726 +
52727 +int
52728 +gr_check_user_change(int real, int effective, int fs)
52729 +{
52730 + unsigned int i;
52731 + __u16 num;
52732 + uid_t *uidlist;
52733 + int curuid;
52734 + int realok = 0;
52735 + int effectiveok = 0;
52736 + int fsok = 0;
52737 +
52738 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52739 + struct user_struct *user;
52740 +
52741 + if (real == -1)
52742 + goto skipit;
52743 +
52744 + user = find_user(real);
52745 + if (user == NULL)
52746 + goto skipit;
52747 +
52748 + if (__gr_process_user_ban(user)) {
52749 + /* for find_user */
52750 + free_uid(user);
52751 + return 1;
52752 + }
52753 +
52754 + /* for find_user */
52755 + free_uid(user);
52756 +
52757 +skipit:
52758 +#endif
52759 +
52760 + if (unlikely(!(gr_status & GR_READY)))
52761 + return 0;
52762 +
52763 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52764 + gr_log_learn_id_change('u', real, effective, fs);
52765 +
52766 + num = current->acl->user_trans_num;
52767 + uidlist = current->acl->user_transitions;
52768 +
52769 + if (uidlist == NULL)
52770 + return 0;
52771 +
52772 + if (real == -1)
52773 + realok = 1;
52774 + if (effective == -1)
52775 + effectiveok = 1;
52776 + if (fs == -1)
52777 + fsok = 1;
52778 +
52779 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
52780 + for (i = 0; i < num; i++) {
52781 + curuid = (int)uidlist[i];
52782 + if (real == curuid)
52783 + realok = 1;
52784 + if (effective == curuid)
52785 + effectiveok = 1;
52786 + if (fs == curuid)
52787 + fsok = 1;
52788 + }
52789 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
52790 + for (i = 0; i < num; i++) {
52791 + curuid = (int)uidlist[i];
52792 + if (real == curuid)
52793 + break;
52794 + if (effective == curuid)
52795 + break;
52796 + if (fs == curuid)
52797 + break;
52798 + }
52799 + /* not in deny list */
52800 + if (i == num) {
52801 + realok = 1;
52802 + effectiveok = 1;
52803 + fsok = 1;
52804 + }
52805 + }
52806 +
52807 + if (realok && effectiveok && fsok)
52808 + return 0;
52809 + else {
52810 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52811 + return 1;
52812 + }
52813 +}
52814 +
52815 +int
52816 +gr_check_group_change(int real, int effective, int fs)
52817 +{
52818 + unsigned int i;
52819 + __u16 num;
52820 + gid_t *gidlist;
52821 + int curgid;
52822 + int realok = 0;
52823 + int effectiveok = 0;
52824 + int fsok = 0;
52825 +
52826 + if (unlikely(!(gr_status & GR_READY)))
52827 + return 0;
52828 +
52829 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
52830 + gr_log_learn_id_change('g', real, effective, fs);
52831 +
52832 + num = current->acl->group_trans_num;
52833 + gidlist = current->acl->group_transitions;
52834 +
52835 + if (gidlist == NULL)
52836 + return 0;
52837 +
52838 + if (real == -1)
52839 + realok = 1;
52840 + if (effective == -1)
52841 + effectiveok = 1;
52842 + if (fs == -1)
52843 + fsok = 1;
52844 +
52845 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
52846 + for (i = 0; i < num; i++) {
52847 + curgid = (int)gidlist[i];
52848 + if (real == curgid)
52849 + realok = 1;
52850 + if (effective == curgid)
52851 + effectiveok = 1;
52852 + if (fs == curgid)
52853 + fsok = 1;
52854 + }
52855 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
52856 + for (i = 0; i < num; i++) {
52857 + curgid = (int)gidlist[i];
52858 + if (real == curgid)
52859 + break;
52860 + if (effective == curgid)
52861 + break;
52862 + if (fs == curgid)
52863 + break;
52864 + }
52865 + /* not in deny list */
52866 + if (i == num) {
52867 + realok = 1;
52868 + effectiveok = 1;
52869 + fsok = 1;
52870 + }
52871 + }
52872 +
52873 + if (realok && effectiveok && fsok)
52874 + return 0;
52875 + else {
52876 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
52877 + return 1;
52878 + }
52879 +}
52880 +
52881 +extern int gr_acl_is_capable(const int cap);
52882 +
52883 +void
52884 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
52885 +{
52886 + struct acl_role_label *role = task->role;
52887 + struct acl_subject_label *subj = NULL;
52888 + struct acl_object_label *obj;
52889 + struct file *filp;
52890 +
52891 + if (unlikely(!(gr_status & GR_READY)))
52892 + return;
52893 +
52894 + filp = task->exec_file;
52895 +
52896 + /* kernel process, we'll give them the kernel role */
52897 + if (unlikely(!filp)) {
52898 + task->role = kernel_role;
52899 + task->acl = kernel_role->root_label;
52900 + return;
52901 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
52902 + role = lookup_acl_role_label(task, uid, gid);
52903 +
52904 + /* don't change the role if we're not a privileged process */
52905 + if (role && task->role != role &&
52906 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
52907 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
52908 + return;
52909 +
52910 + /* perform subject lookup in possibly new role
52911 + we can use this result below in the case where role == task->role
52912 + */
52913 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
52914 +
52915 + /* if we changed uid/gid, but result in the same role
52916 + and are using inheritance, don't lose the inherited subject
52917 + if current subject is other than what normal lookup
52918 + would result in, we arrived via inheritance, don't
52919 + lose subject
52920 + */
52921 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
52922 + (subj == task->acl)))
52923 + task->acl = subj;
52924 +
52925 + task->role = role;
52926 +
52927 + task->is_writable = 0;
52928 +
52929 + /* ignore additional mmap checks for processes that are writable
52930 + by the default ACL */
52931 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
52932 + if (unlikely(obj->mode & GR_WRITE))
52933 + task->is_writable = 1;
52934 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
52935 + if (unlikely(obj->mode & GR_WRITE))
52936 + task->is_writable = 1;
52937 +
52938 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52939 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
52940 +#endif
52941 +
52942 + gr_set_proc_res(task);
52943 +
52944 + return;
52945 +}
52946 +
52947 +int
52948 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
52949 + const int unsafe_flags)
52950 +{
52951 + struct task_struct *task = current;
52952 + struct acl_subject_label *newacl;
52953 + struct acl_object_label *obj;
52954 + __u32 retmode;
52955 +
52956 + if (unlikely(!(gr_status & GR_READY)))
52957 + return 0;
52958 +
52959 + newacl = chk_subj_label(dentry, mnt, task->role);
52960 +
52961 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
52962 + did an exec
52963 + */
52964 + rcu_read_lock();
52965 + read_lock(&tasklist_lock);
52966 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
52967 + (task->parent->acl->mode & GR_POVERRIDE))) {
52968 + read_unlock(&tasklist_lock);
52969 + rcu_read_unlock();
52970 + goto skip_check;
52971 + }
52972 + read_unlock(&tasklist_lock);
52973 + rcu_read_unlock();
52974 +
52975 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
52976 + !(task->role->roletype & GR_ROLE_GOD) &&
52977 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
52978 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52979 + if (unsafe_flags & LSM_UNSAFE_SHARE)
52980 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
52981 + else
52982 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
52983 + return -EACCES;
52984 + }
52985 +
52986 +skip_check:
52987 +
52988 + obj = chk_obj_label(dentry, mnt, task->acl);
52989 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
52990 +
52991 + if (!(task->acl->mode & GR_INHERITLEARN) &&
52992 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
52993 + if (obj->nested)
52994 + task->acl = obj->nested;
52995 + else
52996 + task->acl = newacl;
52997 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
52998 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
52999 +
53000 + task->is_writable = 0;
53001 +
53002 + /* ignore additional mmap checks for processes that are writable
53003 + by the default ACL */
53004 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
53005 + if (unlikely(obj->mode & GR_WRITE))
53006 + task->is_writable = 1;
53007 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
53008 + if (unlikely(obj->mode & GR_WRITE))
53009 + task->is_writable = 1;
53010 +
53011 + gr_set_proc_res(task);
53012 +
53013 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53014 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53015 +#endif
53016 + return 0;
53017 +}
53018 +
53019 +/* always called with valid inodev ptr */
53020 +static void
53021 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53022 +{
53023 + struct acl_object_label *matchpo;
53024 + struct acl_subject_label *matchps;
53025 + struct acl_subject_label *subj;
53026 + struct acl_role_label *role;
53027 + unsigned int x;
53028 +
53029 + FOR_EACH_ROLE_START(role)
53030 + FOR_EACH_SUBJECT_START(role, subj, x)
53031 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53032 + matchpo->mode |= GR_DELETED;
53033 + FOR_EACH_SUBJECT_END(subj,x)
53034 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53035 + if (subj->inode == ino && subj->device == dev)
53036 + subj->mode |= GR_DELETED;
53037 + FOR_EACH_NESTED_SUBJECT_END(subj)
53038 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53039 + matchps->mode |= GR_DELETED;
53040 + FOR_EACH_ROLE_END(role)
53041 +
53042 + inodev->nentry->deleted = 1;
53043 +
53044 + return;
53045 +}
53046 +
53047 +void
53048 +gr_handle_delete(const ino_t ino, const dev_t dev)
53049 +{
53050 + struct inodev_entry *inodev;
53051 +
53052 + if (unlikely(!(gr_status & GR_READY)))
53053 + return;
53054 +
53055 + write_lock(&gr_inode_lock);
53056 + inodev = lookup_inodev_entry(ino, dev);
53057 + if (inodev != NULL)
53058 + do_handle_delete(inodev, ino, dev);
53059 + write_unlock(&gr_inode_lock);
53060 +
53061 + return;
53062 +}
53063 +
53064 +static void
53065 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53066 + const ino_t newinode, const dev_t newdevice,
53067 + struct acl_subject_label *subj)
53068 +{
53069 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53070 + struct acl_object_label *match;
53071 +
53072 + match = subj->obj_hash[index];
53073 +
53074 + while (match && (match->inode != oldinode ||
53075 + match->device != olddevice ||
53076 + !(match->mode & GR_DELETED)))
53077 + match = match->next;
53078 +
53079 + if (match && (match->inode == oldinode)
53080 + && (match->device == olddevice)
53081 + && (match->mode & GR_DELETED)) {
53082 + if (match->prev == NULL) {
53083 + subj->obj_hash[index] = match->next;
53084 + if (match->next != NULL)
53085 + match->next->prev = NULL;
53086 + } else {
53087 + match->prev->next = match->next;
53088 + if (match->next != NULL)
53089 + match->next->prev = match->prev;
53090 + }
53091 + match->prev = NULL;
53092 + match->next = NULL;
53093 + match->inode = newinode;
53094 + match->device = newdevice;
53095 + match->mode &= ~GR_DELETED;
53096 +
53097 + insert_acl_obj_label(match, subj);
53098 + }
53099 +
53100 + return;
53101 +}
53102 +
53103 +static void
53104 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53105 + const ino_t newinode, const dev_t newdevice,
53106 + struct acl_role_label *role)
53107 +{
53108 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53109 + struct acl_subject_label *match;
53110 +
53111 + match = role->subj_hash[index];
53112 +
53113 + while (match && (match->inode != oldinode ||
53114 + match->device != olddevice ||
53115 + !(match->mode & GR_DELETED)))
53116 + match = match->next;
53117 +
53118 + if (match && (match->inode == oldinode)
53119 + && (match->device == olddevice)
53120 + && (match->mode & GR_DELETED)) {
53121 + if (match->prev == NULL) {
53122 + role->subj_hash[index] = match->next;
53123 + if (match->next != NULL)
53124 + match->next->prev = NULL;
53125 + } else {
53126 + match->prev->next = match->next;
53127 + if (match->next != NULL)
53128 + match->next->prev = match->prev;
53129 + }
53130 + match->prev = NULL;
53131 + match->next = NULL;
53132 + match->inode = newinode;
53133 + match->device = newdevice;
53134 + match->mode &= ~GR_DELETED;
53135 +
53136 + insert_acl_subj_label(match, role);
53137 + }
53138 +
53139 + return;
53140 +}
53141 +
53142 +static void
53143 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53144 + const ino_t newinode, const dev_t newdevice)
53145 +{
53146 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53147 + struct inodev_entry *match;
53148 +
53149 + match = inodev_set.i_hash[index];
53150 +
53151 + while (match && (match->nentry->inode != oldinode ||
53152 + match->nentry->device != olddevice || !match->nentry->deleted))
53153 + match = match->next;
53154 +
53155 + if (match && (match->nentry->inode == oldinode)
53156 + && (match->nentry->device == olddevice) &&
53157 + match->nentry->deleted) {
53158 + if (match->prev == NULL) {
53159 + inodev_set.i_hash[index] = match->next;
53160 + if (match->next != NULL)
53161 + match->next->prev = NULL;
53162 + } else {
53163 + match->prev->next = match->next;
53164 + if (match->next != NULL)
53165 + match->next->prev = match->prev;
53166 + }
53167 + match->prev = NULL;
53168 + match->next = NULL;
53169 + match->nentry->inode = newinode;
53170 + match->nentry->device = newdevice;
53171 + match->nentry->deleted = 0;
53172 +
53173 + insert_inodev_entry(match);
53174 + }
53175 +
53176 + return;
53177 +}
53178 +
53179 +static void
53180 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53181 +{
53182 + struct acl_subject_label *subj;
53183 + struct acl_role_label *role;
53184 + unsigned int x;
53185 +
53186 + FOR_EACH_ROLE_START(role)
53187 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53188 +
53189 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
53190 + if ((subj->inode == ino) && (subj->device == dev)) {
53191 + subj->inode = ino;
53192 + subj->device = dev;
53193 + }
53194 + FOR_EACH_NESTED_SUBJECT_END(subj)
53195 + FOR_EACH_SUBJECT_START(role, subj, x)
53196 + update_acl_obj_label(matchn->inode, matchn->device,
53197 + ino, dev, subj);
53198 + FOR_EACH_SUBJECT_END(subj,x)
53199 + FOR_EACH_ROLE_END(role)
53200 +
53201 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53202 +
53203 + return;
53204 +}
53205 +
53206 +static void
53207 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53208 + const struct vfsmount *mnt)
53209 +{
53210 + ino_t ino = dentry->d_inode->i_ino;
53211 + dev_t dev = __get_dev(dentry);
53212 +
53213 + __do_handle_create(matchn, ino, dev);
53214 +
53215 + return;
53216 +}
53217 +
53218 +void
53219 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53220 +{
53221 + struct name_entry *matchn;
53222 +
53223 + if (unlikely(!(gr_status & GR_READY)))
53224 + return;
53225 +
53226 + preempt_disable();
53227 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53228 +
53229 + if (unlikely((unsigned long)matchn)) {
53230 + write_lock(&gr_inode_lock);
53231 + do_handle_create(matchn, dentry, mnt);
53232 + write_unlock(&gr_inode_lock);
53233 + }
53234 + preempt_enable();
53235 +
53236 + return;
53237 +}
53238 +
53239 +void
53240 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53241 +{
53242 + struct name_entry *matchn;
53243 +
53244 + if (unlikely(!(gr_status & GR_READY)))
53245 + return;
53246 +
53247 + preempt_disable();
53248 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53249 +
53250 + if (unlikely((unsigned long)matchn)) {
53251 + write_lock(&gr_inode_lock);
53252 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53253 + write_unlock(&gr_inode_lock);
53254 + }
53255 + preempt_enable();
53256 +
53257 + return;
53258 +}
53259 +
53260 +void
53261 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53262 + struct dentry *old_dentry,
53263 + struct dentry *new_dentry,
53264 + struct vfsmount *mnt, const __u8 replace)
53265 +{
53266 + struct name_entry *matchn;
53267 + struct inodev_entry *inodev;
53268 + struct inode *inode = new_dentry->d_inode;
53269 + ino_t old_ino = old_dentry->d_inode->i_ino;
53270 + dev_t old_dev = __get_dev(old_dentry);
53271 +
53272 + /* vfs_rename swaps the name and parent link for old_dentry and
53273 + new_dentry
53274 + at this point, old_dentry has the new name, parent link, and inode
53275 + for the renamed file
53276 + if a file is being replaced by a rename, new_dentry has the inode
53277 + and name for the replaced file
53278 + */
53279 +
53280 + if (unlikely(!(gr_status & GR_READY)))
53281 + return;
53282 +
53283 + preempt_disable();
53284 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53285 +
53286 + /* we wouldn't have to check d_inode if it weren't for
53287 + NFS silly-renaming
53288 + */
53289 +
53290 + write_lock(&gr_inode_lock);
53291 + if (unlikely(replace && inode)) {
53292 + ino_t new_ino = inode->i_ino;
53293 + dev_t new_dev = __get_dev(new_dentry);
53294 +
53295 + inodev = lookup_inodev_entry(new_ino, new_dev);
53296 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
53297 + do_handle_delete(inodev, new_ino, new_dev);
53298 + }
53299 +
53300 + inodev = lookup_inodev_entry(old_ino, old_dev);
53301 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
53302 + do_handle_delete(inodev, old_ino, old_dev);
53303 +
53304 + if (unlikely((unsigned long)matchn))
53305 + do_handle_create(matchn, old_dentry, mnt);
53306 +
53307 + write_unlock(&gr_inode_lock);
53308 + preempt_enable();
53309 +
53310 + return;
53311 +}
53312 +
53313 +static int
53314 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53315 + unsigned char **sum)
53316 +{
53317 + struct acl_role_label *r;
53318 + struct role_allowed_ip *ipp;
53319 + struct role_transition *trans;
53320 + unsigned int i;
53321 + int found = 0;
53322 + u32 curr_ip = current->signal->curr_ip;
53323 +
53324 + current->signal->saved_ip = curr_ip;
53325 +
53326 + /* check transition table */
53327 +
53328 + for (trans = current->role->transitions; trans; trans = trans->next) {
53329 + if (!strcmp(rolename, trans->rolename)) {
53330 + found = 1;
53331 + break;
53332 + }
53333 + }
53334 +
53335 + if (!found)
53336 + return 0;
53337 +
53338 + /* handle special roles that do not require authentication
53339 + and check ip */
53340 +
53341 + FOR_EACH_ROLE_START(r)
53342 + if (!strcmp(rolename, r->rolename) &&
53343 + (r->roletype & GR_ROLE_SPECIAL)) {
53344 + found = 0;
53345 + if (r->allowed_ips != NULL) {
53346 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
53347 + if ((ntohl(curr_ip) & ipp->netmask) ==
53348 + (ntohl(ipp->addr) & ipp->netmask))
53349 + found = 1;
53350 + }
53351 + } else
53352 + found = 2;
53353 + if (!found)
53354 + return 0;
53355 +
53356 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53357 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53358 + *salt = NULL;
53359 + *sum = NULL;
53360 + return 1;
53361 + }
53362 + }
53363 + FOR_EACH_ROLE_END(r)
53364 +
53365 + for (i = 0; i < num_sprole_pws; i++) {
53366 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53367 + *salt = acl_special_roles[i]->salt;
53368 + *sum = acl_special_roles[i]->sum;
53369 + return 1;
53370 + }
53371 + }
53372 +
53373 + return 0;
53374 +}
53375 +
53376 +static void
53377 +assign_special_role(char *rolename)
53378 +{
53379 + struct acl_object_label *obj;
53380 + struct acl_role_label *r;
53381 + struct acl_role_label *assigned = NULL;
53382 + struct task_struct *tsk;
53383 + struct file *filp;
53384 +
53385 + FOR_EACH_ROLE_START(r)
53386 + if (!strcmp(rolename, r->rolename) &&
53387 + (r->roletype & GR_ROLE_SPECIAL)) {
53388 + assigned = r;
53389 + break;
53390 + }
53391 + FOR_EACH_ROLE_END(r)
53392 +
53393 + if (!assigned)
53394 + return;
53395 +
53396 + read_lock(&tasklist_lock);
53397 + read_lock(&grsec_exec_file_lock);
53398 +
53399 + tsk = current->real_parent;
53400 + if (tsk == NULL)
53401 + goto out_unlock;
53402 +
53403 + filp = tsk->exec_file;
53404 + if (filp == NULL)
53405 + goto out_unlock;
53406 +
53407 + tsk->is_writable = 0;
53408 +
53409 + tsk->acl_sp_role = 1;
53410 + tsk->acl_role_id = ++acl_sp_role_value;
53411 + tsk->role = assigned;
53412 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53413 +
53414 + /* ignore additional mmap checks for processes that are writable
53415 + by the default ACL */
53416 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53417 + if (unlikely(obj->mode & GR_WRITE))
53418 + tsk->is_writable = 1;
53419 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53420 + if (unlikely(obj->mode & GR_WRITE))
53421 + tsk->is_writable = 1;
53422 +
53423 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53424 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53425 +#endif
53426 +
53427 +out_unlock:
53428 + read_unlock(&grsec_exec_file_lock);
53429 + read_unlock(&tasklist_lock);
53430 + return;
53431 +}
53432 +
53433 +int gr_check_secure_terminal(struct task_struct *task)
53434 +{
53435 + struct task_struct *p, *p2, *p3;
53436 + struct files_struct *files;
53437 + struct fdtable *fdt;
53438 + struct file *our_file = NULL, *file;
53439 + int i;
53440 +
53441 + if (task->signal->tty == NULL)
53442 + return 1;
53443 +
53444 + files = get_files_struct(task);
53445 + if (files != NULL) {
53446 + rcu_read_lock();
53447 + fdt = files_fdtable(files);
53448 + for (i=0; i < fdt->max_fds; i++) {
53449 + file = fcheck_files(files, i);
53450 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53451 + get_file(file);
53452 + our_file = file;
53453 + }
53454 + }
53455 + rcu_read_unlock();
53456 + put_files_struct(files);
53457 + }
53458 +
53459 + if (our_file == NULL)
53460 + return 1;
53461 +
53462 + read_lock(&tasklist_lock);
53463 + do_each_thread(p2, p) {
53464 + files = get_files_struct(p);
53465 + if (files == NULL ||
53466 + (p->signal && p->signal->tty == task->signal->tty)) {
53467 + if (files != NULL)
53468 + put_files_struct(files);
53469 + continue;
53470 + }
53471 + rcu_read_lock();
53472 + fdt = files_fdtable(files);
53473 + for (i=0; i < fdt->max_fds; i++) {
53474 + file = fcheck_files(files, i);
53475 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53476 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53477 + p3 = task;
53478 + while (p3->pid > 0) {
53479 + if (p3 == p)
53480 + break;
53481 + p3 = p3->real_parent;
53482 + }
53483 + if (p3 == p)
53484 + break;
53485 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53486 + gr_handle_alertkill(p);
53487 + rcu_read_unlock();
53488 + put_files_struct(files);
53489 + read_unlock(&tasklist_lock);
53490 + fput(our_file);
53491 + return 0;
53492 + }
53493 + }
53494 + rcu_read_unlock();
53495 + put_files_struct(files);
53496 + } while_each_thread(p2, p);
53497 + read_unlock(&tasklist_lock);
53498 +
53499 + fput(our_file);
53500 + return 1;
53501 +}
53502 +
53503 +static int gr_rbac_disable(void *unused)
53504 +{
53505 + pax_open_kernel();
53506 + gr_status &= ~GR_READY;
53507 + pax_close_kernel();
53508 +
53509 + return 0;
53510 +}
53511 +
53512 +ssize_t
53513 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53514 +{
53515 + struct gr_arg_wrapper uwrap;
53516 + unsigned char *sprole_salt = NULL;
53517 + unsigned char *sprole_sum = NULL;
53518 + int error = sizeof (struct gr_arg_wrapper);
53519 + int error2 = 0;
53520 +
53521 + mutex_lock(&gr_dev_mutex);
53522 +
53523 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53524 + error = -EPERM;
53525 + goto out;
53526 + }
53527 +
53528 + if (count != sizeof (struct gr_arg_wrapper)) {
53529 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53530 + error = -EINVAL;
53531 + goto out;
53532 + }
53533 +
53534 +
53535 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53536 + gr_auth_expires = 0;
53537 + gr_auth_attempts = 0;
53538 + }
53539 +
53540 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53541 + error = -EFAULT;
53542 + goto out;
53543 + }
53544 +
53545 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53546 + error = -EINVAL;
53547 + goto out;
53548 + }
53549 +
53550 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53551 + error = -EFAULT;
53552 + goto out;
53553 + }
53554 +
53555 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53556 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53557 + time_after(gr_auth_expires, get_seconds())) {
53558 + error = -EBUSY;
53559 + goto out;
53560 + }
53561 +
53562 + /* if non-root trying to do anything other than use a special role,
53563 + do not attempt authentication, do not count towards authentication
53564 + locking
53565 + */
53566 +
53567 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53568 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53569 + current_uid()) {
53570 + error = -EPERM;
53571 + goto out;
53572 + }
53573 +
53574 + /* ensure pw and special role name are null terminated */
53575 +
53576 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53577 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53578 +
53579 + /* Okay.
53580 + * We have our enough of the argument structure..(we have yet
53581 + * to copy_from_user the tables themselves) . Copy the tables
53582 + * only if we need them, i.e. for loading operations. */
53583 +
53584 + switch (gr_usermode->mode) {
53585 + case GR_STATUS:
53586 + if (gr_status & GR_READY) {
53587 + error = 1;
53588 + if (!gr_check_secure_terminal(current))
53589 + error = 3;
53590 + } else
53591 + error = 2;
53592 + goto out;
53593 + case GR_SHUTDOWN:
53594 + if ((gr_status & GR_READY)
53595 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53596 + stop_machine(gr_rbac_disable, NULL, NULL);
53597 + free_variables();
53598 + memset(gr_usermode, 0, sizeof (struct gr_arg));
53599 + memset(gr_system_salt, 0, GR_SALT_LEN);
53600 + memset(gr_system_sum, 0, GR_SHA_LEN);
53601 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53602 + } else if (gr_status & GR_READY) {
53603 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53604 + error = -EPERM;
53605 + } else {
53606 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53607 + error = -EAGAIN;
53608 + }
53609 + break;
53610 + case GR_ENABLE:
53611 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53612 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53613 + else {
53614 + if (gr_status & GR_READY)
53615 + error = -EAGAIN;
53616 + else
53617 + error = error2;
53618 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53619 + }
53620 + break;
53621 + case GR_RELOAD:
53622 + if (!(gr_status & GR_READY)) {
53623 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53624 + error = -EAGAIN;
53625 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53626 + stop_machine(gr_rbac_disable, NULL, NULL);
53627 + free_variables();
53628 + error2 = gracl_init(gr_usermode);
53629 + if (!error2)
53630 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
53631 + else {
53632 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53633 + error = error2;
53634 + }
53635 + } else {
53636 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53637 + error = -EPERM;
53638 + }
53639 + break;
53640 + case GR_SEGVMOD:
53641 + if (unlikely(!(gr_status & GR_READY))) {
53642 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53643 + error = -EAGAIN;
53644 + break;
53645 + }
53646 +
53647 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53648 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53649 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53650 + struct acl_subject_label *segvacl;
53651 + segvacl =
53652 + lookup_acl_subj_label(gr_usermode->segv_inode,
53653 + gr_usermode->segv_device,
53654 + current->role);
53655 + if (segvacl) {
53656 + segvacl->crashes = 0;
53657 + segvacl->expires = 0;
53658 + }
53659 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53660 + gr_remove_uid(gr_usermode->segv_uid);
53661 + }
53662 + } else {
53663 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53664 + error = -EPERM;
53665 + }
53666 + break;
53667 + case GR_SPROLE:
53668 + case GR_SPROLEPAM:
53669 + if (unlikely(!(gr_status & GR_READY))) {
53670 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53671 + error = -EAGAIN;
53672 + break;
53673 + }
53674 +
53675 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53676 + current->role->expires = 0;
53677 + current->role->auth_attempts = 0;
53678 + }
53679 +
53680 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53681 + time_after(current->role->expires, get_seconds())) {
53682 + error = -EBUSY;
53683 + goto out;
53684 + }
53685 +
53686 + if (lookup_special_role_auth
53687 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53688 + && ((!sprole_salt && !sprole_sum)
53689 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53690 + char *p = "";
53691 + assign_special_role(gr_usermode->sp_role);
53692 + read_lock(&tasklist_lock);
53693 + if (current->real_parent)
53694 + p = current->real_parent->role->rolename;
53695 + read_unlock(&tasklist_lock);
53696 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53697 + p, acl_sp_role_value);
53698 + } else {
53699 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53700 + error = -EPERM;
53701 + if(!(current->role->auth_attempts++))
53702 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53703 +
53704 + goto out;
53705 + }
53706 + break;
53707 + case GR_UNSPROLE:
53708 + if (unlikely(!(gr_status & GR_READY))) {
53709 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53710 + error = -EAGAIN;
53711 + break;
53712 + }
53713 +
53714 + if (current->role->roletype & GR_ROLE_SPECIAL) {
53715 + char *p = "";
53716 + int i = 0;
53717 +
53718 + read_lock(&tasklist_lock);
53719 + if (current->real_parent) {
53720 + p = current->real_parent->role->rolename;
53721 + i = current->real_parent->acl_role_id;
53722 + }
53723 + read_unlock(&tasklist_lock);
53724 +
53725 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53726 + gr_set_acls(1);
53727 + } else {
53728 + error = -EPERM;
53729 + goto out;
53730 + }
53731 + break;
53732 + default:
53733 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53734 + error = -EINVAL;
53735 + break;
53736 + }
53737 +
53738 + if (error != -EPERM)
53739 + goto out;
53740 +
53741 + if(!(gr_auth_attempts++))
53742 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53743 +
53744 + out:
53745 + mutex_unlock(&gr_dev_mutex);
53746 + return error;
53747 +}
53748 +
53749 +/* must be called with
53750 + rcu_read_lock();
53751 + read_lock(&tasklist_lock);
53752 + read_lock(&grsec_exec_file_lock);
53753 +*/
53754 +int gr_apply_subject_to_task(struct task_struct *task)
53755 +{
53756 + struct acl_object_label *obj;
53757 + char *tmpname;
53758 + struct acl_subject_label *tmpsubj;
53759 + struct file *filp;
53760 + struct name_entry *nmatch;
53761 +
53762 + filp = task->exec_file;
53763 + if (filp == NULL)
53764 + return 0;
53765 +
53766 + /* the following is to apply the correct subject
53767 + on binaries running when the RBAC system
53768 + is enabled, when the binaries have been
53769 + replaced or deleted since their execution
53770 + -----
53771 + when the RBAC system starts, the inode/dev
53772 + from exec_file will be one the RBAC system
53773 + is unaware of. It only knows the inode/dev
53774 + of the present file on disk, or the absence
53775 + of it.
53776 + */
53777 + preempt_disable();
53778 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
53779 +
53780 + nmatch = lookup_name_entry(tmpname);
53781 + preempt_enable();
53782 + tmpsubj = NULL;
53783 + if (nmatch) {
53784 + if (nmatch->deleted)
53785 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
53786 + else
53787 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
53788 + if (tmpsubj != NULL)
53789 + task->acl = tmpsubj;
53790 + }
53791 + if (tmpsubj == NULL)
53792 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
53793 + task->role);
53794 + if (task->acl) {
53795 + task->is_writable = 0;
53796 + /* ignore additional mmap checks for processes that are writable
53797 + by the default ACL */
53798 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53799 + if (unlikely(obj->mode & GR_WRITE))
53800 + task->is_writable = 1;
53801 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53802 + if (unlikely(obj->mode & GR_WRITE))
53803 + task->is_writable = 1;
53804 +
53805 + gr_set_proc_res(task);
53806 +
53807 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53808 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53809 +#endif
53810 + } else {
53811 + return 1;
53812 + }
53813 +
53814 + return 0;
53815 +}
53816 +
53817 +int
53818 +gr_set_acls(const int type)
53819 +{
53820 + struct task_struct *task, *task2;
53821 + struct acl_role_label *role = current->role;
53822 + __u16 acl_role_id = current->acl_role_id;
53823 + const struct cred *cred;
53824 + int ret;
53825 +
53826 + rcu_read_lock();
53827 + read_lock(&tasklist_lock);
53828 + read_lock(&grsec_exec_file_lock);
53829 + do_each_thread(task2, task) {
53830 + /* check to see if we're called from the exit handler,
53831 + if so, only replace ACLs that have inherited the admin
53832 + ACL */
53833 +
53834 + if (type && (task->role != role ||
53835 + task->acl_role_id != acl_role_id))
53836 + continue;
53837 +
53838 + task->acl_role_id = 0;
53839 + task->acl_sp_role = 0;
53840 +
53841 + if (task->exec_file) {
53842 + cred = __task_cred(task);
53843 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
53844 + ret = gr_apply_subject_to_task(task);
53845 + if (ret) {
53846 + read_unlock(&grsec_exec_file_lock);
53847 + read_unlock(&tasklist_lock);
53848 + rcu_read_unlock();
53849 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
53850 + return ret;
53851 + }
53852 + } else {
53853 + // it's a kernel process
53854 + task->role = kernel_role;
53855 + task->acl = kernel_role->root_label;
53856 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
53857 + task->acl->mode &= ~GR_PROCFIND;
53858 +#endif
53859 + }
53860 + } while_each_thread(task2, task);
53861 + read_unlock(&grsec_exec_file_lock);
53862 + read_unlock(&tasklist_lock);
53863 + rcu_read_unlock();
53864 +
53865 + return 0;
53866 +}
53867 +
53868 +void
53869 +gr_learn_resource(const struct task_struct *task,
53870 + const int res, const unsigned long wanted, const int gt)
53871 +{
53872 + struct acl_subject_label *acl;
53873 + const struct cred *cred;
53874 +
53875 + if (unlikely((gr_status & GR_READY) &&
53876 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
53877 + goto skip_reslog;
53878 +
53879 +#ifdef CONFIG_GRKERNSEC_RESLOG
53880 + gr_log_resource(task, res, wanted, gt);
53881 +#endif
53882 + skip_reslog:
53883 +
53884 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
53885 + return;
53886 +
53887 + acl = task->acl;
53888 +
53889 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
53890 + !(acl->resmask & (1 << (unsigned short) res))))
53891 + return;
53892 +
53893 + if (wanted >= acl->res[res].rlim_cur) {
53894 + unsigned long res_add;
53895 +
53896 + res_add = wanted;
53897 + switch (res) {
53898 + case RLIMIT_CPU:
53899 + res_add += GR_RLIM_CPU_BUMP;
53900 + break;
53901 + case RLIMIT_FSIZE:
53902 + res_add += GR_RLIM_FSIZE_BUMP;
53903 + break;
53904 + case RLIMIT_DATA:
53905 + res_add += GR_RLIM_DATA_BUMP;
53906 + break;
53907 + case RLIMIT_STACK:
53908 + res_add += GR_RLIM_STACK_BUMP;
53909 + break;
53910 + case RLIMIT_CORE:
53911 + res_add += GR_RLIM_CORE_BUMP;
53912 + break;
53913 + case RLIMIT_RSS:
53914 + res_add += GR_RLIM_RSS_BUMP;
53915 + break;
53916 + case RLIMIT_NPROC:
53917 + res_add += GR_RLIM_NPROC_BUMP;
53918 + break;
53919 + case RLIMIT_NOFILE:
53920 + res_add += GR_RLIM_NOFILE_BUMP;
53921 + break;
53922 + case RLIMIT_MEMLOCK:
53923 + res_add += GR_RLIM_MEMLOCK_BUMP;
53924 + break;
53925 + case RLIMIT_AS:
53926 + res_add += GR_RLIM_AS_BUMP;
53927 + break;
53928 + case RLIMIT_LOCKS:
53929 + res_add += GR_RLIM_LOCKS_BUMP;
53930 + break;
53931 + case RLIMIT_SIGPENDING:
53932 + res_add += GR_RLIM_SIGPENDING_BUMP;
53933 + break;
53934 + case RLIMIT_MSGQUEUE:
53935 + res_add += GR_RLIM_MSGQUEUE_BUMP;
53936 + break;
53937 + case RLIMIT_NICE:
53938 + res_add += GR_RLIM_NICE_BUMP;
53939 + break;
53940 + case RLIMIT_RTPRIO:
53941 + res_add += GR_RLIM_RTPRIO_BUMP;
53942 + break;
53943 + case RLIMIT_RTTIME:
53944 + res_add += GR_RLIM_RTTIME_BUMP;
53945 + break;
53946 + }
53947 +
53948 + acl->res[res].rlim_cur = res_add;
53949 +
53950 + if (wanted > acl->res[res].rlim_max)
53951 + acl->res[res].rlim_max = res_add;
53952 +
53953 + /* only log the subject filename, since resource logging is supported for
53954 + single-subject learning only */
53955 + rcu_read_lock();
53956 + cred = __task_cred(task);
53957 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
53958 + task->role->roletype, cred->uid, cred->gid, acl->filename,
53959 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
53960 + "", (unsigned long) res, &task->signal->saved_ip);
53961 + rcu_read_unlock();
53962 + }
53963 +
53964 + return;
53965 +}
53966 +
53967 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
53968 +void
53969 +pax_set_initial_flags(struct linux_binprm *bprm)
53970 +{
53971 + struct task_struct *task = current;
53972 + struct acl_subject_label *proc;
53973 + unsigned long flags;
53974 +
53975 + if (unlikely(!(gr_status & GR_READY)))
53976 + return;
53977 +
53978 + flags = pax_get_flags(task);
53979 +
53980 + proc = task->acl;
53981 +
53982 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
53983 + flags &= ~MF_PAX_PAGEEXEC;
53984 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
53985 + flags &= ~MF_PAX_SEGMEXEC;
53986 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
53987 + flags &= ~MF_PAX_RANDMMAP;
53988 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
53989 + flags &= ~MF_PAX_EMUTRAMP;
53990 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
53991 + flags &= ~MF_PAX_MPROTECT;
53992 +
53993 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
53994 + flags |= MF_PAX_PAGEEXEC;
53995 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
53996 + flags |= MF_PAX_SEGMEXEC;
53997 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
53998 + flags |= MF_PAX_RANDMMAP;
53999 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54000 + flags |= MF_PAX_EMUTRAMP;
54001 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54002 + flags |= MF_PAX_MPROTECT;
54003 +
54004 + pax_set_flags(task, flags);
54005 +
54006 + return;
54007 +}
54008 +#endif
54009 +
54010 +int
54011 +gr_handle_proc_ptrace(struct task_struct *task)
54012 +{
54013 + struct file *filp;
54014 + struct task_struct *tmp = task;
54015 + struct task_struct *curtemp = current;
54016 + __u32 retmode;
54017 +
54018 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54019 + if (unlikely(!(gr_status & GR_READY)))
54020 + return 0;
54021 +#endif
54022 +
54023 + read_lock(&tasklist_lock);
54024 + read_lock(&grsec_exec_file_lock);
54025 + filp = task->exec_file;
54026 +
54027 + while (tmp->pid > 0) {
54028 + if (tmp == curtemp)
54029 + break;
54030 + tmp = tmp->real_parent;
54031 + }
54032 +
54033 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54034 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54035 + read_unlock(&grsec_exec_file_lock);
54036 + read_unlock(&tasklist_lock);
54037 + return 1;
54038 + }
54039 +
54040 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54041 + if (!(gr_status & GR_READY)) {
54042 + read_unlock(&grsec_exec_file_lock);
54043 + read_unlock(&tasklist_lock);
54044 + return 0;
54045 + }
54046 +#endif
54047 +
54048 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54049 + read_unlock(&grsec_exec_file_lock);
54050 + read_unlock(&tasklist_lock);
54051 +
54052 + if (retmode & GR_NOPTRACE)
54053 + return 1;
54054 +
54055 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54056 + && (current->acl != task->acl || (current->acl != current->role->root_label
54057 + && current->pid != task->pid)))
54058 + return 1;
54059 +
54060 + return 0;
54061 +}
54062 +
54063 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54064 +{
54065 + if (unlikely(!(gr_status & GR_READY)))
54066 + return;
54067 +
54068 + if (!(current->role->roletype & GR_ROLE_GOD))
54069 + return;
54070 +
54071 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54072 + p->role->rolename, gr_task_roletype_to_char(p),
54073 + p->acl->filename);
54074 +}
54075 +
54076 +int
54077 +gr_handle_ptrace(struct task_struct *task, const long request)
54078 +{
54079 + struct task_struct *tmp = task;
54080 + struct task_struct *curtemp = current;
54081 + __u32 retmode;
54082 +
54083 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54084 + if (unlikely(!(gr_status & GR_READY)))
54085 + return 0;
54086 +#endif
54087 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
54088 + read_lock(&tasklist_lock);
54089 + while (tmp->pid > 0) {
54090 + if (tmp == curtemp)
54091 + break;
54092 + tmp = tmp->real_parent;
54093 + }
54094 +
54095 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54096 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54097 + read_unlock(&tasklist_lock);
54098 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54099 + return 1;
54100 + }
54101 + read_unlock(&tasklist_lock);
54102 + }
54103 +
54104 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54105 + if (!(gr_status & GR_READY))
54106 + return 0;
54107 +#endif
54108 +
54109 + read_lock(&grsec_exec_file_lock);
54110 + if (unlikely(!task->exec_file)) {
54111 + read_unlock(&grsec_exec_file_lock);
54112 + return 0;
54113 + }
54114 +
54115 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54116 + read_unlock(&grsec_exec_file_lock);
54117 +
54118 + if (retmode & GR_NOPTRACE) {
54119 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54120 + return 1;
54121 + }
54122 +
54123 + if (retmode & GR_PTRACERD) {
54124 + switch (request) {
54125 + case PTRACE_SEIZE:
54126 + case PTRACE_POKETEXT:
54127 + case PTRACE_POKEDATA:
54128 + case PTRACE_POKEUSR:
54129 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54130 + case PTRACE_SETREGS:
54131 + case PTRACE_SETFPREGS:
54132 +#endif
54133 +#ifdef CONFIG_X86
54134 + case PTRACE_SETFPXREGS:
54135 +#endif
54136 +#ifdef CONFIG_ALTIVEC
54137 + case PTRACE_SETVRREGS:
54138 +#endif
54139 + return 1;
54140 + default:
54141 + return 0;
54142 + }
54143 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
54144 + !(current->role->roletype & GR_ROLE_GOD) &&
54145 + (current->acl != task->acl)) {
54146 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54147 + return 1;
54148 + }
54149 +
54150 + return 0;
54151 +}
54152 +
54153 +static int is_writable_mmap(const struct file *filp)
54154 +{
54155 + struct task_struct *task = current;
54156 + struct acl_object_label *obj, *obj2;
54157 +
54158 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
54159 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
54160 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54161 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54162 + task->role->root_label);
54163 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54164 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54165 + return 1;
54166 + }
54167 + }
54168 + return 0;
54169 +}
54170 +
54171 +int
54172 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54173 +{
54174 + __u32 mode;
54175 +
54176 + if (unlikely(!file || !(prot & PROT_EXEC)))
54177 + return 1;
54178 +
54179 + if (is_writable_mmap(file))
54180 + return 0;
54181 +
54182 + mode =
54183 + gr_search_file(file->f_path.dentry,
54184 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54185 + file->f_path.mnt);
54186 +
54187 + if (!gr_tpe_allow(file))
54188 + return 0;
54189 +
54190 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54191 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54192 + return 0;
54193 + } else if (unlikely(!(mode & GR_EXEC))) {
54194 + return 0;
54195 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54196 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54197 + return 1;
54198 + }
54199 +
54200 + return 1;
54201 +}
54202 +
54203 +int
54204 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54205 +{
54206 + __u32 mode;
54207 +
54208 + if (unlikely(!file || !(prot & PROT_EXEC)))
54209 + return 1;
54210 +
54211 + if (is_writable_mmap(file))
54212 + return 0;
54213 +
54214 + mode =
54215 + gr_search_file(file->f_path.dentry,
54216 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54217 + file->f_path.mnt);
54218 +
54219 + if (!gr_tpe_allow(file))
54220 + return 0;
54221 +
54222 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54223 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54224 + return 0;
54225 + } else if (unlikely(!(mode & GR_EXEC))) {
54226 + return 0;
54227 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54228 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54229 + return 1;
54230 + }
54231 +
54232 + return 1;
54233 +}
54234 +
54235 +void
54236 +gr_acl_handle_psacct(struct task_struct *task, const long code)
54237 +{
54238 + unsigned long runtime;
54239 + unsigned long cputime;
54240 + unsigned int wday, cday;
54241 + __u8 whr, chr;
54242 + __u8 wmin, cmin;
54243 + __u8 wsec, csec;
54244 + struct timespec timeval;
54245 +
54246 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54247 + !(task->acl->mode & GR_PROCACCT)))
54248 + return;
54249 +
54250 + do_posix_clock_monotonic_gettime(&timeval);
54251 + runtime = timeval.tv_sec - task->start_time.tv_sec;
54252 + wday = runtime / (3600 * 24);
54253 + runtime -= wday * (3600 * 24);
54254 + whr = runtime / 3600;
54255 + runtime -= whr * 3600;
54256 + wmin = runtime / 60;
54257 + runtime -= wmin * 60;
54258 + wsec = runtime;
54259 +
54260 + cputime = (task->utime + task->stime) / HZ;
54261 + cday = cputime / (3600 * 24);
54262 + cputime -= cday * (3600 * 24);
54263 + chr = cputime / 3600;
54264 + cputime -= chr * 3600;
54265 + cmin = cputime / 60;
54266 + cputime -= cmin * 60;
54267 + csec = cputime;
54268 +
54269 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54270 +
54271 + return;
54272 +}
54273 +
54274 +void gr_set_kernel_label(struct task_struct *task)
54275 +{
54276 + if (gr_status & GR_READY) {
54277 + task->role = kernel_role;
54278 + task->acl = kernel_role->root_label;
54279 + }
54280 + return;
54281 +}
54282 +
54283 +#ifdef CONFIG_TASKSTATS
54284 +int gr_is_taskstats_denied(int pid)
54285 +{
54286 + struct task_struct *task;
54287 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54288 + const struct cred *cred;
54289 +#endif
54290 + int ret = 0;
54291 +
54292 + /* restrict taskstats viewing to un-chrooted root users
54293 + who have the 'view' subject flag if the RBAC system is enabled
54294 + */
54295 +
54296 + rcu_read_lock();
54297 + read_lock(&tasklist_lock);
54298 + task = find_task_by_vpid(pid);
54299 + if (task) {
54300 +#ifdef CONFIG_GRKERNSEC_CHROOT
54301 + if (proc_is_chrooted(task))
54302 + ret = -EACCES;
54303 +#endif
54304 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54305 + cred = __task_cred(task);
54306 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54307 + if (cred->uid != 0)
54308 + ret = -EACCES;
54309 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54310 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54311 + ret = -EACCES;
54312 +#endif
54313 +#endif
54314 + if (gr_status & GR_READY) {
54315 + if (!(task->acl->mode & GR_VIEW))
54316 + ret = -EACCES;
54317 + }
54318 + } else
54319 + ret = -ENOENT;
54320 +
54321 + read_unlock(&tasklist_lock);
54322 + rcu_read_unlock();
54323 +
54324 + return ret;
54325 +}
54326 +#endif
54327 +
54328 +/* AUXV entries are filled via a descendant of search_binary_handler
54329 + after we've already applied the subject for the target
54330 +*/
54331 +int gr_acl_enable_at_secure(void)
54332 +{
54333 + if (unlikely(!(gr_status & GR_READY)))
54334 + return 0;
54335 +
54336 + if (current->acl->mode & GR_ATSECURE)
54337 + return 1;
54338 +
54339 + return 0;
54340 +}
54341 +
54342 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54343 +{
54344 + struct task_struct *task = current;
54345 + struct dentry *dentry = file->f_path.dentry;
54346 + struct vfsmount *mnt = file->f_path.mnt;
54347 + struct acl_object_label *obj, *tmp;
54348 + struct acl_subject_label *subj;
54349 + unsigned int bufsize;
54350 + int is_not_root;
54351 + char *path;
54352 + dev_t dev = __get_dev(dentry);
54353 +
54354 + if (unlikely(!(gr_status & GR_READY)))
54355 + return 1;
54356 +
54357 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54358 + return 1;
54359 +
54360 + /* ignore Eric Biederman */
54361 + if (IS_PRIVATE(dentry->d_inode))
54362 + return 1;
54363 +
54364 + subj = task->acl;
54365 + read_lock(&gr_inode_lock);
54366 + do {
54367 + obj = lookup_acl_obj_label(ino, dev, subj);
54368 + if (obj != NULL) {
54369 + read_unlock(&gr_inode_lock);
54370 + return (obj->mode & GR_FIND) ? 1 : 0;
54371 + }
54372 + } while ((subj = subj->parent_subject));
54373 + read_unlock(&gr_inode_lock);
54374 +
54375 + /* this is purely an optimization since we're looking for an object
54376 + for the directory we're doing a readdir on
54377 + if it's possible for any globbed object to match the entry we're
54378 + filling into the directory, then the object we find here will be
54379 + an anchor point with attached globbed objects
54380 + */
54381 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54382 + if (obj->globbed == NULL)
54383 + return (obj->mode & GR_FIND) ? 1 : 0;
54384 +
54385 + is_not_root = ((obj->filename[0] == '/') &&
54386 + (obj->filename[1] == '\0')) ? 0 : 1;
54387 + bufsize = PAGE_SIZE - namelen - is_not_root;
54388 +
54389 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
54390 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54391 + return 1;
54392 +
54393 + preempt_disable();
54394 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54395 + bufsize);
54396 +
54397 + bufsize = strlen(path);
54398 +
54399 + /* if base is "/", don't append an additional slash */
54400 + if (is_not_root)
54401 + *(path + bufsize) = '/';
54402 + memcpy(path + bufsize + is_not_root, name, namelen);
54403 + *(path + bufsize + namelen + is_not_root) = '\0';
54404 +
54405 + tmp = obj->globbed;
54406 + while (tmp) {
54407 + if (!glob_match(tmp->filename, path)) {
54408 + preempt_enable();
54409 + return (tmp->mode & GR_FIND) ? 1 : 0;
54410 + }
54411 + tmp = tmp->next;
54412 + }
54413 + preempt_enable();
54414 + return (obj->mode & GR_FIND) ? 1 : 0;
54415 +}
54416 +
54417 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54418 +EXPORT_SYMBOL(gr_acl_is_enabled);
54419 +#endif
54420 +EXPORT_SYMBOL(gr_learn_resource);
54421 +EXPORT_SYMBOL(gr_set_kernel_label);
54422 +#ifdef CONFIG_SECURITY
54423 +EXPORT_SYMBOL(gr_check_user_change);
54424 +EXPORT_SYMBOL(gr_check_group_change);
54425 +#endif
54426 +
54427 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54428 new file mode 100644
54429 index 0000000..34fefda
54430 --- /dev/null
54431 +++ b/grsecurity/gracl_alloc.c
54432 @@ -0,0 +1,105 @@
54433 +#include <linux/kernel.h>
54434 +#include <linux/mm.h>
54435 +#include <linux/slab.h>
54436 +#include <linux/vmalloc.h>
54437 +#include <linux/gracl.h>
54438 +#include <linux/grsecurity.h>
54439 +
54440 +static unsigned long alloc_stack_next = 1;
54441 +static unsigned long alloc_stack_size = 1;
54442 +static void **alloc_stack;
54443 +
54444 +static __inline__ int
54445 +alloc_pop(void)
54446 +{
54447 + if (alloc_stack_next == 1)
54448 + return 0;
54449 +
54450 + kfree(alloc_stack[alloc_stack_next - 2]);
54451 +
54452 + alloc_stack_next--;
54453 +
54454 + return 1;
54455 +}
54456 +
54457 +static __inline__ int
54458 +alloc_push(void *buf)
54459 +{
54460 + if (alloc_stack_next >= alloc_stack_size)
54461 + return 1;
54462 +
54463 + alloc_stack[alloc_stack_next - 1] = buf;
54464 +
54465 + alloc_stack_next++;
54466 +
54467 + return 0;
54468 +}
54469 +
54470 +void *
54471 +acl_alloc(unsigned long len)
54472 +{
54473 + void *ret = NULL;
54474 +
54475 + if (!len || len > PAGE_SIZE)
54476 + goto out;
54477 +
54478 + ret = kmalloc(len, GFP_KERNEL);
54479 +
54480 + if (ret) {
54481 + if (alloc_push(ret)) {
54482 + kfree(ret);
54483 + ret = NULL;
54484 + }
54485 + }
54486 +
54487 +out:
54488 + return ret;
54489 +}
54490 +
54491 +void *
54492 +acl_alloc_num(unsigned long num, unsigned long len)
54493 +{
54494 + if (!len || (num > (PAGE_SIZE / len)))
54495 + return NULL;
54496 +
54497 + return acl_alloc(num * len);
54498 +}
54499 +
54500 +void
54501 +acl_free_all(void)
54502 +{
54503 + if (gr_acl_is_enabled() || !alloc_stack)
54504 + return;
54505 +
54506 + while (alloc_pop()) ;
54507 +
54508 + if (alloc_stack) {
54509 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54510 + kfree(alloc_stack);
54511 + else
54512 + vfree(alloc_stack);
54513 + }
54514 +
54515 + alloc_stack = NULL;
54516 + alloc_stack_size = 1;
54517 + alloc_stack_next = 1;
54518 +
54519 + return;
54520 +}
54521 +
54522 +int
54523 +acl_alloc_stack_init(unsigned long size)
54524 +{
54525 + if ((size * sizeof (void *)) <= PAGE_SIZE)
54526 + alloc_stack =
54527 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54528 + else
54529 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
54530 +
54531 + alloc_stack_size = size;
54532 +
54533 + if (!alloc_stack)
54534 + return 0;
54535 + else
54536 + return 1;
54537 +}
54538 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54539 new file mode 100644
54540 index 0000000..6d21049
54541 --- /dev/null
54542 +++ b/grsecurity/gracl_cap.c
54543 @@ -0,0 +1,110 @@
54544 +#include <linux/kernel.h>
54545 +#include <linux/module.h>
54546 +#include <linux/sched.h>
54547 +#include <linux/gracl.h>
54548 +#include <linux/grsecurity.h>
54549 +#include <linux/grinternal.h>
54550 +
54551 +extern const char *captab_log[];
54552 +extern int captab_log_entries;
54553 +
54554 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
54555 +{
54556 + struct acl_subject_label *curracl;
54557 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54558 + kernel_cap_t cap_audit = __cap_empty_set;
54559 +
54560 + if (!gr_acl_is_enabled())
54561 + return 1;
54562 +
54563 + curracl = task->acl;
54564 +
54565 + cap_drop = curracl->cap_lower;
54566 + cap_mask = curracl->cap_mask;
54567 + cap_audit = curracl->cap_invert_audit;
54568 +
54569 + while ((curracl = curracl->parent_subject)) {
54570 + /* if the cap isn't specified in the current computed mask but is specified in the
54571 + current level subject, and is lowered in the current level subject, then add
54572 + it to the set of dropped capabilities
54573 + otherwise, add the current level subject's mask to the current computed mask
54574 + */
54575 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54576 + cap_raise(cap_mask, cap);
54577 + if (cap_raised(curracl->cap_lower, cap))
54578 + cap_raise(cap_drop, cap);
54579 + if (cap_raised(curracl->cap_invert_audit, cap))
54580 + cap_raise(cap_audit, cap);
54581 + }
54582 + }
54583 +
54584 + if (!cap_raised(cap_drop, cap)) {
54585 + if (cap_raised(cap_audit, cap))
54586 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
54587 + return 1;
54588 + }
54589 +
54590 + curracl = task->acl;
54591 +
54592 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54593 + && cap_raised(cred->cap_effective, cap)) {
54594 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54595 + task->role->roletype, cred->uid,
54596 + cred->gid, task->exec_file ?
54597 + gr_to_filename(task->exec_file->f_path.dentry,
54598 + task->exec_file->f_path.mnt) : curracl->filename,
54599 + curracl->filename, 0UL,
54600 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
54601 + return 1;
54602 + }
54603 +
54604 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
54605 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
54606 +
54607 + return 0;
54608 +}
54609 +
54610 +int
54611 +gr_acl_is_capable(const int cap)
54612 +{
54613 + return gr_task_acl_is_capable(current, current_cred(), cap);
54614 +}
54615 +
54616 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
54617 +{
54618 + struct acl_subject_label *curracl;
54619 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54620 +
54621 + if (!gr_acl_is_enabled())
54622 + return 1;
54623 +
54624 + curracl = task->acl;
54625 +
54626 + cap_drop = curracl->cap_lower;
54627 + cap_mask = curracl->cap_mask;
54628 +
54629 + while ((curracl = curracl->parent_subject)) {
54630 + /* if the cap isn't specified in the current computed mask but is specified in the
54631 + current level subject, and is lowered in the current level subject, then add
54632 + it to the set of dropped capabilities
54633 + otherwise, add the current level subject's mask to the current computed mask
54634 + */
54635 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54636 + cap_raise(cap_mask, cap);
54637 + if (cap_raised(curracl->cap_lower, cap))
54638 + cap_raise(cap_drop, cap);
54639 + }
54640 + }
54641 +
54642 + if (!cap_raised(cap_drop, cap))
54643 + return 1;
54644 +
54645 + return 0;
54646 +}
54647 +
54648 +int
54649 +gr_acl_is_capable_nolog(const int cap)
54650 +{
54651 + return gr_task_acl_is_capable_nolog(current, cap);
54652 +}
54653 +
54654 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54655 new file mode 100644
54656 index 0000000..88d0e87
54657 --- /dev/null
54658 +++ b/grsecurity/gracl_fs.c
54659 @@ -0,0 +1,435 @@
54660 +#include <linux/kernel.h>
54661 +#include <linux/sched.h>
54662 +#include <linux/types.h>
54663 +#include <linux/fs.h>
54664 +#include <linux/file.h>
54665 +#include <linux/stat.h>
54666 +#include <linux/grsecurity.h>
54667 +#include <linux/grinternal.h>
54668 +#include <linux/gracl.h>
54669 +
54670 +umode_t
54671 +gr_acl_umask(void)
54672 +{
54673 + if (unlikely(!gr_acl_is_enabled()))
54674 + return 0;
54675 +
54676 + return current->role->umask;
54677 +}
54678 +
54679 +__u32
54680 +gr_acl_handle_hidden_file(const struct dentry * dentry,
54681 + const struct vfsmount * mnt)
54682 +{
54683 + __u32 mode;
54684 +
54685 + if (unlikely(!dentry->d_inode))
54686 + return GR_FIND;
54687 +
54688 + mode =
54689 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54690 +
54691 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54692 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54693 + return mode;
54694 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54695 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54696 + return 0;
54697 + } else if (unlikely(!(mode & GR_FIND)))
54698 + return 0;
54699 +
54700 + return GR_FIND;
54701 +}
54702 +
54703 +__u32
54704 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
54705 + int acc_mode)
54706 +{
54707 + __u32 reqmode = GR_FIND;
54708 + __u32 mode;
54709 +
54710 + if (unlikely(!dentry->d_inode))
54711 + return reqmode;
54712 +
54713 + if (acc_mode & MAY_APPEND)
54714 + reqmode |= GR_APPEND;
54715 + else if (acc_mode & MAY_WRITE)
54716 + reqmode |= GR_WRITE;
54717 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
54718 + reqmode |= GR_READ;
54719 +
54720 + mode =
54721 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54722 + mnt);
54723 +
54724 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54725 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54726 + reqmode & GR_READ ? " reading" : "",
54727 + reqmode & GR_WRITE ? " writing" : reqmode &
54728 + GR_APPEND ? " appending" : "");
54729 + return reqmode;
54730 + } else
54731 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54732 + {
54733 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54734 + reqmode & GR_READ ? " reading" : "",
54735 + reqmode & GR_WRITE ? " writing" : reqmode &
54736 + GR_APPEND ? " appending" : "");
54737 + return 0;
54738 + } else if (unlikely((mode & reqmode) != reqmode))
54739 + return 0;
54740 +
54741 + return reqmode;
54742 +}
54743 +
54744 +__u32
54745 +gr_acl_handle_creat(const struct dentry * dentry,
54746 + const struct dentry * p_dentry,
54747 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
54748 + const int imode)
54749 +{
54750 + __u32 reqmode = GR_WRITE | GR_CREATE;
54751 + __u32 mode;
54752 +
54753 + if (acc_mode & MAY_APPEND)
54754 + reqmode |= GR_APPEND;
54755 + // if a directory was required or the directory already exists, then
54756 + // don't count this open as a read
54757 + if ((acc_mode & MAY_READ) &&
54758 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
54759 + reqmode |= GR_READ;
54760 + if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
54761 + reqmode |= GR_SETID;
54762 +
54763 + mode =
54764 + gr_check_create(dentry, p_dentry, p_mnt,
54765 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54766 +
54767 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54768 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54769 + reqmode & GR_READ ? " reading" : "",
54770 + reqmode & GR_WRITE ? " writing" : reqmode &
54771 + GR_APPEND ? " appending" : "");
54772 + return reqmode;
54773 + } else
54774 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54775 + {
54776 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
54777 + reqmode & GR_READ ? " reading" : "",
54778 + reqmode & GR_WRITE ? " writing" : reqmode &
54779 + GR_APPEND ? " appending" : "");
54780 + return 0;
54781 + } else if (unlikely((mode & reqmode) != reqmode))
54782 + return 0;
54783 +
54784 + return reqmode;
54785 +}
54786 +
54787 +__u32
54788 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
54789 + const int fmode)
54790 +{
54791 + __u32 mode, reqmode = GR_FIND;
54792 +
54793 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
54794 + reqmode |= GR_EXEC;
54795 + if (fmode & S_IWOTH)
54796 + reqmode |= GR_WRITE;
54797 + if (fmode & S_IROTH)
54798 + reqmode |= GR_READ;
54799 +
54800 + mode =
54801 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54802 + mnt);
54803 +
54804 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54805 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54806 + reqmode & GR_READ ? " reading" : "",
54807 + reqmode & GR_WRITE ? " writing" : "",
54808 + reqmode & GR_EXEC ? " executing" : "");
54809 + return reqmode;
54810 + } else
54811 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54812 + {
54813 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
54814 + reqmode & GR_READ ? " reading" : "",
54815 + reqmode & GR_WRITE ? " writing" : "",
54816 + reqmode & GR_EXEC ? " executing" : "");
54817 + return 0;
54818 + } else if (unlikely((mode & reqmode) != reqmode))
54819 + return 0;
54820 +
54821 + return reqmode;
54822 +}
54823 +
54824 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
54825 +{
54826 + __u32 mode;
54827 +
54828 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
54829 +
54830 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54831 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
54832 + return mode;
54833 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54834 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
54835 + return 0;
54836 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54837 + return 0;
54838 +
54839 + return (reqmode);
54840 +}
54841 +
54842 +__u32
54843 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
54844 +{
54845 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
54846 +}
54847 +
54848 +__u32
54849 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
54850 +{
54851 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
54852 +}
54853 +
54854 +__u32
54855 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
54856 +{
54857 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
54858 +}
54859 +
54860 +__u32
54861 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
54862 +{
54863 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
54864 +}
54865 +
54866 +__u32
54867 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
54868 + umode_t *modeptr)
54869 +{
54870 + umode_t mode;
54871 +
54872 + *modeptr &= ~gr_acl_umask();
54873 + mode = *modeptr;
54874 +
54875 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
54876 + return 1;
54877 +
54878 + if (unlikely(mode & (S_ISUID | S_ISGID))) {
54879 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
54880 + GR_CHMOD_ACL_MSG);
54881 + } else {
54882 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
54883 + }
54884 +}
54885 +
54886 +__u32
54887 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
54888 +{
54889 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
54890 +}
54891 +
54892 +__u32
54893 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
54894 +{
54895 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
54896 +}
54897 +
54898 +__u32
54899 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
54900 +{
54901 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
54902 +}
54903 +
54904 +__u32
54905 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
54906 +{
54907 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
54908 + GR_UNIXCONNECT_ACL_MSG);
54909 +}
54910 +
54911 +/* hardlinks require at minimum create and link permission,
54912 + any additional privilege required is based on the
54913 + privilege of the file being linked to
54914 +*/
54915 +__u32
54916 +gr_acl_handle_link(const struct dentry * new_dentry,
54917 + const struct dentry * parent_dentry,
54918 + const struct vfsmount * parent_mnt,
54919 + const struct dentry * old_dentry,
54920 + const struct vfsmount * old_mnt, const char *to)
54921 +{
54922 + __u32 mode;
54923 + __u32 needmode = GR_CREATE | GR_LINK;
54924 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
54925 +
54926 + mode =
54927 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
54928 + old_mnt);
54929 +
54930 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
54931 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54932 + return mode;
54933 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54934 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
54935 + return 0;
54936 + } else if (unlikely((mode & needmode) != needmode))
54937 + return 0;
54938 +
54939 + return 1;
54940 +}
54941 +
54942 +__u32
54943 +gr_acl_handle_symlink(const struct dentry * new_dentry,
54944 + const struct dentry * parent_dentry,
54945 + const struct vfsmount * parent_mnt, const char *from)
54946 +{
54947 + __u32 needmode = GR_WRITE | GR_CREATE;
54948 + __u32 mode;
54949 +
54950 + mode =
54951 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
54952 + GR_CREATE | GR_AUDIT_CREATE |
54953 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
54954 +
54955 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
54956 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54957 + return mode;
54958 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
54959 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
54960 + return 0;
54961 + } else if (unlikely((mode & needmode) != needmode))
54962 + return 0;
54963 +
54964 + return (GR_WRITE | GR_CREATE);
54965 +}
54966 +
54967 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
54968 +{
54969 + __u32 mode;
54970 +
54971 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
54972 +
54973 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
54974 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
54975 + return mode;
54976 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
54977 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
54978 + return 0;
54979 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
54980 + return 0;
54981 +
54982 + return (reqmode);
54983 +}
54984 +
54985 +__u32
54986 +gr_acl_handle_mknod(const struct dentry * new_dentry,
54987 + const struct dentry * parent_dentry,
54988 + const struct vfsmount * parent_mnt,
54989 + const int mode)
54990 +{
54991 + __u32 reqmode = GR_WRITE | GR_CREATE;
54992 + if (unlikely(mode & (S_ISUID | S_ISGID)))
54993 + reqmode |= GR_SETID;
54994 +
54995 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
54996 + reqmode, GR_MKNOD_ACL_MSG);
54997 +}
54998 +
54999 +__u32
55000 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
55001 + const struct dentry *parent_dentry,
55002 + const struct vfsmount *parent_mnt)
55003 +{
55004 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55005 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55006 +}
55007 +
55008 +#define RENAME_CHECK_SUCCESS(old, new) \
55009 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55010 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55011 +
55012 +int
55013 +gr_acl_handle_rename(struct dentry *new_dentry,
55014 + struct dentry *parent_dentry,
55015 + const struct vfsmount *parent_mnt,
55016 + struct dentry *old_dentry,
55017 + struct inode *old_parent_inode,
55018 + struct vfsmount *old_mnt, const char *newname)
55019 +{
55020 + __u32 comp1, comp2;
55021 + int error = 0;
55022 +
55023 + if (unlikely(!gr_acl_is_enabled()))
55024 + return 0;
55025 +
55026 + if (!new_dentry->d_inode) {
55027 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55028 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55029 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55030 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55031 + GR_DELETE | GR_AUDIT_DELETE |
55032 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55033 + GR_SUPPRESS, old_mnt);
55034 + } else {
55035 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55036 + GR_CREATE | GR_DELETE |
55037 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55038 + GR_AUDIT_READ | GR_AUDIT_WRITE |
55039 + GR_SUPPRESS, parent_mnt);
55040 + comp2 =
55041 + gr_search_file(old_dentry,
55042 + GR_READ | GR_WRITE | GR_AUDIT_READ |
55043 + GR_DELETE | GR_AUDIT_DELETE |
55044 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55045 + }
55046 +
55047 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55048 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55049 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55050 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55051 + && !(comp2 & GR_SUPPRESS)) {
55052 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55053 + error = -EACCES;
55054 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55055 + error = -EACCES;
55056 +
55057 + return error;
55058 +}
55059 +
55060 +void
55061 +gr_acl_handle_exit(void)
55062 +{
55063 + u16 id;
55064 + char *rolename;
55065 + struct file *exec_file;
55066 +
55067 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55068 + !(current->role->roletype & GR_ROLE_PERSIST))) {
55069 + id = current->acl_role_id;
55070 + rolename = current->role->rolename;
55071 + gr_set_acls(1);
55072 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55073 + }
55074 +
55075 + write_lock(&grsec_exec_file_lock);
55076 + exec_file = current->exec_file;
55077 + current->exec_file = NULL;
55078 + write_unlock(&grsec_exec_file_lock);
55079 +
55080 + if (exec_file)
55081 + fput(exec_file);
55082 +}
55083 +
55084 +int
55085 +gr_acl_handle_procpidmem(const struct task_struct *task)
55086 +{
55087 + if (unlikely(!gr_acl_is_enabled()))
55088 + return 0;
55089 +
55090 + if (task != current && task->acl->mode & GR_PROTPROCFD)
55091 + return -EACCES;
55092 +
55093 + return 0;
55094 +}
55095 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55096 new file mode 100644
55097 index 0000000..58800a7
55098 --- /dev/null
55099 +++ b/grsecurity/gracl_ip.c
55100 @@ -0,0 +1,384 @@
55101 +#include <linux/kernel.h>
55102 +#include <asm/uaccess.h>
55103 +#include <asm/errno.h>
55104 +#include <net/sock.h>
55105 +#include <linux/file.h>
55106 +#include <linux/fs.h>
55107 +#include <linux/net.h>
55108 +#include <linux/in.h>
55109 +#include <linux/skbuff.h>
55110 +#include <linux/ip.h>
55111 +#include <linux/udp.h>
55112 +#include <linux/types.h>
55113 +#include <linux/sched.h>
55114 +#include <linux/netdevice.h>
55115 +#include <linux/inetdevice.h>
55116 +#include <linux/gracl.h>
55117 +#include <linux/grsecurity.h>
55118 +#include <linux/grinternal.h>
55119 +
55120 +#define GR_BIND 0x01
55121 +#define GR_CONNECT 0x02
55122 +#define GR_INVERT 0x04
55123 +#define GR_BINDOVERRIDE 0x08
55124 +#define GR_CONNECTOVERRIDE 0x10
55125 +#define GR_SOCK_FAMILY 0x20
55126 +
55127 +static const char * gr_protocols[IPPROTO_MAX] = {
55128 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55129 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55130 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55131 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55132 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55133 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55134 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55135 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55136 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55137 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55138 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55139 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55140 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55141 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55142 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55143 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55144 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55145 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55146 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55147 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55148 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55149 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55150 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55151 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55152 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55153 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55154 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55155 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55156 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55157 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55158 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55159 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55160 + };
55161 +
55162 +static const char * gr_socktypes[SOCK_MAX] = {
55163 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55164 + "unknown:7", "unknown:8", "unknown:9", "packet"
55165 + };
55166 +
55167 +static const char * gr_sockfamilies[AF_MAX+1] = {
55168 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55169 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
55170 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55171 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
55172 + };
55173 +
55174 +const char *
55175 +gr_proto_to_name(unsigned char proto)
55176 +{
55177 + return gr_protocols[proto];
55178 +}
55179 +
55180 +const char *
55181 +gr_socktype_to_name(unsigned char type)
55182 +{
55183 + return gr_socktypes[type];
55184 +}
55185 +
55186 +const char *
55187 +gr_sockfamily_to_name(unsigned char family)
55188 +{
55189 + return gr_sockfamilies[family];
55190 +}
55191 +
55192 +int
55193 +gr_search_socket(const int domain, const int type, const int protocol)
55194 +{
55195 + struct acl_subject_label *curr;
55196 + const struct cred *cred = current_cred();
55197 +
55198 + if (unlikely(!gr_acl_is_enabled()))
55199 + goto exit;
55200 +
55201 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
55202 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
55203 + goto exit; // let the kernel handle it
55204 +
55205 + curr = current->acl;
55206 +
55207 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55208 + /* the family is allowed, if this is PF_INET allow it only if
55209 + the extra sock type/protocol checks pass */
55210 + if (domain == PF_INET)
55211 + goto inet_check;
55212 + goto exit;
55213 + } else {
55214 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55215 + __u32 fakeip = 0;
55216 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55217 + current->role->roletype, cred->uid,
55218 + cred->gid, current->exec_file ?
55219 + gr_to_filename(current->exec_file->f_path.dentry,
55220 + current->exec_file->f_path.mnt) :
55221 + curr->filename, curr->filename,
55222 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55223 + &current->signal->saved_ip);
55224 + goto exit;
55225 + }
55226 + goto exit_fail;
55227 + }
55228 +
55229 +inet_check:
55230 + /* the rest of this checking is for IPv4 only */
55231 + if (!curr->ips)
55232 + goto exit;
55233 +
55234 + if ((curr->ip_type & (1 << type)) &&
55235 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55236 + goto exit;
55237 +
55238 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55239 + /* we don't place acls on raw sockets , and sometimes
55240 + dgram/ip sockets are opened for ioctl and not
55241 + bind/connect, so we'll fake a bind learn log */
55242 + if (type == SOCK_RAW || type == SOCK_PACKET) {
55243 + __u32 fakeip = 0;
55244 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55245 + current->role->roletype, cred->uid,
55246 + cred->gid, current->exec_file ?
55247 + gr_to_filename(current->exec_file->f_path.dentry,
55248 + current->exec_file->f_path.mnt) :
55249 + curr->filename, curr->filename,
55250 + &fakeip, 0, type,
55251 + protocol, GR_CONNECT, &current->signal->saved_ip);
55252 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55253 + __u32 fakeip = 0;
55254 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55255 + current->role->roletype, cred->uid,
55256 + cred->gid, current->exec_file ?
55257 + gr_to_filename(current->exec_file->f_path.dentry,
55258 + current->exec_file->f_path.mnt) :
55259 + curr->filename, curr->filename,
55260 + &fakeip, 0, type,
55261 + protocol, GR_BIND, &current->signal->saved_ip);
55262 + }
55263 + /* we'll log when they use connect or bind */
55264 + goto exit;
55265 + }
55266 +
55267 +exit_fail:
55268 + if (domain == PF_INET)
55269 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55270 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
55271 + else
55272 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55273 + gr_socktype_to_name(type), protocol);
55274 +
55275 + return 0;
55276 +exit:
55277 + return 1;
55278 +}
55279 +
55280 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55281 +{
55282 + if ((ip->mode & mode) &&
55283 + (ip_port >= ip->low) &&
55284 + (ip_port <= ip->high) &&
55285 + ((ntohl(ip_addr) & our_netmask) ==
55286 + (ntohl(our_addr) & our_netmask))
55287 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55288 + && (ip->type & (1 << type))) {
55289 + if (ip->mode & GR_INVERT)
55290 + return 2; // specifically denied
55291 + else
55292 + return 1; // allowed
55293 + }
55294 +
55295 + return 0; // not specifically allowed, may continue parsing
55296 +}
55297 +
55298 +static int
55299 +gr_search_connectbind(const int full_mode, struct sock *sk,
55300 + struct sockaddr_in *addr, const int type)
55301 +{
55302 + char iface[IFNAMSIZ] = {0};
55303 + struct acl_subject_label *curr;
55304 + struct acl_ip_label *ip;
55305 + struct inet_sock *isk;
55306 + struct net_device *dev;
55307 + struct in_device *idev;
55308 + unsigned long i;
55309 + int ret;
55310 + int mode = full_mode & (GR_BIND | GR_CONNECT);
55311 + __u32 ip_addr = 0;
55312 + __u32 our_addr;
55313 + __u32 our_netmask;
55314 + char *p;
55315 + __u16 ip_port = 0;
55316 + const struct cred *cred = current_cred();
55317 +
55318 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55319 + return 0;
55320 +
55321 + curr = current->acl;
55322 + isk = inet_sk(sk);
55323 +
55324 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55325 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55326 + addr->sin_addr.s_addr = curr->inaddr_any_override;
55327 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
55328 + struct sockaddr_in saddr;
55329 + int err;
55330 +
55331 + saddr.sin_family = AF_INET;
55332 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
55333 + saddr.sin_port = isk->inet_sport;
55334 +
55335 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55336 + if (err)
55337 + return err;
55338 +
55339 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55340 + if (err)
55341 + return err;
55342 + }
55343 +
55344 + if (!curr->ips)
55345 + return 0;
55346 +
55347 + ip_addr = addr->sin_addr.s_addr;
55348 + ip_port = ntohs(addr->sin_port);
55349 +
55350 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55351 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55352 + current->role->roletype, cred->uid,
55353 + cred->gid, current->exec_file ?
55354 + gr_to_filename(current->exec_file->f_path.dentry,
55355 + current->exec_file->f_path.mnt) :
55356 + curr->filename, curr->filename,
55357 + &ip_addr, ip_port, type,
55358 + sk->sk_protocol, mode, &current->signal->saved_ip);
55359 + return 0;
55360 + }
55361 +
55362 + for (i = 0; i < curr->ip_num; i++) {
55363 + ip = *(curr->ips + i);
55364 + if (ip->iface != NULL) {
55365 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
55366 + p = strchr(iface, ':');
55367 + if (p != NULL)
55368 + *p = '\0';
55369 + dev = dev_get_by_name(sock_net(sk), iface);
55370 + if (dev == NULL)
55371 + continue;
55372 + idev = in_dev_get(dev);
55373 + if (idev == NULL) {
55374 + dev_put(dev);
55375 + continue;
55376 + }
55377 + rcu_read_lock();
55378 + for_ifa(idev) {
55379 + if (!strcmp(ip->iface, ifa->ifa_label)) {
55380 + our_addr = ifa->ifa_address;
55381 + our_netmask = 0xffffffff;
55382 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55383 + if (ret == 1) {
55384 + rcu_read_unlock();
55385 + in_dev_put(idev);
55386 + dev_put(dev);
55387 + return 0;
55388 + } else if (ret == 2) {
55389 + rcu_read_unlock();
55390 + in_dev_put(idev);
55391 + dev_put(dev);
55392 + goto denied;
55393 + }
55394 + }
55395 + } endfor_ifa(idev);
55396 + rcu_read_unlock();
55397 + in_dev_put(idev);
55398 + dev_put(dev);
55399 + } else {
55400 + our_addr = ip->addr;
55401 + our_netmask = ip->netmask;
55402 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55403 + if (ret == 1)
55404 + return 0;
55405 + else if (ret == 2)
55406 + goto denied;
55407 + }
55408 + }
55409 +
55410 +denied:
55411 + if (mode == GR_BIND)
55412 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55413 + else if (mode == GR_CONNECT)
55414 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
55415 +
55416 + return -EACCES;
55417 +}
55418 +
55419 +int
55420 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55421 +{
55422 + /* always allow disconnection of dgram sockets with connect */
55423 + if (addr->sin_family == AF_UNSPEC)
55424 + return 0;
55425 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55426 +}
55427 +
55428 +int
55429 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55430 +{
55431 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55432 +}
55433 +
55434 +int gr_search_listen(struct socket *sock)
55435 +{
55436 + struct sock *sk = sock->sk;
55437 + struct sockaddr_in addr;
55438 +
55439 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55440 + addr.sin_port = inet_sk(sk)->inet_sport;
55441 +
55442 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55443 +}
55444 +
55445 +int gr_search_accept(struct socket *sock)
55446 +{
55447 + struct sock *sk = sock->sk;
55448 + struct sockaddr_in addr;
55449 +
55450 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55451 + addr.sin_port = inet_sk(sk)->inet_sport;
55452 +
55453 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55454 +}
55455 +
55456 +int
55457 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55458 +{
55459 + if (addr)
55460 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55461 + else {
55462 + struct sockaddr_in sin;
55463 + const struct inet_sock *inet = inet_sk(sk);
55464 +
55465 + sin.sin_addr.s_addr = inet->inet_daddr;
55466 + sin.sin_port = inet->inet_dport;
55467 +
55468 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55469 + }
55470 +}
55471 +
55472 +int
55473 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55474 +{
55475 + struct sockaddr_in sin;
55476 +
55477 + if (unlikely(skb->len < sizeof (struct udphdr)))
55478 + return 0; // skip this packet
55479 +
55480 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55481 + sin.sin_port = udp_hdr(skb)->source;
55482 +
55483 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55484 +}
55485 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55486 new file mode 100644
55487 index 0000000..25f54ef
55488 --- /dev/null
55489 +++ b/grsecurity/gracl_learn.c
55490 @@ -0,0 +1,207 @@
55491 +#include <linux/kernel.h>
55492 +#include <linux/mm.h>
55493 +#include <linux/sched.h>
55494 +#include <linux/poll.h>
55495 +#include <linux/string.h>
55496 +#include <linux/file.h>
55497 +#include <linux/types.h>
55498 +#include <linux/vmalloc.h>
55499 +#include <linux/grinternal.h>
55500 +
55501 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55502 + size_t count, loff_t *ppos);
55503 +extern int gr_acl_is_enabled(void);
55504 +
55505 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55506 +static int gr_learn_attached;
55507 +
55508 +/* use a 512k buffer */
55509 +#define LEARN_BUFFER_SIZE (512 * 1024)
55510 +
55511 +static DEFINE_SPINLOCK(gr_learn_lock);
55512 +static DEFINE_MUTEX(gr_learn_user_mutex);
55513 +
55514 +/* we need to maintain two buffers, so that the kernel context of grlearn
55515 + uses a semaphore around the userspace copying, and the other kernel contexts
55516 + use a spinlock when copying into the buffer, since they cannot sleep
55517 +*/
55518 +static char *learn_buffer;
55519 +static char *learn_buffer_user;
55520 +static int learn_buffer_len;
55521 +static int learn_buffer_user_len;
55522 +
55523 +static ssize_t
55524 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55525 +{
55526 + DECLARE_WAITQUEUE(wait, current);
55527 + ssize_t retval = 0;
55528 +
55529 + add_wait_queue(&learn_wait, &wait);
55530 + set_current_state(TASK_INTERRUPTIBLE);
55531 + do {
55532 + mutex_lock(&gr_learn_user_mutex);
55533 + spin_lock(&gr_learn_lock);
55534 + if (learn_buffer_len)
55535 + break;
55536 + spin_unlock(&gr_learn_lock);
55537 + mutex_unlock(&gr_learn_user_mutex);
55538 + if (file->f_flags & O_NONBLOCK) {
55539 + retval = -EAGAIN;
55540 + goto out;
55541 + }
55542 + if (signal_pending(current)) {
55543 + retval = -ERESTARTSYS;
55544 + goto out;
55545 + }
55546 +
55547 + schedule();
55548 + } while (1);
55549 +
55550 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55551 + learn_buffer_user_len = learn_buffer_len;
55552 + retval = learn_buffer_len;
55553 + learn_buffer_len = 0;
55554 +
55555 + spin_unlock(&gr_learn_lock);
55556 +
55557 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55558 + retval = -EFAULT;
55559 +
55560 + mutex_unlock(&gr_learn_user_mutex);
55561 +out:
55562 + set_current_state(TASK_RUNNING);
55563 + remove_wait_queue(&learn_wait, &wait);
55564 + return retval;
55565 +}
55566 +
55567 +static unsigned int
55568 +poll_learn(struct file * file, poll_table * wait)
55569 +{
55570 + poll_wait(file, &learn_wait, wait);
55571 +
55572 + if (learn_buffer_len)
55573 + return (POLLIN | POLLRDNORM);
55574 +
55575 + return 0;
55576 +}
55577 +
55578 +void
55579 +gr_clear_learn_entries(void)
55580 +{
55581 + char *tmp;
55582 +
55583 + mutex_lock(&gr_learn_user_mutex);
55584 + spin_lock(&gr_learn_lock);
55585 + tmp = learn_buffer;
55586 + learn_buffer = NULL;
55587 + spin_unlock(&gr_learn_lock);
55588 + if (tmp)
55589 + vfree(tmp);
55590 + if (learn_buffer_user != NULL) {
55591 + vfree(learn_buffer_user);
55592 + learn_buffer_user = NULL;
55593 + }
55594 + learn_buffer_len = 0;
55595 + mutex_unlock(&gr_learn_user_mutex);
55596 +
55597 + return;
55598 +}
55599 +
55600 +void
55601 +gr_add_learn_entry(const char *fmt, ...)
55602 +{
55603 + va_list args;
55604 + unsigned int len;
55605 +
55606 + if (!gr_learn_attached)
55607 + return;
55608 +
55609 + spin_lock(&gr_learn_lock);
55610 +
55611 + /* leave a gap at the end so we know when it's "full" but don't have to
55612 + compute the exact length of the string we're trying to append
55613 + */
55614 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55615 + spin_unlock(&gr_learn_lock);
55616 + wake_up_interruptible(&learn_wait);
55617 + return;
55618 + }
55619 + if (learn_buffer == NULL) {
55620 + spin_unlock(&gr_learn_lock);
55621 + return;
55622 + }
55623 +
55624 + va_start(args, fmt);
55625 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55626 + va_end(args);
55627 +
55628 + learn_buffer_len += len + 1;
55629 +
55630 + spin_unlock(&gr_learn_lock);
55631 + wake_up_interruptible(&learn_wait);
55632 +
55633 + return;
55634 +}
55635 +
55636 +static int
55637 +open_learn(struct inode *inode, struct file *file)
55638 +{
55639 + if (file->f_mode & FMODE_READ && gr_learn_attached)
55640 + return -EBUSY;
55641 + if (file->f_mode & FMODE_READ) {
55642 + int retval = 0;
55643 + mutex_lock(&gr_learn_user_mutex);
55644 + if (learn_buffer == NULL)
55645 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55646 + if (learn_buffer_user == NULL)
55647 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55648 + if (learn_buffer == NULL) {
55649 + retval = -ENOMEM;
55650 + goto out_error;
55651 + }
55652 + if (learn_buffer_user == NULL) {
55653 + retval = -ENOMEM;
55654 + goto out_error;
55655 + }
55656 + learn_buffer_len = 0;
55657 + learn_buffer_user_len = 0;
55658 + gr_learn_attached = 1;
55659 +out_error:
55660 + mutex_unlock(&gr_learn_user_mutex);
55661 + return retval;
55662 + }
55663 + return 0;
55664 +}
55665 +
55666 +static int
55667 +close_learn(struct inode *inode, struct file *file)
55668 +{
55669 + if (file->f_mode & FMODE_READ) {
55670 + char *tmp = NULL;
55671 + mutex_lock(&gr_learn_user_mutex);
55672 + spin_lock(&gr_learn_lock);
55673 + tmp = learn_buffer;
55674 + learn_buffer = NULL;
55675 + spin_unlock(&gr_learn_lock);
55676 + if (tmp)
55677 + vfree(tmp);
55678 + if (learn_buffer_user != NULL) {
55679 + vfree(learn_buffer_user);
55680 + learn_buffer_user = NULL;
55681 + }
55682 + learn_buffer_len = 0;
55683 + learn_buffer_user_len = 0;
55684 + gr_learn_attached = 0;
55685 + mutex_unlock(&gr_learn_user_mutex);
55686 + }
55687 +
55688 + return 0;
55689 +}
55690 +
55691 +const struct file_operations grsec_fops = {
55692 + .read = read_learn,
55693 + .write = write_grsec_handler,
55694 + .open = open_learn,
55695 + .release = close_learn,
55696 + .poll = poll_learn,
55697 +};
55698 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55699 new file mode 100644
55700 index 0000000..39645c9
55701 --- /dev/null
55702 +++ b/grsecurity/gracl_res.c
55703 @@ -0,0 +1,68 @@
55704 +#include <linux/kernel.h>
55705 +#include <linux/sched.h>
55706 +#include <linux/gracl.h>
55707 +#include <linux/grinternal.h>
55708 +
55709 +static const char *restab_log[] = {
55710 + [RLIMIT_CPU] = "RLIMIT_CPU",
55711 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55712 + [RLIMIT_DATA] = "RLIMIT_DATA",
55713 + [RLIMIT_STACK] = "RLIMIT_STACK",
55714 + [RLIMIT_CORE] = "RLIMIT_CORE",
55715 + [RLIMIT_RSS] = "RLIMIT_RSS",
55716 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
55717 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55718 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55719 + [RLIMIT_AS] = "RLIMIT_AS",
55720 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55721 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55722 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55723 + [RLIMIT_NICE] = "RLIMIT_NICE",
55724 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55725 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55726 + [GR_CRASH_RES] = "RLIMIT_CRASH"
55727 +};
55728 +
55729 +void
55730 +gr_log_resource(const struct task_struct *task,
55731 + const int res, const unsigned long wanted, const int gt)
55732 +{
55733 + const struct cred *cred;
55734 + unsigned long rlim;
55735 +
55736 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
55737 + return;
55738 +
55739 + // not yet supported resource
55740 + if (unlikely(!restab_log[res]))
55741 + return;
55742 +
55743 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55744 + rlim = task_rlimit_max(task, res);
55745 + else
55746 + rlim = task_rlimit(task, res);
55747 +
55748 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
55749 + return;
55750 +
55751 + rcu_read_lock();
55752 + cred = __task_cred(task);
55753 +
55754 + if (res == RLIMIT_NPROC &&
55755 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55756 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55757 + goto out_rcu_unlock;
55758 + else if (res == RLIMIT_MEMLOCK &&
55759 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
55760 + goto out_rcu_unlock;
55761 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
55762 + goto out_rcu_unlock;
55763 + rcu_read_unlock();
55764 +
55765 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
55766 +
55767 + return;
55768 +out_rcu_unlock:
55769 + rcu_read_unlock();
55770 + return;
55771 +}
55772 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
55773 new file mode 100644
55774 index 0000000..5556be3
55775 --- /dev/null
55776 +++ b/grsecurity/gracl_segv.c
55777 @@ -0,0 +1,299 @@
55778 +#include <linux/kernel.h>
55779 +#include <linux/mm.h>
55780 +#include <asm/uaccess.h>
55781 +#include <asm/errno.h>
55782 +#include <asm/mman.h>
55783 +#include <net/sock.h>
55784 +#include <linux/file.h>
55785 +#include <linux/fs.h>
55786 +#include <linux/net.h>
55787 +#include <linux/in.h>
55788 +#include <linux/slab.h>
55789 +#include <linux/types.h>
55790 +#include <linux/sched.h>
55791 +#include <linux/timer.h>
55792 +#include <linux/gracl.h>
55793 +#include <linux/grsecurity.h>
55794 +#include <linux/grinternal.h>
55795 +
55796 +static struct crash_uid *uid_set;
55797 +static unsigned short uid_used;
55798 +static DEFINE_SPINLOCK(gr_uid_lock);
55799 +extern rwlock_t gr_inode_lock;
55800 +extern struct acl_subject_label *
55801 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
55802 + struct acl_role_label *role);
55803 +
55804 +#ifdef CONFIG_BTRFS_FS
55805 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
55806 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
55807 +#endif
55808 +
55809 +static inline dev_t __get_dev(const struct dentry *dentry)
55810 +{
55811 +#ifdef CONFIG_BTRFS_FS
55812 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
55813 + return get_btrfs_dev_from_inode(dentry->d_inode);
55814 + else
55815 +#endif
55816 + return dentry->d_inode->i_sb->s_dev;
55817 +}
55818 +
55819 +int
55820 +gr_init_uidset(void)
55821 +{
55822 + uid_set =
55823 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
55824 + uid_used = 0;
55825 +
55826 + return uid_set ? 1 : 0;
55827 +}
55828 +
55829 +void
55830 +gr_free_uidset(void)
55831 +{
55832 + if (uid_set)
55833 + kfree(uid_set);
55834 +
55835 + return;
55836 +}
55837 +
55838 +int
55839 +gr_find_uid(const uid_t uid)
55840 +{
55841 + struct crash_uid *tmp = uid_set;
55842 + uid_t buid;
55843 + int low = 0, high = uid_used - 1, mid;
55844 +
55845 + while (high >= low) {
55846 + mid = (low + high) >> 1;
55847 + buid = tmp[mid].uid;
55848 + if (buid == uid)
55849 + return mid;
55850 + if (buid > uid)
55851 + high = mid - 1;
55852 + if (buid < uid)
55853 + low = mid + 1;
55854 + }
55855 +
55856 + return -1;
55857 +}
55858 +
55859 +static __inline__ void
55860 +gr_insertsort(void)
55861 +{
55862 + unsigned short i, j;
55863 + struct crash_uid index;
55864 +
55865 + for (i = 1; i < uid_used; i++) {
55866 + index = uid_set[i];
55867 + j = i;
55868 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
55869 + uid_set[j] = uid_set[j - 1];
55870 + j--;
55871 + }
55872 + uid_set[j] = index;
55873 + }
55874 +
55875 + return;
55876 +}
55877 +
55878 +static __inline__ void
55879 +gr_insert_uid(const uid_t uid, const unsigned long expires)
55880 +{
55881 + int loc;
55882 +
55883 + if (uid_used == GR_UIDTABLE_MAX)
55884 + return;
55885 +
55886 + loc = gr_find_uid(uid);
55887 +
55888 + if (loc >= 0) {
55889 + uid_set[loc].expires = expires;
55890 + return;
55891 + }
55892 +
55893 + uid_set[uid_used].uid = uid;
55894 + uid_set[uid_used].expires = expires;
55895 + uid_used++;
55896 +
55897 + gr_insertsort();
55898 +
55899 + return;
55900 +}
55901 +
55902 +void
55903 +gr_remove_uid(const unsigned short loc)
55904 +{
55905 + unsigned short i;
55906 +
55907 + for (i = loc + 1; i < uid_used; i++)
55908 + uid_set[i - 1] = uid_set[i];
55909 +
55910 + uid_used--;
55911 +
55912 + return;
55913 +}
55914 +
55915 +int
55916 +gr_check_crash_uid(const uid_t uid)
55917 +{
55918 + int loc;
55919 + int ret = 0;
55920 +
55921 + if (unlikely(!gr_acl_is_enabled()))
55922 + return 0;
55923 +
55924 + spin_lock(&gr_uid_lock);
55925 + loc = gr_find_uid(uid);
55926 +
55927 + if (loc < 0)
55928 + goto out_unlock;
55929 +
55930 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
55931 + gr_remove_uid(loc);
55932 + else
55933 + ret = 1;
55934 +
55935 +out_unlock:
55936 + spin_unlock(&gr_uid_lock);
55937 + return ret;
55938 +}
55939 +
55940 +static __inline__ int
55941 +proc_is_setxid(const struct cred *cred)
55942 +{
55943 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
55944 + cred->uid != cred->fsuid)
55945 + return 1;
55946 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
55947 + cred->gid != cred->fsgid)
55948 + return 1;
55949 +
55950 + return 0;
55951 +}
55952 +
55953 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
55954 +
55955 +void
55956 +gr_handle_crash(struct task_struct *task, const int sig)
55957 +{
55958 + struct acl_subject_label *curr;
55959 + struct task_struct *tsk, *tsk2;
55960 + const struct cred *cred;
55961 + const struct cred *cred2;
55962 +
55963 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
55964 + return;
55965 +
55966 + if (unlikely(!gr_acl_is_enabled()))
55967 + return;
55968 +
55969 + curr = task->acl;
55970 +
55971 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
55972 + return;
55973 +
55974 + if (time_before_eq(curr->expires, get_seconds())) {
55975 + curr->expires = 0;
55976 + curr->crashes = 0;
55977 + }
55978 +
55979 + curr->crashes++;
55980 +
55981 + if (!curr->expires)
55982 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
55983 +
55984 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
55985 + time_after(curr->expires, get_seconds())) {
55986 + rcu_read_lock();
55987 + cred = __task_cred(task);
55988 + if (cred->uid && proc_is_setxid(cred)) {
55989 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
55990 + spin_lock(&gr_uid_lock);
55991 + gr_insert_uid(cred->uid, curr->expires);
55992 + spin_unlock(&gr_uid_lock);
55993 + curr->expires = 0;
55994 + curr->crashes = 0;
55995 + read_lock(&tasklist_lock);
55996 + do_each_thread(tsk2, tsk) {
55997 + cred2 = __task_cred(tsk);
55998 + if (tsk != task && cred2->uid == cred->uid)
55999 + gr_fake_force_sig(SIGKILL, tsk);
56000 + } while_each_thread(tsk2, tsk);
56001 + read_unlock(&tasklist_lock);
56002 + } else {
56003 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56004 + read_lock(&tasklist_lock);
56005 + read_lock(&grsec_exec_file_lock);
56006 + do_each_thread(tsk2, tsk) {
56007 + if (likely(tsk != task)) {
56008 + // if this thread has the same subject as the one that triggered
56009 + // RES_CRASH and it's the same binary, kill it
56010 + if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
56011 + gr_fake_force_sig(SIGKILL, tsk);
56012 + }
56013 + } while_each_thread(tsk2, tsk);
56014 + read_unlock(&grsec_exec_file_lock);
56015 + read_unlock(&tasklist_lock);
56016 + }
56017 + rcu_read_unlock();
56018 + }
56019 +
56020 + return;
56021 +}
56022 +
56023 +int
56024 +gr_check_crash_exec(const struct file *filp)
56025 +{
56026 + struct acl_subject_label *curr;
56027 +
56028 + if (unlikely(!gr_acl_is_enabled()))
56029 + return 0;
56030 +
56031 + read_lock(&gr_inode_lock);
56032 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
56033 + __get_dev(filp->f_path.dentry),
56034 + current->role);
56035 + read_unlock(&gr_inode_lock);
56036 +
56037 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56038 + (!curr->crashes && !curr->expires))
56039 + return 0;
56040 +
56041 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56042 + time_after(curr->expires, get_seconds()))
56043 + return 1;
56044 + else if (time_before_eq(curr->expires, get_seconds())) {
56045 + curr->crashes = 0;
56046 + curr->expires = 0;
56047 + }
56048 +
56049 + return 0;
56050 +}
56051 +
56052 +void
56053 +gr_handle_alertkill(struct task_struct *task)
56054 +{
56055 + struct acl_subject_label *curracl;
56056 + __u32 curr_ip;
56057 + struct task_struct *p, *p2;
56058 +
56059 + if (unlikely(!gr_acl_is_enabled()))
56060 + return;
56061 +
56062 + curracl = task->acl;
56063 + curr_ip = task->signal->curr_ip;
56064 +
56065 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56066 + read_lock(&tasklist_lock);
56067 + do_each_thread(p2, p) {
56068 + if (p->signal->curr_ip == curr_ip)
56069 + gr_fake_force_sig(SIGKILL, p);
56070 + } while_each_thread(p2, p);
56071 + read_unlock(&tasklist_lock);
56072 + } else if (curracl->mode & GR_KILLPROC)
56073 + gr_fake_force_sig(SIGKILL, task);
56074 +
56075 + return;
56076 +}
56077 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56078 new file mode 100644
56079 index 0000000..9d83a69
56080 --- /dev/null
56081 +++ b/grsecurity/gracl_shm.c
56082 @@ -0,0 +1,40 @@
56083 +#include <linux/kernel.h>
56084 +#include <linux/mm.h>
56085 +#include <linux/sched.h>
56086 +#include <linux/file.h>
56087 +#include <linux/ipc.h>
56088 +#include <linux/gracl.h>
56089 +#include <linux/grsecurity.h>
56090 +#include <linux/grinternal.h>
56091 +
56092 +int
56093 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56094 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56095 +{
56096 + struct task_struct *task;
56097 +
56098 + if (!gr_acl_is_enabled())
56099 + return 1;
56100 +
56101 + rcu_read_lock();
56102 + read_lock(&tasklist_lock);
56103 +
56104 + task = find_task_by_vpid(shm_cprid);
56105 +
56106 + if (unlikely(!task))
56107 + task = find_task_by_vpid(shm_lapid);
56108 +
56109 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56110 + (task->pid == shm_lapid)) &&
56111 + (task->acl->mode & GR_PROTSHM) &&
56112 + (task->acl != current->acl))) {
56113 + read_unlock(&tasklist_lock);
56114 + rcu_read_unlock();
56115 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56116 + return 0;
56117 + }
56118 + read_unlock(&tasklist_lock);
56119 + rcu_read_unlock();
56120 +
56121 + return 1;
56122 +}
56123 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56124 new file mode 100644
56125 index 0000000..bc0be01
56126 --- /dev/null
56127 +++ b/grsecurity/grsec_chdir.c
56128 @@ -0,0 +1,19 @@
56129 +#include <linux/kernel.h>
56130 +#include <linux/sched.h>
56131 +#include <linux/fs.h>
56132 +#include <linux/file.h>
56133 +#include <linux/grsecurity.h>
56134 +#include <linux/grinternal.h>
56135 +
56136 +void
56137 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56138 +{
56139 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56140 + if ((grsec_enable_chdir && grsec_enable_group &&
56141 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56142 + !grsec_enable_group)) {
56143 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56144 + }
56145 +#endif
56146 + return;
56147 +}
56148 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56149 new file mode 100644
56150 index 0000000..9807ee2
56151 --- /dev/null
56152 +++ b/grsecurity/grsec_chroot.c
56153 @@ -0,0 +1,368 @@
56154 +#include <linux/kernel.h>
56155 +#include <linux/module.h>
56156 +#include <linux/sched.h>
56157 +#include <linux/file.h>
56158 +#include <linux/fs.h>
56159 +#include <linux/mount.h>
56160 +#include <linux/types.h>
56161 +#include "../fs/mount.h"
56162 +#include <linux/grsecurity.h>
56163 +#include <linux/grinternal.h>
56164 +
56165 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56166 +{
56167 +#ifdef CONFIG_GRKERNSEC
56168 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
56169 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
56170 + task->gr_is_chrooted = 1;
56171 + else
56172 + task->gr_is_chrooted = 0;
56173 +
56174 + task->gr_chroot_dentry = path->dentry;
56175 +#endif
56176 + return;
56177 +}
56178 +
56179 +void gr_clear_chroot_entries(struct task_struct *task)
56180 +{
56181 +#ifdef CONFIG_GRKERNSEC
56182 + task->gr_is_chrooted = 0;
56183 + task->gr_chroot_dentry = NULL;
56184 +#endif
56185 + return;
56186 +}
56187 +
56188 +int
56189 +gr_handle_chroot_unix(const pid_t pid)
56190 +{
56191 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
56192 + struct task_struct *p;
56193 +
56194 + if (unlikely(!grsec_enable_chroot_unix))
56195 + return 1;
56196 +
56197 + if (likely(!proc_is_chrooted(current)))
56198 + return 1;
56199 +
56200 + rcu_read_lock();
56201 + read_lock(&tasklist_lock);
56202 + p = find_task_by_vpid_unrestricted(pid);
56203 + if (unlikely(p && !have_same_root(current, p))) {
56204 + read_unlock(&tasklist_lock);
56205 + rcu_read_unlock();
56206 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56207 + return 0;
56208 + }
56209 + read_unlock(&tasklist_lock);
56210 + rcu_read_unlock();
56211 +#endif
56212 + return 1;
56213 +}
56214 +
56215 +int
56216 +gr_handle_chroot_nice(void)
56217 +{
56218 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56219 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56220 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56221 + return -EPERM;
56222 + }
56223 +#endif
56224 + return 0;
56225 +}
56226 +
56227 +int
56228 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56229 +{
56230 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56231 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56232 + && proc_is_chrooted(current)) {
56233 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56234 + return -EACCES;
56235 + }
56236 +#endif
56237 + return 0;
56238 +}
56239 +
56240 +int
56241 +gr_handle_chroot_rawio(const struct inode *inode)
56242 +{
56243 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56244 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56245 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56246 + return 1;
56247 +#endif
56248 + return 0;
56249 +}
56250 +
56251 +int
56252 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56253 +{
56254 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56255 + struct task_struct *p;
56256 + int ret = 0;
56257 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56258 + return ret;
56259 +
56260 + read_lock(&tasklist_lock);
56261 + do_each_pid_task(pid, type, p) {
56262 + if (!have_same_root(current, p)) {
56263 + ret = 1;
56264 + goto out;
56265 + }
56266 + } while_each_pid_task(pid, type, p);
56267 +out:
56268 + read_unlock(&tasklist_lock);
56269 + return ret;
56270 +#endif
56271 + return 0;
56272 +}
56273 +
56274 +int
56275 +gr_pid_is_chrooted(struct task_struct *p)
56276 +{
56277 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56278 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56279 + return 0;
56280 +
56281 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56282 + !have_same_root(current, p)) {
56283 + return 1;
56284 + }
56285 +#endif
56286 + return 0;
56287 +}
56288 +
56289 +EXPORT_SYMBOL(gr_pid_is_chrooted);
56290 +
56291 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56292 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56293 +{
56294 + struct path path, currentroot;
56295 + int ret = 0;
56296 +
56297 + path.dentry = (struct dentry *)u_dentry;
56298 + path.mnt = (struct vfsmount *)u_mnt;
56299 + get_fs_root(current->fs, &currentroot);
56300 + if (path_is_under(&path, &currentroot))
56301 + ret = 1;
56302 + path_put(&currentroot);
56303 +
56304 + return ret;
56305 +}
56306 +#endif
56307 +
56308 +int
56309 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56310 +{
56311 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56312 + if (!grsec_enable_chroot_fchdir)
56313 + return 1;
56314 +
56315 + if (!proc_is_chrooted(current))
56316 + return 1;
56317 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56318 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56319 + return 0;
56320 + }
56321 +#endif
56322 + return 1;
56323 +}
56324 +
56325 +int
56326 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56327 + const time_t shm_createtime)
56328 +{
56329 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
56330 + struct task_struct *p;
56331 + time_t starttime;
56332 +
56333 + if (unlikely(!grsec_enable_chroot_shmat))
56334 + return 1;
56335 +
56336 + if (likely(!proc_is_chrooted(current)))
56337 + return 1;
56338 +
56339 + rcu_read_lock();
56340 + read_lock(&tasklist_lock);
56341 +
56342 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
56343 + starttime = p->start_time.tv_sec;
56344 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56345 + if (have_same_root(current, p)) {
56346 + goto allow;
56347 + } else {
56348 + read_unlock(&tasklist_lock);
56349 + rcu_read_unlock();
56350 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56351 + return 0;
56352 + }
56353 + }
56354 + /* creator exited, pid reuse, fall through to next check */
56355 + }
56356 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56357 + if (unlikely(!have_same_root(current, p))) {
56358 + read_unlock(&tasklist_lock);
56359 + rcu_read_unlock();
56360 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56361 + return 0;
56362 + }
56363 + }
56364 +
56365 +allow:
56366 + read_unlock(&tasklist_lock);
56367 + rcu_read_unlock();
56368 +#endif
56369 + return 1;
56370 +}
56371 +
56372 +void
56373 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56374 +{
56375 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56376 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56377 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56378 +#endif
56379 + return;
56380 +}
56381 +
56382 +int
56383 +gr_handle_chroot_mknod(const struct dentry *dentry,
56384 + const struct vfsmount *mnt, const int mode)
56385 +{
56386 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56387 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56388 + proc_is_chrooted(current)) {
56389 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56390 + return -EPERM;
56391 + }
56392 +#endif
56393 + return 0;
56394 +}
56395 +
56396 +int
56397 +gr_handle_chroot_mount(const struct dentry *dentry,
56398 + const struct vfsmount *mnt, const char *dev_name)
56399 +{
56400 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56401 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
56402 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
56403 + return -EPERM;
56404 + }
56405 +#endif
56406 + return 0;
56407 +}
56408 +
56409 +int
56410 +gr_handle_chroot_pivot(void)
56411 +{
56412 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56413 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56414 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56415 + return -EPERM;
56416 + }
56417 +#endif
56418 + return 0;
56419 +}
56420 +
56421 +int
56422 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56423 +{
56424 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56425 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56426 + !gr_is_outside_chroot(dentry, mnt)) {
56427 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56428 + return -EPERM;
56429 + }
56430 +#endif
56431 + return 0;
56432 +}
56433 +
56434 +extern const char *captab_log[];
56435 +extern int captab_log_entries;
56436 +
56437 +int
56438 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
56439 +{
56440 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56441 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56442 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56443 + if (cap_raised(chroot_caps, cap)) {
56444 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56445 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
56446 + }
56447 + return 0;
56448 + }
56449 + }
56450 +#endif
56451 + return 1;
56452 +}
56453 +
56454 +int
56455 +gr_chroot_is_capable(const int cap)
56456 +{
56457 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56458 + return gr_task_chroot_is_capable(current, current_cred(), cap);
56459 +#endif
56460 + return 1;
56461 +}
56462 +
56463 +int
56464 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56465 +{
56466 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56467 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
56468 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56469 + if (cap_raised(chroot_caps, cap)) {
56470 + return 0;
56471 + }
56472 + }
56473 +#endif
56474 + return 1;
56475 +}
56476 +
56477 +int
56478 +gr_chroot_is_capable_nolog(const int cap)
56479 +{
56480 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56481 + return gr_task_chroot_is_capable_nolog(current, cap);
56482 +#endif
56483 + return 1;
56484 +}
56485 +
56486 +int
56487 +gr_handle_chroot_sysctl(const int op)
56488 +{
56489 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
56490 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56491 + proc_is_chrooted(current))
56492 + return -EACCES;
56493 +#endif
56494 + return 0;
56495 +}
56496 +
56497 +void
56498 +gr_handle_chroot_chdir(struct path *path)
56499 +{
56500 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56501 + if (grsec_enable_chroot_chdir)
56502 + set_fs_pwd(current->fs, path);
56503 +#endif
56504 + return;
56505 +}
56506 +
56507 +int
56508 +gr_handle_chroot_chmod(const struct dentry *dentry,
56509 + const struct vfsmount *mnt, const int mode)
56510 +{
56511 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
56512 + /* allow chmod +s on directories, but not files */
56513 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
56514 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56515 + proc_is_chrooted(current)) {
56516 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56517 + return -EPERM;
56518 + }
56519 +#endif
56520 + return 0;
56521 +}
56522 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56523 new file mode 100644
56524 index 0000000..213ad8b
56525 --- /dev/null
56526 +++ b/grsecurity/grsec_disabled.c
56527 @@ -0,0 +1,437 @@
56528 +#include <linux/kernel.h>
56529 +#include <linux/module.h>
56530 +#include <linux/sched.h>
56531 +#include <linux/file.h>
56532 +#include <linux/fs.h>
56533 +#include <linux/kdev_t.h>
56534 +#include <linux/net.h>
56535 +#include <linux/in.h>
56536 +#include <linux/ip.h>
56537 +#include <linux/skbuff.h>
56538 +#include <linux/sysctl.h>
56539 +
56540 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56541 +void
56542 +pax_set_initial_flags(struct linux_binprm *bprm)
56543 +{
56544 + return;
56545 +}
56546 +#endif
56547 +
56548 +#ifdef CONFIG_SYSCTL
56549 +__u32
56550 +gr_handle_sysctl(const struct ctl_table * table, const int op)
56551 +{
56552 + return 0;
56553 +}
56554 +#endif
56555 +
56556 +#ifdef CONFIG_TASKSTATS
56557 +int gr_is_taskstats_denied(int pid)
56558 +{
56559 + return 0;
56560 +}
56561 +#endif
56562 +
56563 +int
56564 +gr_acl_is_enabled(void)
56565 +{
56566 + return 0;
56567 +}
56568 +
56569 +void
56570 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56571 +{
56572 + return;
56573 +}
56574 +
56575 +int
56576 +gr_handle_rawio(const struct inode *inode)
56577 +{
56578 + return 0;
56579 +}
56580 +
56581 +void
56582 +gr_acl_handle_psacct(struct task_struct *task, const long code)
56583 +{
56584 + return;
56585 +}
56586 +
56587 +int
56588 +gr_handle_ptrace(struct task_struct *task, const long request)
56589 +{
56590 + return 0;
56591 +}
56592 +
56593 +int
56594 +gr_handle_proc_ptrace(struct task_struct *task)
56595 +{
56596 + return 0;
56597 +}
56598 +
56599 +void
56600 +gr_learn_resource(const struct task_struct *task,
56601 + const int res, const unsigned long wanted, const int gt)
56602 +{
56603 + return;
56604 +}
56605 +
56606 +int
56607 +gr_set_acls(const int type)
56608 +{
56609 + return 0;
56610 +}
56611 +
56612 +int
56613 +gr_check_hidden_task(const struct task_struct *tsk)
56614 +{
56615 + return 0;
56616 +}
56617 +
56618 +int
56619 +gr_check_protected_task(const struct task_struct *task)
56620 +{
56621 + return 0;
56622 +}
56623 +
56624 +int
56625 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56626 +{
56627 + return 0;
56628 +}
56629 +
56630 +void
56631 +gr_copy_label(struct task_struct *tsk)
56632 +{
56633 + return;
56634 +}
56635 +
56636 +void
56637 +gr_set_pax_flags(struct task_struct *task)
56638 +{
56639 + return;
56640 +}
56641 +
56642 +int
56643 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56644 + const int unsafe_share)
56645 +{
56646 + return 0;
56647 +}
56648 +
56649 +void
56650 +gr_handle_delete(const ino_t ino, const dev_t dev)
56651 +{
56652 + return;
56653 +}
56654 +
56655 +void
56656 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56657 +{
56658 + return;
56659 +}
56660 +
56661 +void
56662 +gr_handle_crash(struct task_struct *task, const int sig)
56663 +{
56664 + return;
56665 +}
56666 +
56667 +int
56668 +gr_check_crash_exec(const struct file *filp)
56669 +{
56670 + return 0;
56671 +}
56672 +
56673 +int
56674 +gr_check_crash_uid(const uid_t uid)
56675 +{
56676 + return 0;
56677 +}
56678 +
56679 +void
56680 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56681 + struct dentry *old_dentry,
56682 + struct dentry *new_dentry,
56683 + struct vfsmount *mnt, const __u8 replace)
56684 +{
56685 + return;
56686 +}
56687 +
56688 +int
56689 +gr_search_socket(const int family, const int type, const int protocol)
56690 +{
56691 + return 1;
56692 +}
56693 +
56694 +int
56695 +gr_search_connectbind(const int mode, const struct socket *sock,
56696 + const struct sockaddr_in *addr)
56697 +{
56698 + return 0;
56699 +}
56700 +
56701 +void
56702 +gr_handle_alertkill(struct task_struct *task)
56703 +{
56704 + return;
56705 +}
56706 +
56707 +__u32
56708 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56709 +{
56710 + return 1;
56711 +}
56712 +
56713 +__u32
56714 +gr_acl_handle_hidden_file(const struct dentry * dentry,
56715 + const struct vfsmount * mnt)
56716 +{
56717 + return 1;
56718 +}
56719 +
56720 +__u32
56721 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
56722 + int acc_mode)
56723 +{
56724 + return 1;
56725 +}
56726 +
56727 +__u32
56728 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56729 +{
56730 + return 1;
56731 +}
56732 +
56733 +__u32
56734 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56735 +{
56736 + return 1;
56737 +}
56738 +
56739 +int
56740 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56741 + unsigned int *vm_flags)
56742 +{
56743 + return 1;
56744 +}
56745 +
56746 +__u32
56747 +gr_acl_handle_truncate(const struct dentry * dentry,
56748 + const struct vfsmount * mnt)
56749 +{
56750 + return 1;
56751 +}
56752 +
56753 +__u32
56754 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56755 +{
56756 + return 1;
56757 +}
56758 +
56759 +__u32
56760 +gr_acl_handle_access(const struct dentry * dentry,
56761 + const struct vfsmount * mnt, const int fmode)
56762 +{
56763 + return 1;
56764 +}
56765 +
56766 +__u32
56767 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
56768 + umode_t *mode)
56769 +{
56770 + return 1;
56771 +}
56772 +
56773 +__u32
56774 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
56775 +{
56776 + return 1;
56777 +}
56778 +
56779 +__u32
56780 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
56781 +{
56782 + return 1;
56783 +}
56784 +
56785 +void
56786 +grsecurity_init(void)
56787 +{
56788 + return;
56789 +}
56790 +
56791 +umode_t gr_acl_umask(void)
56792 +{
56793 + return 0;
56794 +}
56795 +
56796 +__u32
56797 +gr_acl_handle_mknod(const struct dentry * new_dentry,
56798 + const struct dentry * parent_dentry,
56799 + const struct vfsmount * parent_mnt,
56800 + const int mode)
56801 +{
56802 + return 1;
56803 +}
56804 +
56805 +__u32
56806 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
56807 + const struct dentry * parent_dentry,
56808 + const struct vfsmount * parent_mnt)
56809 +{
56810 + return 1;
56811 +}
56812 +
56813 +__u32
56814 +gr_acl_handle_symlink(const struct dentry * new_dentry,
56815 + const struct dentry * parent_dentry,
56816 + const struct vfsmount * parent_mnt, const char *from)
56817 +{
56818 + return 1;
56819 +}
56820 +
56821 +__u32
56822 +gr_acl_handle_link(const struct dentry * new_dentry,
56823 + const struct dentry * parent_dentry,
56824 + const struct vfsmount * parent_mnt,
56825 + const struct dentry * old_dentry,
56826 + const struct vfsmount * old_mnt, const char *to)
56827 +{
56828 + return 1;
56829 +}
56830 +
56831 +int
56832 +gr_acl_handle_rename(const struct dentry *new_dentry,
56833 + const struct dentry *parent_dentry,
56834 + const struct vfsmount *parent_mnt,
56835 + const struct dentry *old_dentry,
56836 + const struct inode *old_parent_inode,
56837 + const struct vfsmount *old_mnt, const char *newname)
56838 +{
56839 + return 0;
56840 +}
56841 +
56842 +int
56843 +gr_acl_handle_filldir(const struct file *file, const char *name,
56844 + const int namelen, const ino_t ino)
56845 +{
56846 + return 1;
56847 +}
56848 +
56849 +int
56850 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56851 + const time_t shm_createtime, const uid_t cuid, const int shmid)
56852 +{
56853 + return 1;
56854 +}
56855 +
56856 +int
56857 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
56858 +{
56859 + return 0;
56860 +}
56861 +
56862 +int
56863 +gr_search_accept(const struct socket *sock)
56864 +{
56865 + return 0;
56866 +}
56867 +
56868 +int
56869 +gr_search_listen(const struct socket *sock)
56870 +{
56871 + return 0;
56872 +}
56873 +
56874 +int
56875 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
56876 +{
56877 + return 0;
56878 +}
56879 +
56880 +__u32
56881 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
56882 +{
56883 + return 1;
56884 +}
56885 +
56886 +__u32
56887 +gr_acl_handle_creat(const struct dentry * dentry,
56888 + const struct dentry * p_dentry,
56889 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
56890 + const int imode)
56891 +{
56892 + return 1;
56893 +}
56894 +
56895 +void
56896 +gr_acl_handle_exit(void)
56897 +{
56898 + return;
56899 +}
56900 +
56901 +int
56902 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
56903 +{
56904 + return 1;
56905 +}
56906 +
56907 +void
56908 +gr_set_role_label(const uid_t uid, const gid_t gid)
56909 +{
56910 + return;
56911 +}
56912 +
56913 +int
56914 +gr_acl_handle_procpidmem(const struct task_struct *task)
56915 +{
56916 + return 0;
56917 +}
56918 +
56919 +int
56920 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
56921 +{
56922 + return 0;
56923 +}
56924 +
56925 +int
56926 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
56927 +{
56928 + return 0;
56929 +}
56930 +
56931 +void
56932 +gr_set_kernel_label(struct task_struct *task)
56933 +{
56934 + return;
56935 +}
56936 +
56937 +int
56938 +gr_check_user_change(int real, int effective, int fs)
56939 +{
56940 + return 0;
56941 +}
56942 +
56943 +int
56944 +gr_check_group_change(int real, int effective, int fs)
56945 +{
56946 + return 0;
56947 +}
56948 +
56949 +int gr_acl_enable_at_secure(void)
56950 +{
56951 + return 0;
56952 +}
56953 +
56954 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56955 +{
56956 + return dentry->d_inode->i_sb->s_dev;
56957 +}
56958 +
56959 +EXPORT_SYMBOL(gr_learn_resource);
56960 +EXPORT_SYMBOL(gr_set_kernel_label);
56961 +#ifdef CONFIG_SECURITY
56962 +EXPORT_SYMBOL(gr_check_user_change);
56963 +EXPORT_SYMBOL(gr_check_group_change);
56964 +#endif
56965 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
56966 new file mode 100644
56967 index 0000000..abfa971
56968 --- /dev/null
56969 +++ b/grsecurity/grsec_exec.c
56970 @@ -0,0 +1,174 @@
56971 +#include <linux/kernel.h>
56972 +#include <linux/sched.h>
56973 +#include <linux/file.h>
56974 +#include <linux/binfmts.h>
56975 +#include <linux/fs.h>
56976 +#include <linux/types.h>
56977 +#include <linux/grdefs.h>
56978 +#include <linux/grsecurity.h>
56979 +#include <linux/grinternal.h>
56980 +#include <linux/capability.h>
56981 +#include <linux/module.h>
56982 +
56983 +#include <asm/uaccess.h>
56984 +
56985 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56986 +static char gr_exec_arg_buf[132];
56987 +static DEFINE_MUTEX(gr_exec_arg_mutex);
56988 +#endif
56989 +
56990 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
56991 +
56992 +void
56993 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
56994 +{
56995 +#ifdef CONFIG_GRKERNSEC_EXECLOG
56996 + char *grarg = gr_exec_arg_buf;
56997 + unsigned int i, x, execlen = 0;
56998 + char c;
56999 +
57000 + if (!((grsec_enable_execlog && grsec_enable_group &&
57001 + in_group_p(grsec_audit_gid))
57002 + || (grsec_enable_execlog && !grsec_enable_group)))
57003 + return;
57004 +
57005 + mutex_lock(&gr_exec_arg_mutex);
57006 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
57007 +
57008 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
57009 + const char __user *p;
57010 + unsigned int len;
57011 +
57012 + p = get_user_arg_ptr(argv, i);
57013 + if (IS_ERR(p))
57014 + goto log;
57015 +
57016 + len = strnlen_user(p, 128 - execlen);
57017 + if (len > 128 - execlen)
57018 + len = 128 - execlen;
57019 + else if (len > 0)
57020 + len--;
57021 + if (copy_from_user(grarg + execlen, p, len))
57022 + goto log;
57023 +
57024 + /* rewrite unprintable characters */
57025 + for (x = 0; x < len; x++) {
57026 + c = *(grarg + execlen + x);
57027 + if (c < 32 || c > 126)
57028 + *(grarg + execlen + x) = ' ';
57029 + }
57030 +
57031 + execlen += len;
57032 + *(grarg + execlen) = ' ';
57033 + *(grarg + execlen + 1) = '\0';
57034 + execlen++;
57035 + }
57036 +
57037 + log:
57038 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57039 + bprm->file->f_path.mnt, grarg);
57040 + mutex_unlock(&gr_exec_arg_mutex);
57041 +#endif
57042 + return;
57043 +}
57044 +
57045 +#ifdef CONFIG_GRKERNSEC
57046 +extern int gr_acl_is_capable(const int cap);
57047 +extern int gr_acl_is_capable_nolog(const int cap);
57048 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57049 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
57050 +extern int gr_chroot_is_capable(const int cap);
57051 +extern int gr_chroot_is_capable_nolog(const int cap);
57052 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57053 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
57054 +#endif
57055 +
57056 +const char *captab_log[] = {
57057 + "CAP_CHOWN",
57058 + "CAP_DAC_OVERRIDE",
57059 + "CAP_DAC_READ_SEARCH",
57060 + "CAP_FOWNER",
57061 + "CAP_FSETID",
57062 + "CAP_KILL",
57063 + "CAP_SETGID",
57064 + "CAP_SETUID",
57065 + "CAP_SETPCAP",
57066 + "CAP_LINUX_IMMUTABLE",
57067 + "CAP_NET_BIND_SERVICE",
57068 + "CAP_NET_BROADCAST",
57069 + "CAP_NET_ADMIN",
57070 + "CAP_NET_RAW",
57071 + "CAP_IPC_LOCK",
57072 + "CAP_IPC_OWNER",
57073 + "CAP_SYS_MODULE",
57074 + "CAP_SYS_RAWIO",
57075 + "CAP_SYS_CHROOT",
57076 + "CAP_SYS_PTRACE",
57077 + "CAP_SYS_PACCT",
57078 + "CAP_SYS_ADMIN",
57079 + "CAP_SYS_BOOT",
57080 + "CAP_SYS_NICE",
57081 + "CAP_SYS_RESOURCE",
57082 + "CAP_SYS_TIME",
57083 + "CAP_SYS_TTY_CONFIG",
57084 + "CAP_MKNOD",
57085 + "CAP_LEASE",
57086 + "CAP_AUDIT_WRITE",
57087 + "CAP_AUDIT_CONTROL",
57088 + "CAP_SETFCAP",
57089 + "CAP_MAC_OVERRIDE",
57090 + "CAP_MAC_ADMIN",
57091 + "CAP_SYSLOG",
57092 + "CAP_WAKE_ALARM"
57093 +};
57094 +
57095 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
57096 +
57097 +int gr_is_capable(const int cap)
57098 +{
57099 +#ifdef CONFIG_GRKERNSEC
57100 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57101 + return 1;
57102 + return 0;
57103 +#else
57104 + return 1;
57105 +#endif
57106 +}
57107 +
57108 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57109 +{
57110 +#ifdef CONFIG_GRKERNSEC
57111 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57112 + return 1;
57113 + return 0;
57114 +#else
57115 + return 1;
57116 +#endif
57117 +}
57118 +
57119 +int gr_is_capable_nolog(const int cap)
57120 +{
57121 +#ifdef CONFIG_GRKERNSEC
57122 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57123 + return 1;
57124 + return 0;
57125 +#else
57126 + return 1;
57127 +#endif
57128 +}
57129 +
57130 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57131 +{
57132 +#ifdef CONFIG_GRKERNSEC
57133 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57134 + return 1;
57135 + return 0;
57136 +#else
57137 + return 1;
57138 +#endif
57139 +}
57140 +
57141 +EXPORT_SYMBOL(gr_is_capable);
57142 +EXPORT_SYMBOL(gr_is_capable_nolog);
57143 +EXPORT_SYMBOL(gr_task_is_capable);
57144 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
57145 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57146 new file mode 100644
57147 index 0000000..d3ee748
57148 --- /dev/null
57149 +++ b/grsecurity/grsec_fifo.c
57150 @@ -0,0 +1,24 @@
57151 +#include <linux/kernel.h>
57152 +#include <linux/sched.h>
57153 +#include <linux/fs.h>
57154 +#include <linux/file.h>
57155 +#include <linux/grinternal.h>
57156 +
57157 +int
57158 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57159 + const struct dentry *dir, const int flag, const int acc_mode)
57160 +{
57161 +#ifdef CONFIG_GRKERNSEC_FIFO
57162 + const struct cred *cred = current_cred();
57163 +
57164 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57165 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57166 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57167 + (cred->fsuid != dentry->d_inode->i_uid)) {
57168 + if (!inode_permission(dentry->d_inode, acc_mode))
57169 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57170 + return -EACCES;
57171 + }
57172 +#endif
57173 + return 0;
57174 +}
57175 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57176 new file mode 100644
57177 index 0000000..8ca18bf
57178 --- /dev/null
57179 +++ b/grsecurity/grsec_fork.c
57180 @@ -0,0 +1,23 @@
57181 +#include <linux/kernel.h>
57182 +#include <linux/sched.h>
57183 +#include <linux/grsecurity.h>
57184 +#include <linux/grinternal.h>
57185 +#include <linux/errno.h>
57186 +
57187 +void
57188 +gr_log_forkfail(const int retval)
57189 +{
57190 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57191 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57192 + switch (retval) {
57193 + case -EAGAIN:
57194 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57195 + break;
57196 + case -ENOMEM:
57197 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57198 + break;
57199 + }
57200 + }
57201 +#endif
57202 + return;
57203 +}
57204 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57205 new file mode 100644
57206 index 0000000..05a6015
57207 --- /dev/null
57208 +++ b/grsecurity/grsec_init.c
57209 @@ -0,0 +1,283 @@
57210 +#include <linux/kernel.h>
57211 +#include <linux/sched.h>
57212 +#include <linux/mm.h>
57213 +#include <linux/gracl.h>
57214 +#include <linux/slab.h>
57215 +#include <linux/vmalloc.h>
57216 +#include <linux/percpu.h>
57217 +#include <linux/module.h>
57218 +
57219 +int grsec_enable_ptrace_readexec;
57220 +int grsec_enable_setxid;
57221 +int grsec_enable_symlinkown;
57222 +int grsec_symlinkown_gid;
57223 +int grsec_enable_brute;
57224 +int grsec_enable_link;
57225 +int grsec_enable_dmesg;
57226 +int grsec_enable_harden_ptrace;
57227 +int grsec_enable_fifo;
57228 +int grsec_enable_execlog;
57229 +int grsec_enable_signal;
57230 +int grsec_enable_forkfail;
57231 +int grsec_enable_audit_ptrace;
57232 +int grsec_enable_time;
57233 +int grsec_enable_audit_textrel;
57234 +int grsec_enable_group;
57235 +int grsec_audit_gid;
57236 +int grsec_enable_chdir;
57237 +int grsec_enable_mount;
57238 +int grsec_enable_rofs;
57239 +int grsec_enable_chroot_findtask;
57240 +int grsec_enable_chroot_mount;
57241 +int grsec_enable_chroot_shmat;
57242 +int grsec_enable_chroot_fchdir;
57243 +int grsec_enable_chroot_double;
57244 +int grsec_enable_chroot_pivot;
57245 +int grsec_enable_chroot_chdir;
57246 +int grsec_enable_chroot_chmod;
57247 +int grsec_enable_chroot_mknod;
57248 +int grsec_enable_chroot_nice;
57249 +int grsec_enable_chroot_execlog;
57250 +int grsec_enable_chroot_caps;
57251 +int grsec_enable_chroot_sysctl;
57252 +int grsec_enable_chroot_unix;
57253 +int grsec_enable_tpe;
57254 +int grsec_tpe_gid;
57255 +int grsec_enable_blackhole;
57256 +#ifdef CONFIG_IPV6_MODULE
57257 +EXPORT_SYMBOL(grsec_enable_blackhole);
57258 +#endif
57259 +int grsec_lastack_retries;
57260 +int grsec_enable_tpe_all;
57261 +int grsec_enable_tpe_invert;
57262 +int grsec_enable_socket_all;
57263 +int grsec_socket_all_gid;
57264 +int grsec_enable_socket_client;
57265 +int grsec_socket_client_gid;
57266 +int grsec_enable_socket_server;
57267 +int grsec_socket_server_gid;
57268 +int grsec_resource_logging;
57269 +int grsec_disable_privio;
57270 +int grsec_enable_log_rwxmaps;
57271 +int grsec_lock;
57272 +
57273 +DEFINE_SPINLOCK(grsec_alert_lock);
57274 +unsigned long grsec_alert_wtime = 0;
57275 +unsigned long grsec_alert_fyet = 0;
57276 +
57277 +DEFINE_SPINLOCK(grsec_audit_lock);
57278 +
57279 +DEFINE_RWLOCK(grsec_exec_file_lock);
57280 +
57281 +char *gr_shared_page[4];
57282 +
57283 +char *gr_alert_log_fmt;
57284 +char *gr_audit_log_fmt;
57285 +char *gr_alert_log_buf;
57286 +char *gr_audit_log_buf;
57287 +
57288 +extern struct gr_arg *gr_usermode;
57289 +extern unsigned char *gr_system_salt;
57290 +extern unsigned char *gr_system_sum;
57291 +
57292 +void __init
57293 +grsecurity_init(void)
57294 +{
57295 + int j;
57296 + /* create the per-cpu shared pages */
57297 +
57298 +#ifdef CONFIG_X86
57299 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57300 +#endif
57301 +
57302 + for (j = 0; j < 4; j++) {
57303 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57304 + if (gr_shared_page[j] == NULL) {
57305 + panic("Unable to allocate grsecurity shared page");
57306 + return;
57307 + }
57308 + }
57309 +
57310 + /* allocate log buffers */
57311 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57312 + if (!gr_alert_log_fmt) {
57313 + panic("Unable to allocate grsecurity alert log format buffer");
57314 + return;
57315 + }
57316 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57317 + if (!gr_audit_log_fmt) {
57318 + panic("Unable to allocate grsecurity audit log format buffer");
57319 + return;
57320 + }
57321 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57322 + if (!gr_alert_log_buf) {
57323 + panic("Unable to allocate grsecurity alert log buffer");
57324 + return;
57325 + }
57326 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57327 + if (!gr_audit_log_buf) {
57328 + panic("Unable to allocate grsecurity audit log buffer");
57329 + return;
57330 + }
57331 +
57332 + /* allocate memory for authentication structure */
57333 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57334 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57335 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57336 +
57337 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57338 + panic("Unable to allocate grsecurity authentication structure");
57339 + return;
57340 + }
57341 +
57342 +
57343 +#ifdef CONFIG_GRKERNSEC_IO
57344 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57345 + grsec_disable_privio = 1;
57346 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57347 + grsec_disable_privio = 1;
57348 +#else
57349 + grsec_disable_privio = 0;
57350 +#endif
57351 +#endif
57352 +
57353 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57354 + /* for backward compatibility, tpe_invert always defaults to on if
57355 + enabled in the kernel
57356 + */
57357 + grsec_enable_tpe_invert = 1;
57358 +#endif
57359 +
57360 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57361 +#ifndef CONFIG_GRKERNSEC_SYSCTL
57362 + grsec_lock = 1;
57363 +#endif
57364 +
57365 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57366 + grsec_enable_audit_textrel = 1;
57367 +#endif
57368 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57369 + grsec_enable_log_rwxmaps = 1;
57370 +#endif
57371 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57372 + grsec_enable_group = 1;
57373 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57374 +#endif
57375 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57376 + grsec_enable_ptrace_readexec = 1;
57377 +#endif
57378 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57379 + grsec_enable_chdir = 1;
57380 +#endif
57381 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57382 + grsec_enable_harden_ptrace = 1;
57383 +#endif
57384 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57385 + grsec_enable_mount = 1;
57386 +#endif
57387 +#ifdef CONFIG_GRKERNSEC_LINK
57388 + grsec_enable_link = 1;
57389 +#endif
57390 +#ifdef CONFIG_GRKERNSEC_BRUTE
57391 + grsec_enable_brute = 1;
57392 +#endif
57393 +#ifdef CONFIG_GRKERNSEC_DMESG
57394 + grsec_enable_dmesg = 1;
57395 +#endif
57396 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57397 + grsec_enable_blackhole = 1;
57398 + grsec_lastack_retries = 4;
57399 +#endif
57400 +#ifdef CONFIG_GRKERNSEC_FIFO
57401 + grsec_enable_fifo = 1;
57402 +#endif
57403 +#ifdef CONFIG_GRKERNSEC_EXECLOG
57404 + grsec_enable_execlog = 1;
57405 +#endif
57406 +#ifdef CONFIG_GRKERNSEC_SETXID
57407 + grsec_enable_setxid = 1;
57408 +#endif
57409 +#ifdef CONFIG_GRKERNSEC_SIGNAL
57410 + grsec_enable_signal = 1;
57411 +#endif
57412 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
57413 + grsec_enable_forkfail = 1;
57414 +#endif
57415 +#ifdef CONFIG_GRKERNSEC_TIME
57416 + grsec_enable_time = 1;
57417 +#endif
57418 +#ifdef CONFIG_GRKERNSEC_RESLOG
57419 + grsec_resource_logging = 1;
57420 +#endif
57421 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57422 + grsec_enable_chroot_findtask = 1;
57423 +#endif
57424 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57425 + grsec_enable_chroot_unix = 1;
57426 +#endif
57427 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57428 + grsec_enable_chroot_mount = 1;
57429 +#endif
57430 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57431 + grsec_enable_chroot_fchdir = 1;
57432 +#endif
57433 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57434 + grsec_enable_chroot_shmat = 1;
57435 +#endif
57436 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57437 + grsec_enable_audit_ptrace = 1;
57438 +#endif
57439 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57440 + grsec_enable_chroot_double = 1;
57441 +#endif
57442 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57443 + grsec_enable_chroot_pivot = 1;
57444 +#endif
57445 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57446 + grsec_enable_chroot_chdir = 1;
57447 +#endif
57448 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57449 + grsec_enable_chroot_chmod = 1;
57450 +#endif
57451 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57452 + grsec_enable_chroot_mknod = 1;
57453 +#endif
57454 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57455 + grsec_enable_chroot_nice = 1;
57456 +#endif
57457 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57458 + grsec_enable_chroot_execlog = 1;
57459 +#endif
57460 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57461 + grsec_enable_chroot_caps = 1;
57462 +#endif
57463 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57464 + grsec_enable_chroot_sysctl = 1;
57465 +#endif
57466 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57467 + grsec_enable_symlinkown = 1;
57468 + grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
57469 +#endif
57470 +#ifdef CONFIG_GRKERNSEC_TPE
57471 + grsec_enable_tpe = 1;
57472 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57473 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
57474 + grsec_enable_tpe_all = 1;
57475 +#endif
57476 +#endif
57477 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57478 + grsec_enable_socket_all = 1;
57479 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57480 +#endif
57481 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57482 + grsec_enable_socket_client = 1;
57483 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57484 +#endif
57485 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57486 + grsec_enable_socket_server = 1;
57487 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57488 +#endif
57489 +#endif
57490 +
57491 + return;
57492 +}
57493 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57494 new file mode 100644
57495 index 0000000..35a96d1
57496 --- /dev/null
57497 +++ b/grsecurity/grsec_link.c
57498 @@ -0,0 +1,59 @@
57499 +#include <linux/kernel.h>
57500 +#include <linux/sched.h>
57501 +#include <linux/fs.h>
57502 +#include <linux/file.h>
57503 +#include <linux/grinternal.h>
57504 +
57505 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
57506 +{
57507 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57508 + const struct inode *link_inode = link->dentry->d_inode;
57509 +
57510 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
57511 + /* ignore root-owned links, e.g. /proc/self */
57512 + link_inode->i_uid &&
57513 + link_inode->i_uid != target->i_uid) {
57514 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
57515 + return 1;
57516 + }
57517 +#endif
57518 + return 0;
57519 +}
57520 +
57521 +int
57522 +gr_handle_follow_link(const struct inode *parent,
57523 + const struct inode *inode,
57524 + const struct dentry *dentry, const struct vfsmount *mnt)
57525 +{
57526 +#ifdef CONFIG_GRKERNSEC_LINK
57527 + const struct cred *cred = current_cred();
57528 +
57529 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57530 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57531 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57532 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57533 + return -EACCES;
57534 + }
57535 +#endif
57536 + return 0;
57537 +}
57538 +
57539 +int
57540 +gr_handle_hardlink(const struct dentry *dentry,
57541 + const struct vfsmount *mnt,
57542 + struct inode *inode, const int mode, const char *to)
57543 +{
57544 +#ifdef CONFIG_GRKERNSEC_LINK
57545 + const struct cred *cred = current_cred();
57546 +
57547 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57548 + (!S_ISREG(mode) || (mode & S_ISUID) ||
57549 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
57550 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
57551 + !capable(CAP_FOWNER) && cred->uid) {
57552 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57553 + return -EPERM;
57554 + }
57555 +#endif
57556 + return 0;
57557 +}
57558 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57559 new file mode 100644
57560 index 0000000..a45d2e9
57561 --- /dev/null
57562 +++ b/grsecurity/grsec_log.c
57563 @@ -0,0 +1,322 @@
57564 +#include <linux/kernel.h>
57565 +#include <linux/sched.h>
57566 +#include <linux/file.h>
57567 +#include <linux/tty.h>
57568 +#include <linux/fs.h>
57569 +#include <linux/grinternal.h>
57570 +
57571 +#ifdef CONFIG_TREE_PREEMPT_RCU
57572 +#define DISABLE_PREEMPT() preempt_disable()
57573 +#define ENABLE_PREEMPT() preempt_enable()
57574 +#else
57575 +#define DISABLE_PREEMPT()
57576 +#define ENABLE_PREEMPT()
57577 +#endif
57578 +
57579 +#define BEGIN_LOCKS(x) \
57580 + DISABLE_PREEMPT(); \
57581 + rcu_read_lock(); \
57582 + read_lock(&tasklist_lock); \
57583 + read_lock(&grsec_exec_file_lock); \
57584 + if (x != GR_DO_AUDIT) \
57585 + spin_lock(&grsec_alert_lock); \
57586 + else \
57587 + spin_lock(&grsec_audit_lock)
57588 +
57589 +#define END_LOCKS(x) \
57590 + if (x != GR_DO_AUDIT) \
57591 + spin_unlock(&grsec_alert_lock); \
57592 + else \
57593 + spin_unlock(&grsec_audit_lock); \
57594 + read_unlock(&grsec_exec_file_lock); \
57595 + read_unlock(&tasklist_lock); \
57596 + rcu_read_unlock(); \
57597 + ENABLE_PREEMPT(); \
57598 + if (x == GR_DONT_AUDIT) \
57599 + gr_handle_alertkill(current)
57600 +
57601 +enum {
57602 + FLOODING,
57603 + NO_FLOODING
57604 +};
57605 +
57606 +extern char *gr_alert_log_fmt;
57607 +extern char *gr_audit_log_fmt;
57608 +extern char *gr_alert_log_buf;
57609 +extern char *gr_audit_log_buf;
57610 +
57611 +static int gr_log_start(int audit)
57612 +{
57613 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57614 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57615 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57616 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57617 + unsigned long curr_secs = get_seconds();
57618 +
57619 + if (audit == GR_DO_AUDIT)
57620 + goto set_fmt;
57621 +
57622 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57623 + grsec_alert_wtime = curr_secs;
57624 + grsec_alert_fyet = 0;
57625 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57626 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
57627 + grsec_alert_fyet++;
57628 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
57629 + grsec_alert_wtime = curr_secs;
57630 + grsec_alert_fyet++;
57631 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57632 + return FLOODING;
57633 + }
57634 + else return FLOODING;
57635 +
57636 +set_fmt:
57637 +#endif
57638 + memset(buf, 0, PAGE_SIZE);
57639 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
57640 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57641 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57642 + } else if (current->signal->curr_ip) {
57643 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57644 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
57645 + } else if (gr_acl_is_enabled()) {
57646 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57647 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57648 + } else {
57649 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
57650 + strcpy(buf, fmt);
57651 + }
57652 +
57653 + return NO_FLOODING;
57654 +}
57655 +
57656 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57657 + __attribute__ ((format (printf, 2, 0)));
57658 +
57659 +static void gr_log_middle(int audit, const char *msg, va_list ap)
57660 +{
57661 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57662 + unsigned int len = strlen(buf);
57663 +
57664 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57665 +
57666 + return;
57667 +}
57668 +
57669 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57670 + __attribute__ ((format (printf, 2, 3)));
57671 +
57672 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
57673 +{
57674 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57675 + unsigned int len = strlen(buf);
57676 + va_list ap;
57677 +
57678 + va_start(ap, msg);
57679 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57680 + va_end(ap);
57681 +
57682 + return;
57683 +}
57684 +
57685 +static void gr_log_end(int audit, int append_default)
57686 +{
57687 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57688 +
57689 + if (append_default) {
57690 + unsigned int len = strlen(buf);
57691 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57692 + }
57693 +
57694 + printk("%s\n", buf);
57695 +
57696 + return;
57697 +}
57698 +
57699 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57700 +{
57701 + int logtype;
57702 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
57703 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57704 + void *voidptr = NULL;
57705 + int num1 = 0, num2 = 0;
57706 + unsigned long ulong1 = 0, ulong2 = 0;
57707 + struct dentry *dentry = NULL;
57708 + struct vfsmount *mnt = NULL;
57709 + struct file *file = NULL;
57710 + struct task_struct *task = NULL;
57711 + const struct cred *cred, *pcred;
57712 + va_list ap;
57713 +
57714 + BEGIN_LOCKS(audit);
57715 + logtype = gr_log_start(audit);
57716 + if (logtype == FLOODING) {
57717 + END_LOCKS(audit);
57718 + return;
57719 + }
57720 + va_start(ap, argtypes);
57721 + switch (argtypes) {
57722 + case GR_TTYSNIFF:
57723 + task = va_arg(ap, struct task_struct *);
57724 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
57725 + break;
57726 + case GR_SYSCTL_HIDDEN:
57727 + str1 = va_arg(ap, char *);
57728 + gr_log_middle_varargs(audit, msg, result, str1);
57729 + break;
57730 + case GR_RBAC:
57731 + dentry = va_arg(ap, struct dentry *);
57732 + mnt = va_arg(ap, struct vfsmount *);
57733 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57734 + break;
57735 + case GR_RBAC_STR:
57736 + dentry = va_arg(ap, struct dentry *);
57737 + mnt = va_arg(ap, struct vfsmount *);
57738 + str1 = va_arg(ap, char *);
57739 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57740 + break;
57741 + case GR_STR_RBAC:
57742 + str1 = va_arg(ap, char *);
57743 + dentry = va_arg(ap, struct dentry *);
57744 + mnt = va_arg(ap, struct vfsmount *);
57745 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57746 + break;
57747 + case GR_RBAC_MODE2:
57748 + dentry = va_arg(ap, struct dentry *);
57749 + mnt = va_arg(ap, struct vfsmount *);
57750 + str1 = va_arg(ap, char *);
57751 + str2 = va_arg(ap, char *);
57752 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57753 + break;
57754 + case GR_RBAC_MODE3:
57755 + dentry = va_arg(ap, struct dentry *);
57756 + mnt = va_arg(ap, struct vfsmount *);
57757 + str1 = va_arg(ap, char *);
57758 + str2 = va_arg(ap, char *);
57759 + str3 = va_arg(ap, char *);
57760 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
57761 + break;
57762 + case GR_FILENAME:
57763 + dentry = va_arg(ap, struct dentry *);
57764 + mnt = va_arg(ap, struct vfsmount *);
57765 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
57766 + break;
57767 + case GR_STR_FILENAME:
57768 + str1 = va_arg(ap, char *);
57769 + dentry = va_arg(ap, struct dentry *);
57770 + mnt = va_arg(ap, struct vfsmount *);
57771 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
57772 + break;
57773 + case GR_FILENAME_STR:
57774 + dentry = va_arg(ap, struct dentry *);
57775 + mnt = va_arg(ap, struct vfsmount *);
57776 + str1 = va_arg(ap, char *);
57777 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
57778 + break;
57779 + case GR_FILENAME_TWO_INT:
57780 + dentry = va_arg(ap, struct dentry *);
57781 + mnt = va_arg(ap, struct vfsmount *);
57782 + num1 = va_arg(ap, int);
57783 + num2 = va_arg(ap, int);
57784 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
57785 + break;
57786 + case GR_FILENAME_TWO_INT_STR:
57787 + dentry = va_arg(ap, struct dentry *);
57788 + mnt = va_arg(ap, struct vfsmount *);
57789 + num1 = va_arg(ap, int);
57790 + num2 = va_arg(ap, int);
57791 + str1 = va_arg(ap, char *);
57792 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
57793 + break;
57794 + case GR_TEXTREL:
57795 + file = va_arg(ap, struct file *);
57796 + ulong1 = va_arg(ap, unsigned long);
57797 + ulong2 = va_arg(ap, unsigned long);
57798 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
57799 + break;
57800 + case GR_PTRACE:
57801 + task = va_arg(ap, struct task_struct *);
57802 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
57803 + break;
57804 + case GR_RESOURCE:
57805 + task = va_arg(ap, struct task_struct *);
57806 + cred = __task_cred(task);
57807 + pcred = __task_cred(task->real_parent);
57808 + ulong1 = va_arg(ap, unsigned long);
57809 + str1 = va_arg(ap, char *);
57810 + ulong2 = va_arg(ap, unsigned long);
57811 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57812 + break;
57813 + case GR_CAP:
57814 + task = va_arg(ap, struct task_struct *);
57815 + cred = __task_cred(task);
57816 + pcred = __task_cred(task->real_parent);
57817 + str1 = va_arg(ap, char *);
57818 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57819 + break;
57820 + case GR_SIG:
57821 + str1 = va_arg(ap, char *);
57822 + voidptr = va_arg(ap, void *);
57823 + gr_log_middle_varargs(audit, msg, str1, voidptr);
57824 + break;
57825 + case GR_SIG2:
57826 + task = va_arg(ap, struct task_struct *);
57827 + cred = __task_cred(task);
57828 + pcred = __task_cred(task->real_parent);
57829 + num1 = va_arg(ap, int);
57830 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57831 + break;
57832 + case GR_CRASH1:
57833 + task = va_arg(ap, struct task_struct *);
57834 + cred = __task_cred(task);
57835 + pcred = __task_cred(task->real_parent);
57836 + ulong1 = va_arg(ap, unsigned long);
57837 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
57838 + break;
57839 + case GR_CRASH2:
57840 + task = va_arg(ap, struct task_struct *);
57841 + cred = __task_cred(task);
57842 + pcred = __task_cred(task->real_parent);
57843 + ulong1 = va_arg(ap, unsigned long);
57844 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
57845 + break;
57846 + case GR_RWXMAP:
57847 + file = va_arg(ap, struct file *);
57848 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
57849 + break;
57850 + case GR_PSACCT:
57851 + {
57852 + unsigned int wday, cday;
57853 + __u8 whr, chr;
57854 + __u8 wmin, cmin;
57855 + __u8 wsec, csec;
57856 + char cur_tty[64] = { 0 };
57857 + char parent_tty[64] = { 0 };
57858 +
57859 + task = va_arg(ap, struct task_struct *);
57860 + wday = va_arg(ap, unsigned int);
57861 + cday = va_arg(ap, unsigned int);
57862 + whr = va_arg(ap, int);
57863 + chr = va_arg(ap, int);
57864 + wmin = va_arg(ap, int);
57865 + cmin = va_arg(ap, int);
57866 + wsec = va_arg(ap, int);
57867 + csec = va_arg(ap, int);
57868 + ulong1 = va_arg(ap, unsigned long);
57869 + cred = __task_cred(task);
57870 + pcred = __task_cred(task->real_parent);
57871 +
57872 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
57873 + }
57874 + break;
57875 + default:
57876 + gr_log_middle(audit, msg, ap);
57877 + }
57878 + va_end(ap);
57879 + // these don't need DEFAULTSECARGS printed on the end
57880 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
57881 + gr_log_end(audit, 0);
57882 + else
57883 + gr_log_end(audit, 1);
57884 + END_LOCKS(audit);
57885 +}
57886 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
57887 new file mode 100644
57888 index 0000000..f536303
57889 --- /dev/null
57890 +++ b/grsecurity/grsec_mem.c
57891 @@ -0,0 +1,40 @@
57892 +#include <linux/kernel.h>
57893 +#include <linux/sched.h>
57894 +#include <linux/mm.h>
57895 +#include <linux/mman.h>
57896 +#include <linux/grinternal.h>
57897 +
57898 +void
57899 +gr_handle_ioperm(void)
57900 +{
57901 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
57902 + return;
57903 +}
57904 +
57905 +void
57906 +gr_handle_iopl(void)
57907 +{
57908 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
57909 + return;
57910 +}
57911 +
57912 +void
57913 +gr_handle_mem_readwrite(u64 from, u64 to)
57914 +{
57915 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
57916 + return;
57917 +}
57918 +
57919 +void
57920 +gr_handle_vm86(void)
57921 +{
57922 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
57923 + return;
57924 +}
57925 +
57926 +void
57927 +gr_log_badprocpid(const char *entry)
57928 +{
57929 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
57930 + return;
57931 +}
57932 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
57933 new file mode 100644
57934 index 0000000..2131422
57935 --- /dev/null
57936 +++ b/grsecurity/grsec_mount.c
57937 @@ -0,0 +1,62 @@
57938 +#include <linux/kernel.h>
57939 +#include <linux/sched.h>
57940 +#include <linux/mount.h>
57941 +#include <linux/grsecurity.h>
57942 +#include <linux/grinternal.h>
57943 +
57944 +void
57945 +gr_log_remount(const char *devname, const int retval)
57946 +{
57947 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57948 + if (grsec_enable_mount && (retval >= 0))
57949 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
57950 +#endif
57951 + return;
57952 +}
57953 +
57954 +void
57955 +gr_log_unmount(const char *devname, const int retval)
57956 +{
57957 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57958 + if (grsec_enable_mount && (retval >= 0))
57959 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
57960 +#endif
57961 + return;
57962 +}
57963 +
57964 +void
57965 +gr_log_mount(const char *from, const char *to, const int retval)
57966 +{
57967 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57968 + if (grsec_enable_mount && (retval >= 0))
57969 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
57970 +#endif
57971 + return;
57972 +}
57973 +
57974 +int
57975 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
57976 +{
57977 +#ifdef CONFIG_GRKERNSEC_ROFS
57978 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
57979 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
57980 + return -EPERM;
57981 + } else
57982 + return 0;
57983 +#endif
57984 + return 0;
57985 +}
57986 +
57987 +int
57988 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
57989 +{
57990 +#ifdef CONFIG_GRKERNSEC_ROFS
57991 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
57992 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
57993 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
57994 + return -EPERM;
57995 + } else
57996 + return 0;
57997 +#endif
57998 + return 0;
57999 +}
58000 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58001 new file mode 100644
58002 index 0000000..a3b12a0
58003 --- /dev/null
58004 +++ b/grsecurity/grsec_pax.c
58005 @@ -0,0 +1,36 @@
58006 +#include <linux/kernel.h>
58007 +#include <linux/sched.h>
58008 +#include <linux/mm.h>
58009 +#include <linux/file.h>
58010 +#include <linux/grinternal.h>
58011 +#include <linux/grsecurity.h>
58012 +
58013 +void
58014 +gr_log_textrel(struct vm_area_struct * vma)
58015 +{
58016 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58017 + if (grsec_enable_audit_textrel)
58018 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58019 +#endif
58020 + return;
58021 +}
58022 +
58023 +void
58024 +gr_log_rwxmmap(struct file *file)
58025 +{
58026 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58027 + if (grsec_enable_log_rwxmaps)
58028 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58029 +#endif
58030 + return;
58031 +}
58032 +
58033 +void
58034 +gr_log_rwxmprotect(struct file *file)
58035 +{
58036 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58037 + if (grsec_enable_log_rwxmaps)
58038 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58039 +#endif
58040 + return;
58041 +}
58042 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58043 new file mode 100644
58044 index 0000000..f7f29aa
58045 --- /dev/null
58046 +++ b/grsecurity/grsec_ptrace.c
58047 @@ -0,0 +1,30 @@
58048 +#include <linux/kernel.h>
58049 +#include <linux/sched.h>
58050 +#include <linux/grinternal.h>
58051 +#include <linux/security.h>
58052 +
58053 +void
58054 +gr_audit_ptrace(struct task_struct *task)
58055 +{
58056 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58057 + if (grsec_enable_audit_ptrace)
58058 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58059 +#endif
58060 + return;
58061 +}
58062 +
58063 +int
58064 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
58065 +{
58066 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58067 + const struct dentry *dentry = file->f_path.dentry;
58068 + const struct vfsmount *mnt = file->f_path.mnt;
58069 +
58070 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58071 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58072 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58073 + return -EACCES;
58074 + }
58075 +#endif
58076 + return 0;
58077 +}
58078 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58079 new file mode 100644
58080 index 0000000..7a5b2de
58081 --- /dev/null
58082 +++ b/grsecurity/grsec_sig.c
58083 @@ -0,0 +1,207 @@
58084 +#include <linux/kernel.h>
58085 +#include <linux/sched.h>
58086 +#include <linux/delay.h>
58087 +#include <linux/grsecurity.h>
58088 +#include <linux/grinternal.h>
58089 +#include <linux/hardirq.h>
58090 +
58091 +char *signames[] = {
58092 + [SIGSEGV] = "Segmentation fault",
58093 + [SIGILL] = "Illegal instruction",
58094 + [SIGABRT] = "Abort",
58095 + [SIGBUS] = "Invalid alignment/Bus error"
58096 +};
58097 +
58098 +void
58099 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58100 +{
58101 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58102 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58103 + (sig == SIGABRT) || (sig == SIGBUS))) {
58104 + if (t->pid == current->pid) {
58105 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58106 + } else {
58107 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58108 + }
58109 + }
58110 +#endif
58111 + return;
58112 +}
58113 +
58114 +int
58115 +gr_handle_signal(const struct task_struct *p, const int sig)
58116 +{
58117 +#ifdef CONFIG_GRKERNSEC
58118 + /* ignore the 0 signal for protected task checks */
58119 + if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58120 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58121 + return -EPERM;
58122 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58123 + return -EPERM;
58124 + }
58125 +#endif
58126 + return 0;
58127 +}
58128 +
58129 +#ifdef CONFIG_GRKERNSEC
58130 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58131 +
58132 +int gr_fake_force_sig(int sig, struct task_struct *t)
58133 +{
58134 + unsigned long int flags;
58135 + int ret, blocked, ignored;
58136 + struct k_sigaction *action;
58137 +
58138 + spin_lock_irqsave(&t->sighand->siglock, flags);
58139 + action = &t->sighand->action[sig-1];
58140 + ignored = action->sa.sa_handler == SIG_IGN;
58141 + blocked = sigismember(&t->blocked, sig);
58142 + if (blocked || ignored) {
58143 + action->sa.sa_handler = SIG_DFL;
58144 + if (blocked) {
58145 + sigdelset(&t->blocked, sig);
58146 + recalc_sigpending_and_wake(t);
58147 + }
58148 + }
58149 + if (action->sa.sa_handler == SIG_DFL)
58150 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
58151 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58152 +
58153 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
58154 +
58155 + return ret;
58156 +}
58157 +#endif
58158 +
58159 +#ifdef CONFIG_GRKERNSEC_BRUTE
58160 +#define GR_USER_BAN_TIME (15 * 60)
58161 +
58162 +static int __get_dumpable(unsigned long mm_flags)
58163 +{
58164 + int ret;
58165 +
58166 + ret = mm_flags & MMF_DUMPABLE_MASK;
58167 + return (ret >= 2) ? 2 : ret;
58168 +}
58169 +#endif
58170 +
58171 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58172 +{
58173 +#ifdef CONFIG_GRKERNSEC_BRUTE
58174 + uid_t uid = 0;
58175 +
58176 + if (!grsec_enable_brute)
58177 + return;
58178 +
58179 + rcu_read_lock();
58180 + read_lock(&tasklist_lock);
58181 + read_lock(&grsec_exec_file_lock);
58182 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58183 + p->real_parent->brute = 1;
58184 + else {
58185 + const struct cred *cred = __task_cred(p), *cred2;
58186 + struct task_struct *tsk, *tsk2;
58187 +
58188 + if (!__get_dumpable(mm_flags) && cred->uid) {
58189 + struct user_struct *user;
58190 +
58191 + uid = cred->uid;
58192 +
58193 + /* this is put upon execution past expiration */
58194 + user = find_user(uid);
58195 + if (user == NULL)
58196 + goto unlock;
58197 + user->banned = 1;
58198 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58199 + if (user->ban_expires == ~0UL)
58200 + user->ban_expires--;
58201 +
58202 + do_each_thread(tsk2, tsk) {
58203 + cred2 = __task_cred(tsk);
58204 + if (tsk != p && cred2->uid == uid)
58205 + gr_fake_force_sig(SIGKILL, tsk);
58206 + } while_each_thread(tsk2, tsk);
58207 + }
58208 + }
58209 +unlock:
58210 + read_unlock(&grsec_exec_file_lock);
58211 + read_unlock(&tasklist_lock);
58212 + rcu_read_unlock();
58213 +
58214 + if (uid)
58215 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58216 +
58217 +#endif
58218 + return;
58219 +}
58220 +
58221 +void gr_handle_brute_check(void)
58222 +{
58223 +#ifdef CONFIG_GRKERNSEC_BRUTE
58224 + if (current->brute)
58225 + msleep(30 * 1000);
58226 +#endif
58227 + return;
58228 +}
58229 +
58230 +void gr_handle_kernel_exploit(void)
58231 +{
58232 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58233 + const struct cred *cred;
58234 + struct task_struct *tsk, *tsk2;
58235 + struct user_struct *user;
58236 + uid_t uid;
58237 +
58238 + if (in_irq() || in_serving_softirq() || in_nmi())
58239 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58240 +
58241 + uid = current_uid();
58242 +
58243 + if (uid == 0)
58244 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
58245 + else {
58246 + /* kill all the processes of this user, hold a reference
58247 + to their creds struct, and prevent them from creating
58248 + another process until system reset
58249 + */
58250 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58251 + /* we intentionally leak this ref */
58252 + user = get_uid(current->cred->user);
58253 + if (user) {
58254 + user->banned = 1;
58255 + user->ban_expires = ~0UL;
58256 + }
58257 +
58258 + read_lock(&tasklist_lock);
58259 + do_each_thread(tsk2, tsk) {
58260 + cred = __task_cred(tsk);
58261 + if (cred->uid == uid)
58262 + gr_fake_force_sig(SIGKILL, tsk);
58263 + } while_each_thread(tsk2, tsk);
58264 + read_unlock(&tasklist_lock);
58265 + }
58266 +#endif
58267 +}
58268 +
58269 +int __gr_process_user_ban(struct user_struct *user)
58270 +{
58271 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58272 + if (unlikely(user->banned)) {
58273 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58274 + user->banned = 0;
58275 + user->ban_expires = 0;
58276 + free_uid(user);
58277 + } else
58278 + return -EPERM;
58279 + }
58280 +#endif
58281 + return 0;
58282 +}
58283 +
58284 +int gr_process_user_ban(void)
58285 +{
58286 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58287 + return __gr_process_user_ban(current->cred->user);
58288 +#endif
58289 + return 0;
58290 +}
58291 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58292 new file mode 100644
58293 index 0000000..4030d57
58294 --- /dev/null
58295 +++ b/grsecurity/grsec_sock.c
58296 @@ -0,0 +1,244 @@
58297 +#include <linux/kernel.h>
58298 +#include <linux/module.h>
58299 +#include <linux/sched.h>
58300 +#include <linux/file.h>
58301 +#include <linux/net.h>
58302 +#include <linux/in.h>
58303 +#include <linux/ip.h>
58304 +#include <net/sock.h>
58305 +#include <net/inet_sock.h>
58306 +#include <linux/grsecurity.h>
58307 +#include <linux/grinternal.h>
58308 +#include <linux/gracl.h>
58309 +
58310 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58311 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58312 +
58313 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
58314 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
58315 +
58316 +#ifdef CONFIG_UNIX_MODULE
58317 +EXPORT_SYMBOL(gr_acl_handle_unix);
58318 +EXPORT_SYMBOL(gr_acl_handle_mknod);
58319 +EXPORT_SYMBOL(gr_handle_chroot_unix);
58320 +EXPORT_SYMBOL(gr_handle_create);
58321 +#endif
58322 +
58323 +#ifdef CONFIG_GRKERNSEC
58324 +#define gr_conn_table_size 32749
58325 +struct conn_table_entry {
58326 + struct conn_table_entry *next;
58327 + struct signal_struct *sig;
58328 +};
58329 +
58330 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58331 +DEFINE_SPINLOCK(gr_conn_table_lock);
58332 +
58333 +extern const char * gr_socktype_to_name(unsigned char type);
58334 +extern const char * gr_proto_to_name(unsigned char proto);
58335 +extern const char * gr_sockfamily_to_name(unsigned char family);
58336 +
58337 +static __inline__ int
58338 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58339 +{
58340 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58341 +}
58342 +
58343 +static __inline__ int
58344 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58345 + __u16 sport, __u16 dport)
58346 +{
58347 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58348 + sig->gr_sport == sport && sig->gr_dport == dport))
58349 + return 1;
58350 + else
58351 + return 0;
58352 +}
58353 +
58354 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58355 +{
58356 + struct conn_table_entry **match;
58357 + unsigned int index;
58358 +
58359 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58360 + sig->gr_sport, sig->gr_dport,
58361 + gr_conn_table_size);
58362 +
58363 + newent->sig = sig;
58364 +
58365 + match = &gr_conn_table[index];
58366 + newent->next = *match;
58367 + *match = newent;
58368 +
58369 + return;
58370 +}
58371 +
58372 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58373 +{
58374 + struct conn_table_entry *match, *last = NULL;
58375 + unsigned int index;
58376 +
58377 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58378 + sig->gr_sport, sig->gr_dport,
58379 + gr_conn_table_size);
58380 +
58381 + match = gr_conn_table[index];
58382 + while (match && !conn_match(match->sig,
58383 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58384 + sig->gr_dport)) {
58385 + last = match;
58386 + match = match->next;
58387 + }
58388 +
58389 + if (match) {
58390 + if (last)
58391 + last->next = match->next;
58392 + else
58393 + gr_conn_table[index] = NULL;
58394 + kfree(match);
58395 + }
58396 +
58397 + return;
58398 +}
58399 +
58400 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58401 + __u16 sport, __u16 dport)
58402 +{
58403 + struct conn_table_entry *match;
58404 + unsigned int index;
58405 +
58406 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58407 +
58408 + match = gr_conn_table[index];
58409 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58410 + match = match->next;
58411 +
58412 + if (match)
58413 + return match->sig;
58414 + else
58415 + return NULL;
58416 +}
58417 +
58418 +#endif
58419 +
58420 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58421 +{
58422 +#ifdef CONFIG_GRKERNSEC
58423 + struct signal_struct *sig = task->signal;
58424 + struct conn_table_entry *newent;
58425 +
58426 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58427 + if (newent == NULL)
58428 + return;
58429 + /* no bh lock needed since we are called with bh disabled */
58430 + spin_lock(&gr_conn_table_lock);
58431 + gr_del_task_from_ip_table_nolock(sig);
58432 + sig->gr_saddr = inet->inet_rcv_saddr;
58433 + sig->gr_daddr = inet->inet_daddr;
58434 + sig->gr_sport = inet->inet_sport;
58435 + sig->gr_dport = inet->inet_dport;
58436 + gr_add_to_task_ip_table_nolock(sig, newent);
58437 + spin_unlock(&gr_conn_table_lock);
58438 +#endif
58439 + return;
58440 +}
58441 +
58442 +void gr_del_task_from_ip_table(struct task_struct *task)
58443 +{
58444 +#ifdef CONFIG_GRKERNSEC
58445 + spin_lock_bh(&gr_conn_table_lock);
58446 + gr_del_task_from_ip_table_nolock(task->signal);
58447 + spin_unlock_bh(&gr_conn_table_lock);
58448 +#endif
58449 + return;
58450 +}
58451 +
58452 +void
58453 +gr_attach_curr_ip(const struct sock *sk)
58454 +{
58455 +#ifdef CONFIG_GRKERNSEC
58456 + struct signal_struct *p, *set;
58457 + const struct inet_sock *inet = inet_sk(sk);
58458 +
58459 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58460 + return;
58461 +
58462 + set = current->signal;
58463 +
58464 + spin_lock_bh(&gr_conn_table_lock);
58465 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58466 + inet->inet_dport, inet->inet_sport);
58467 + if (unlikely(p != NULL)) {
58468 + set->curr_ip = p->curr_ip;
58469 + set->used_accept = 1;
58470 + gr_del_task_from_ip_table_nolock(p);
58471 + spin_unlock_bh(&gr_conn_table_lock);
58472 + return;
58473 + }
58474 + spin_unlock_bh(&gr_conn_table_lock);
58475 +
58476 + set->curr_ip = inet->inet_daddr;
58477 + set->used_accept = 1;
58478 +#endif
58479 + return;
58480 +}
58481 +
58482 +int
58483 +gr_handle_sock_all(const int family, const int type, const int protocol)
58484 +{
58485 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58486 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
58487 + (family != AF_UNIX)) {
58488 + if (family == AF_INET)
58489 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58490 + else
58491 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58492 + return -EACCES;
58493 + }
58494 +#endif
58495 + return 0;
58496 +}
58497 +
58498 +int
58499 +gr_handle_sock_server(const struct sockaddr *sck)
58500 +{
58501 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58502 + if (grsec_enable_socket_server &&
58503 + in_group_p(grsec_socket_server_gid) &&
58504 + sck && (sck->sa_family != AF_UNIX) &&
58505 + (sck->sa_family != AF_LOCAL)) {
58506 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58507 + return -EACCES;
58508 + }
58509 +#endif
58510 + return 0;
58511 +}
58512 +
58513 +int
58514 +gr_handle_sock_server_other(const struct sock *sck)
58515 +{
58516 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58517 + if (grsec_enable_socket_server &&
58518 + in_group_p(grsec_socket_server_gid) &&
58519 + sck && (sck->sk_family != AF_UNIX) &&
58520 + (sck->sk_family != AF_LOCAL)) {
58521 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58522 + return -EACCES;
58523 + }
58524 +#endif
58525 + return 0;
58526 +}
58527 +
58528 +int
58529 +gr_handle_sock_client(const struct sockaddr *sck)
58530 +{
58531 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58532 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58533 + sck && (sck->sa_family != AF_UNIX) &&
58534 + (sck->sa_family != AF_LOCAL)) {
58535 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58536 + return -EACCES;
58537 + }
58538 +#endif
58539 + return 0;
58540 +}
58541 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58542 new file mode 100644
58543 index 0000000..f55ef0f
58544 --- /dev/null
58545 +++ b/grsecurity/grsec_sysctl.c
58546 @@ -0,0 +1,469 @@
58547 +#include <linux/kernel.h>
58548 +#include <linux/sched.h>
58549 +#include <linux/sysctl.h>
58550 +#include <linux/grsecurity.h>
58551 +#include <linux/grinternal.h>
58552 +
58553 +int
58554 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58555 +{
58556 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58557 + if (dirname == NULL || name == NULL)
58558 + return 0;
58559 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58560 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58561 + return -EACCES;
58562 + }
58563 +#endif
58564 + return 0;
58565 +}
58566 +
58567 +#ifdef CONFIG_GRKERNSEC_ROFS
58568 +static int __maybe_unused one = 1;
58569 +#endif
58570 +
58571 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58572 +struct ctl_table grsecurity_table[] = {
58573 +#ifdef CONFIG_GRKERNSEC_SYSCTL
58574 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58575 +#ifdef CONFIG_GRKERNSEC_IO
58576 + {
58577 + .procname = "disable_priv_io",
58578 + .data = &grsec_disable_privio,
58579 + .maxlen = sizeof(int),
58580 + .mode = 0600,
58581 + .proc_handler = &proc_dointvec,
58582 + },
58583 +#endif
58584 +#endif
58585 +#ifdef CONFIG_GRKERNSEC_LINK
58586 + {
58587 + .procname = "linking_restrictions",
58588 + .data = &grsec_enable_link,
58589 + .maxlen = sizeof(int),
58590 + .mode = 0600,
58591 + .proc_handler = &proc_dointvec,
58592 + },
58593 +#endif
58594 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58595 + {
58596 + .procname = "enforce_symlinksifowner",
58597 + .data = &grsec_enable_symlinkown,
58598 + .maxlen = sizeof(int),
58599 + .mode = 0600,
58600 + .proc_handler = &proc_dointvec,
58601 + },
58602 + {
58603 + .procname = "symlinkown_gid",
58604 + .data = &grsec_symlinkown_gid,
58605 + .maxlen = sizeof(int),
58606 + .mode = 0600,
58607 + .proc_handler = &proc_dointvec,
58608 + },
58609 +#endif
58610 +#ifdef CONFIG_GRKERNSEC_BRUTE
58611 + {
58612 + .procname = "deter_bruteforce",
58613 + .data = &grsec_enable_brute,
58614 + .maxlen = sizeof(int),
58615 + .mode = 0600,
58616 + .proc_handler = &proc_dointvec,
58617 + },
58618 +#endif
58619 +#ifdef CONFIG_GRKERNSEC_FIFO
58620 + {
58621 + .procname = "fifo_restrictions",
58622 + .data = &grsec_enable_fifo,
58623 + .maxlen = sizeof(int),
58624 + .mode = 0600,
58625 + .proc_handler = &proc_dointvec,
58626 + },
58627 +#endif
58628 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58629 + {
58630 + .procname = "ptrace_readexec",
58631 + .data = &grsec_enable_ptrace_readexec,
58632 + .maxlen = sizeof(int),
58633 + .mode = 0600,
58634 + .proc_handler = &proc_dointvec,
58635 + },
58636 +#endif
58637 +#ifdef CONFIG_GRKERNSEC_SETXID
58638 + {
58639 + .procname = "consistent_setxid",
58640 + .data = &grsec_enable_setxid,
58641 + .maxlen = sizeof(int),
58642 + .mode = 0600,
58643 + .proc_handler = &proc_dointvec,
58644 + },
58645 +#endif
58646 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58647 + {
58648 + .procname = "ip_blackhole",
58649 + .data = &grsec_enable_blackhole,
58650 + .maxlen = sizeof(int),
58651 + .mode = 0600,
58652 + .proc_handler = &proc_dointvec,
58653 + },
58654 + {
58655 + .procname = "lastack_retries",
58656 + .data = &grsec_lastack_retries,
58657 + .maxlen = sizeof(int),
58658 + .mode = 0600,
58659 + .proc_handler = &proc_dointvec,
58660 + },
58661 +#endif
58662 +#ifdef CONFIG_GRKERNSEC_EXECLOG
58663 + {
58664 + .procname = "exec_logging",
58665 + .data = &grsec_enable_execlog,
58666 + .maxlen = sizeof(int),
58667 + .mode = 0600,
58668 + .proc_handler = &proc_dointvec,
58669 + },
58670 +#endif
58671 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58672 + {
58673 + .procname = "rwxmap_logging",
58674 + .data = &grsec_enable_log_rwxmaps,
58675 + .maxlen = sizeof(int),
58676 + .mode = 0600,
58677 + .proc_handler = &proc_dointvec,
58678 + },
58679 +#endif
58680 +#ifdef CONFIG_GRKERNSEC_SIGNAL
58681 + {
58682 + .procname = "signal_logging",
58683 + .data = &grsec_enable_signal,
58684 + .maxlen = sizeof(int),
58685 + .mode = 0600,
58686 + .proc_handler = &proc_dointvec,
58687 + },
58688 +#endif
58689 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
58690 + {
58691 + .procname = "forkfail_logging",
58692 + .data = &grsec_enable_forkfail,
58693 + .maxlen = sizeof(int),
58694 + .mode = 0600,
58695 + .proc_handler = &proc_dointvec,
58696 + },
58697 +#endif
58698 +#ifdef CONFIG_GRKERNSEC_TIME
58699 + {
58700 + .procname = "timechange_logging",
58701 + .data = &grsec_enable_time,
58702 + .maxlen = sizeof(int),
58703 + .mode = 0600,
58704 + .proc_handler = &proc_dointvec,
58705 + },
58706 +#endif
58707 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58708 + {
58709 + .procname = "chroot_deny_shmat",
58710 + .data = &grsec_enable_chroot_shmat,
58711 + .maxlen = sizeof(int),
58712 + .mode = 0600,
58713 + .proc_handler = &proc_dointvec,
58714 + },
58715 +#endif
58716 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58717 + {
58718 + .procname = "chroot_deny_unix",
58719 + .data = &grsec_enable_chroot_unix,
58720 + .maxlen = sizeof(int),
58721 + .mode = 0600,
58722 + .proc_handler = &proc_dointvec,
58723 + },
58724 +#endif
58725 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58726 + {
58727 + .procname = "chroot_deny_mount",
58728 + .data = &grsec_enable_chroot_mount,
58729 + .maxlen = sizeof(int),
58730 + .mode = 0600,
58731 + .proc_handler = &proc_dointvec,
58732 + },
58733 +#endif
58734 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58735 + {
58736 + .procname = "chroot_deny_fchdir",
58737 + .data = &grsec_enable_chroot_fchdir,
58738 + .maxlen = sizeof(int),
58739 + .mode = 0600,
58740 + .proc_handler = &proc_dointvec,
58741 + },
58742 +#endif
58743 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58744 + {
58745 + .procname = "chroot_deny_chroot",
58746 + .data = &grsec_enable_chroot_double,
58747 + .maxlen = sizeof(int),
58748 + .mode = 0600,
58749 + .proc_handler = &proc_dointvec,
58750 + },
58751 +#endif
58752 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58753 + {
58754 + .procname = "chroot_deny_pivot",
58755 + .data = &grsec_enable_chroot_pivot,
58756 + .maxlen = sizeof(int),
58757 + .mode = 0600,
58758 + .proc_handler = &proc_dointvec,
58759 + },
58760 +#endif
58761 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
58762 + {
58763 + .procname = "chroot_enforce_chdir",
58764 + .data = &grsec_enable_chroot_chdir,
58765 + .maxlen = sizeof(int),
58766 + .mode = 0600,
58767 + .proc_handler = &proc_dointvec,
58768 + },
58769 +#endif
58770 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
58771 + {
58772 + .procname = "chroot_deny_chmod",
58773 + .data = &grsec_enable_chroot_chmod,
58774 + .maxlen = sizeof(int),
58775 + .mode = 0600,
58776 + .proc_handler = &proc_dointvec,
58777 + },
58778 +#endif
58779 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
58780 + {
58781 + .procname = "chroot_deny_mknod",
58782 + .data = &grsec_enable_chroot_mknod,
58783 + .maxlen = sizeof(int),
58784 + .mode = 0600,
58785 + .proc_handler = &proc_dointvec,
58786 + },
58787 +#endif
58788 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
58789 + {
58790 + .procname = "chroot_restrict_nice",
58791 + .data = &grsec_enable_chroot_nice,
58792 + .maxlen = sizeof(int),
58793 + .mode = 0600,
58794 + .proc_handler = &proc_dointvec,
58795 + },
58796 +#endif
58797 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
58798 + {
58799 + .procname = "chroot_execlog",
58800 + .data = &grsec_enable_chroot_execlog,
58801 + .maxlen = sizeof(int),
58802 + .mode = 0600,
58803 + .proc_handler = &proc_dointvec,
58804 + },
58805 +#endif
58806 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58807 + {
58808 + .procname = "chroot_caps",
58809 + .data = &grsec_enable_chroot_caps,
58810 + .maxlen = sizeof(int),
58811 + .mode = 0600,
58812 + .proc_handler = &proc_dointvec,
58813 + },
58814 +#endif
58815 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
58816 + {
58817 + .procname = "chroot_deny_sysctl",
58818 + .data = &grsec_enable_chroot_sysctl,
58819 + .maxlen = sizeof(int),
58820 + .mode = 0600,
58821 + .proc_handler = &proc_dointvec,
58822 + },
58823 +#endif
58824 +#ifdef CONFIG_GRKERNSEC_TPE
58825 + {
58826 + .procname = "tpe",
58827 + .data = &grsec_enable_tpe,
58828 + .maxlen = sizeof(int),
58829 + .mode = 0600,
58830 + .proc_handler = &proc_dointvec,
58831 + },
58832 + {
58833 + .procname = "tpe_gid",
58834 + .data = &grsec_tpe_gid,
58835 + .maxlen = sizeof(int),
58836 + .mode = 0600,
58837 + .proc_handler = &proc_dointvec,
58838 + },
58839 +#endif
58840 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
58841 + {
58842 + .procname = "tpe_invert",
58843 + .data = &grsec_enable_tpe_invert,
58844 + .maxlen = sizeof(int),
58845 + .mode = 0600,
58846 + .proc_handler = &proc_dointvec,
58847 + },
58848 +#endif
58849 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
58850 + {
58851 + .procname = "tpe_restrict_all",
58852 + .data = &grsec_enable_tpe_all,
58853 + .maxlen = sizeof(int),
58854 + .mode = 0600,
58855 + .proc_handler = &proc_dointvec,
58856 + },
58857 +#endif
58858 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58859 + {
58860 + .procname = "socket_all",
58861 + .data = &grsec_enable_socket_all,
58862 + .maxlen = sizeof(int),
58863 + .mode = 0600,
58864 + .proc_handler = &proc_dointvec,
58865 + },
58866 + {
58867 + .procname = "socket_all_gid",
58868 + .data = &grsec_socket_all_gid,
58869 + .maxlen = sizeof(int),
58870 + .mode = 0600,
58871 + .proc_handler = &proc_dointvec,
58872 + },
58873 +#endif
58874 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58875 + {
58876 + .procname = "socket_client",
58877 + .data = &grsec_enable_socket_client,
58878 + .maxlen = sizeof(int),
58879 + .mode = 0600,
58880 + .proc_handler = &proc_dointvec,
58881 + },
58882 + {
58883 + .procname = "socket_client_gid",
58884 + .data = &grsec_socket_client_gid,
58885 + .maxlen = sizeof(int),
58886 + .mode = 0600,
58887 + .proc_handler = &proc_dointvec,
58888 + },
58889 +#endif
58890 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58891 + {
58892 + .procname = "socket_server",
58893 + .data = &grsec_enable_socket_server,
58894 + .maxlen = sizeof(int),
58895 + .mode = 0600,
58896 + .proc_handler = &proc_dointvec,
58897 + },
58898 + {
58899 + .procname = "socket_server_gid",
58900 + .data = &grsec_socket_server_gid,
58901 + .maxlen = sizeof(int),
58902 + .mode = 0600,
58903 + .proc_handler = &proc_dointvec,
58904 + },
58905 +#endif
58906 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
58907 + {
58908 + .procname = "audit_group",
58909 + .data = &grsec_enable_group,
58910 + .maxlen = sizeof(int),
58911 + .mode = 0600,
58912 + .proc_handler = &proc_dointvec,
58913 + },
58914 + {
58915 + .procname = "audit_gid",
58916 + .data = &grsec_audit_gid,
58917 + .maxlen = sizeof(int),
58918 + .mode = 0600,
58919 + .proc_handler = &proc_dointvec,
58920 + },
58921 +#endif
58922 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
58923 + {
58924 + .procname = "audit_chdir",
58925 + .data = &grsec_enable_chdir,
58926 + .maxlen = sizeof(int),
58927 + .mode = 0600,
58928 + .proc_handler = &proc_dointvec,
58929 + },
58930 +#endif
58931 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58932 + {
58933 + .procname = "audit_mount",
58934 + .data = &grsec_enable_mount,
58935 + .maxlen = sizeof(int),
58936 + .mode = 0600,
58937 + .proc_handler = &proc_dointvec,
58938 + },
58939 +#endif
58940 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58941 + {
58942 + .procname = "audit_textrel",
58943 + .data = &grsec_enable_audit_textrel,
58944 + .maxlen = sizeof(int),
58945 + .mode = 0600,
58946 + .proc_handler = &proc_dointvec,
58947 + },
58948 +#endif
58949 +#ifdef CONFIG_GRKERNSEC_DMESG
58950 + {
58951 + .procname = "dmesg",
58952 + .data = &grsec_enable_dmesg,
58953 + .maxlen = sizeof(int),
58954 + .mode = 0600,
58955 + .proc_handler = &proc_dointvec,
58956 + },
58957 +#endif
58958 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
58959 + {
58960 + .procname = "chroot_findtask",
58961 + .data = &grsec_enable_chroot_findtask,
58962 + .maxlen = sizeof(int),
58963 + .mode = 0600,
58964 + .proc_handler = &proc_dointvec,
58965 + },
58966 +#endif
58967 +#ifdef CONFIG_GRKERNSEC_RESLOG
58968 + {
58969 + .procname = "resource_logging",
58970 + .data = &grsec_resource_logging,
58971 + .maxlen = sizeof(int),
58972 + .mode = 0600,
58973 + .proc_handler = &proc_dointvec,
58974 + },
58975 +#endif
58976 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58977 + {
58978 + .procname = "audit_ptrace",
58979 + .data = &grsec_enable_audit_ptrace,
58980 + .maxlen = sizeof(int),
58981 + .mode = 0600,
58982 + .proc_handler = &proc_dointvec,
58983 + },
58984 +#endif
58985 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
58986 + {
58987 + .procname = "harden_ptrace",
58988 + .data = &grsec_enable_harden_ptrace,
58989 + .maxlen = sizeof(int),
58990 + .mode = 0600,
58991 + .proc_handler = &proc_dointvec,
58992 + },
58993 +#endif
58994 + {
58995 + .procname = "grsec_lock",
58996 + .data = &grsec_lock,
58997 + .maxlen = sizeof(int),
58998 + .mode = 0600,
58999 + .proc_handler = &proc_dointvec,
59000 + },
59001 +#endif
59002 +#ifdef CONFIG_GRKERNSEC_ROFS
59003 + {
59004 + .procname = "romount_protect",
59005 + .data = &grsec_enable_rofs,
59006 + .maxlen = sizeof(int),
59007 + .mode = 0600,
59008 + .proc_handler = &proc_dointvec_minmax,
59009 + .extra1 = &one,
59010 + .extra2 = &one,
59011 + },
59012 +#endif
59013 + { }
59014 +};
59015 +#endif
59016 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59017 new file mode 100644
59018 index 0000000..0dc13c3
59019 --- /dev/null
59020 +++ b/grsecurity/grsec_time.c
59021 @@ -0,0 +1,16 @@
59022 +#include <linux/kernel.h>
59023 +#include <linux/sched.h>
59024 +#include <linux/grinternal.h>
59025 +#include <linux/module.h>
59026 +
59027 +void
59028 +gr_log_timechange(void)
59029 +{
59030 +#ifdef CONFIG_GRKERNSEC_TIME
59031 + if (grsec_enable_time)
59032 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59033 +#endif
59034 + return;
59035 +}
59036 +
59037 +EXPORT_SYMBOL(gr_log_timechange);
59038 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59039 new file mode 100644
59040 index 0000000..07e0dc0
59041 --- /dev/null
59042 +++ b/grsecurity/grsec_tpe.c
59043 @@ -0,0 +1,73 @@
59044 +#include <linux/kernel.h>
59045 +#include <linux/sched.h>
59046 +#include <linux/file.h>
59047 +#include <linux/fs.h>
59048 +#include <linux/grinternal.h>
59049 +
59050 +extern int gr_acl_tpe_check(void);
59051 +
59052 +int
59053 +gr_tpe_allow(const struct file *file)
59054 +{
59055 +#ifdef CONFIG_GRKERNSEC
59056 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59057 + const struct cred *cred = current_cred();
59058 + char *msg = NULL;
59059 + char *msg2 = NULL;
59060 +
59061 + // never restrict root
59062 + if (!cred->uid)
59063 + return 1;
59064 +
59065 + if (grsec_enable_tpe) {
59066 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59067 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59068 + msg = "not being in trusted group";
59069 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59070 + msg = "being in untrusted group";
59071 +#else
59072 + if (in_group_p(grsec_tpe_gid))
59073 + msg = "being in untrusted group";
59074 +#endif
59075 + }
59076 + if (!msg && gr_acl_tpe_check())
59077 + msg = "being in untrusted role";
59078 +
59079 + // not in any affected group/role
59080 + if (!msg)
59081 + goto next_check;
59082 +
59083 + if (inode->i_uid)
59084 + msg2 = "file in non-root-owned directory";
59085 + else if (inode->i_mode & S_IWOTH)
59086 + msg2 = "file in world-writable directory";
59087 + else if (inode->i_mode & S_IWGRP)
59088 + msg2 = "file in group-writable directory";
59089 +
59090 + if (msg && msg2) {
59091 + char fullmsg[70] = {0};
59092 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59093 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59094 + return 0;
59095 + }
59096 + msg = NULL;
59097 +next_check:
59098 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
59099 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59100 + return 1;
59101 +
59102 + if (inode->i_uid && (inode->i_uid != cred->uid))
59103 + msg = "directory not owned by user";
59104 + else if (inode->i_mode & S_IWOTH)
59105 + msg = "file in world-writable directory";
59106 + else if (inode->i_mode & S_IWGRP)
59107 + msg = "file in group-writable directory";
59108 +
59109 + if (msg) {
59110 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59111 + return 0;
59112 + }
59113 +#endif
59114 +#endif
59115 + return 1;
59116 +}
59117 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59118 new file mode 100644
59119 index 0000000..9f7b1ac
59120 --- /dev/null
59121 +++ b/grsecurity/grsum.c
59122 @@ -0,0 +1,61 @@
59123 +#include <linux/err.h>
59124 +#include <linux/kernel.h>
59125 +#include <linux/sched.h>
59126 +#include <linux/mm.h>
59127 +#include <linux/scatterlist.h>
59128 +#include <linux/crypto.h>
59129 +#include <linux/gracl.h>
59130 +
59131 +
59132 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59133 +#error "crypto and sha256 must be built into the kernel"
59134 +#endif
59135 +
59136 +int
59137 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59138 +{
59139 + char *p;
59140 + struct crypto_hash *tfm;
59141 + struct hash_desc desc;
59142 + struct scatterlist sg;
59143 + unsigned char temp_sum[GR_SHA_LEN];
59144 + volatile int retval = 0;
59145 + volatile int dummy = 0;
59146 + unsigned int i;
59147 +
59148 + sg_init_table(&sg, 1);
59149 +
59150 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59151 + if (IS_ERR(tfm)) {
59152 + /* should never happen, since sha256 should be built in */
59153 + return 1;
59154 + }
59155 +
59156 + desc.tfm = tfm;
59157 + desc.flags = 0;
59158 +
59159 + crypto_hash_init(&desc);
59160 +
59161 + p = salt;
59162 + sg_set_buf(&sg, p, GR_SALT_LEN);
59163 + crypto_hash_update(&desc, &sg, sg.length);
59164 +
59165 + p = entry->pw;
59166 + sg_set_buf(&sg, p, strlen(p));
59167 +
59168 + crypto_hash_update(&desc, &sg, sg.length);
59169 +
59170 + crypto_hash_final(&desc, temp_sum);
59171 +
59172 + memset(entry->pw, 0, GR_PW_LEN);
59173 +
59174 + for (i = 0; i < GR_SHA_LEN; i++)
59175 + if (sum[i] != temp_sum[i])
59176 + retval = 1;
59177 + else
59178 + dummy = 1; // waste a cycle
59179 +
59180 + crypto_free_hash(tfm);
59181 +
59182 + return retval;
59183 +}
59184 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
59185 index f1c8ca6..b5c1cc7 100644
59186 --- a/include/acpi/acpi_bus.h
59187 +++ b/include/acpi/acpi_bus.h
59188 @@ -107,7 +107,7 @@ struct acpi_device_ops {
59189 acpi_op_bind bind;
59190 acpi_op_unbind unbind;
59191 acpi_op_notify notify;
59192 -};
59193 +} __no_const;
59194
59195 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59196
59197 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
59198 index b7babf0..c1e2d45 100644
59199 --- a/include/asm-generic/atomic-long.h
59200 +++ b/include/asm-generic/atomic-long.h
59201 @@ -22,6 +22,12 @@
59202
59203 typedef atomic64_t atomic_long_t;
59204
59205 +#ifdef CONFIG_PAX_REFCOUNT
59206 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
59207 +#else
59208 +typedef atomic64_t atomic_long_unchecked_t;
59209 +#endif
59210 +
59211 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
59212
59213 static inline long atomic_long_read(atomic_long_t *l)
59214 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59215 return (long)atomic64_read(v);
59216 }
59217
59218 +#ifdef CONFIG_PAX_REFCOUNT
59219 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59220 +{
59221 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59222 +
59223 + return (long)atomic64_read_unchecked(v);
59224 +}
59225 +#endif
59226 +
59227 static inline void atomic_long_set(atomic_long_t *l, long i)
59228 {
59229 atomic64_t *v = (atomic64_t *)l;
59230 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59231 atomic64_set(v, i);
59232 }
59233
59234 +#ifdef CONFIG_PAX_REFCOUNT
59235 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59236 +{
59237 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59238 +
59239 + atomic64_set_unchecked(v, i);
59240 +}
59241 +#endif
59242 +
59243 static inline void atomic_long_inc(atomic_long_t *l)
59244 {
59245 atomic64_t *v = (atomic64_t *)l;
59246 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59247 atomic64_inc(v);
59248 }
59249
59250 +#ifdef CONFIG_PAX_REFCOUNT
59251 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59252 +{
59253 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59254 +
59255 + atomic64_inc_unchecked(v);
59256 +}
59257 +#endif
59258 +
59259 static inline void atomic_long_dec(atomic_long_t *l)
59260 {
59261 atomic64_t *v = (atomic64_t *)l;
59262 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59263 atomic64_dec(v);
59264 }
59265
59266 +#ifdef CONFIG_PAX_REFCOUNT
59267 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59268 +{
59269 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59270 +
59271 + atomic64_dec_unchecked(v);
59272 +}
59273 +#endif
59274 +
59275 static inline void atomic_long_add(long i, atomic_long_t *l)
59276 {
59277 atomic64_t *v = (atomic64_t *)l;
59278 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59279 atomic64_add(i, v);
59280 }
59281
59282 +#ifdef CONFIG_PAX_REFCOUNT
59283 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59284 +{
59285 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59286 +
59287 + atomic64_add_unchecked(i, v);
59288 +}
59289 +#endif
59290 +
59291 static inline void atomic_long_sub(long i, atomic_long_t *l)
59292 {
59293 atomic64_t *v = (atomic64_t *)l;
59294 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59295 atomic64_sub(i, v);
59296 }
59297
59298 +#ifdef CONFIG_PAX_REFCOUNT
59299 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59300 +{
59301 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59302 +
59303 + atomic64_sub_unchecked(i, v);
59304 +}
59305 +#endif
59306 +
59307 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59308 {
59309 atomic64_t *v = (atomic64_t *)l;
59310 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59311 return (long)atomic64_inc_return(v);
59312 }
59313
59314 +#ifdef CONFIG_PAX_REFCOUNT
59315 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59316 +{
59317 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59318 +
59319 + return (long)atomic64_inc_return_unchecked(v);
59320 +}
59321 +#endif
59322 +
59323 static inline long atomic_long_dec_return(atomic_long_t *l)
59324 {
59325 atomic64_t *v = (atomic64_t *)l;
59326 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59327
59328 typedef atomic_t atomic_long_t;
59329
59330 +#ifdef CONFIG_PAX_REFCOUNT
59331 +typedef atomic_unchecked_t atomic_long_unchecked_t;
59332 +#else
59333 +typedef atomic_t atomic_long_unchecked_t;
59334 +#endif
59335 +
59336 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59337 static inline long atomic_long_read(atomic_long_t *l)
59338 {
59339 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59340 return (long)atomic_read(v);
59341 }
59342
59343 +#ifdef CONFIG_PAX_REFCOUNT
59344 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59345 +{
59346 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59347 +
59348 + return (long)atomic_read_unchecked(v);
59349 +}
59350 +#endif
59351 +
59352 static inline void atomic_long_set(atomic_long_t *l, long i)
59353 {
59354 atomic_t *v = (atomic_t *)l;
59355 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59356 atomic_set(v, i);
59357 }
59358
59359 +#ifdef CONFIG_PAX_REFCOUNT
59360 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59361 +{
59362 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59363 +
59364 + atomic_set_unchecked(v, i);
59365 +}
59366 +#endif
59367 +
59368 static inline void atomic_long_inc(atomic_long_t *l)
59369 {
59370 atomic_t *v = (atomic_t *)l;
59371 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59372 atomic_inc(v);
59373 }
59374
59375 +#ifdef CONFIG_PAX_REFCOUNT
59376 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59377 +{
59378 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59379 +
59380 + atomic_inc_unchecked(v);
59381 +}
59382 +#endif
59383 +
59384 static inline void atomic_long_dec(atomic_long_t *l)
59385 {
59386 atomic_t *v = (atomic_t *)l;
59387 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59388 atomic_dec(v);
59389 }
59390
59391 +#ifdef CONFIG_PAX_REFCOUNT
59392 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59393 +{
59394 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59395 +
59396 + atomic_dec_unchecked(v);
59397 +}
59398 +#endif
59399 +
59400 static inline void atomic_long_add(long i, atomic_long_t *l)
59401 {
59402 atomic_t *v = (atomic_t *)l;
59403 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59404 atomic_add(i, v);
59405 }
59406
59407 +#ifdef CONFIG_PAX_REFCOUNT
59408 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59409 +{
59410 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59411 +
59412 + atomic_add_unchecked(i, v);
59413 +}
59414 +#endif
59415 +
59416 static inline void atomic_long_sub(long i, atomic_long_t *l)
59417 {
59418 atomic_t *v = (atomic_t *)l;
59419 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59420 atomic_sub(i, v);
59421 }
59422
59423 +#ifdef CONFIG_PAX_REFCOUNT
59424 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59425 +{
59426 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59427 +
59428 + atomic_sub_unchecked(i, v);
59429 +}
59430 +#endif
59431 +
59432 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59433 {
59434 atomic_t *v = (atomic_t *)l;
59435 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59436 return (long)atomic_inc_return(v);
59437 }
59438
59439 +#ifdef CONFIG_PAX_REFCOUNT
59440 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59441 +{
59442 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59443 +
59444 + return (long)atomic_inc_return_unchecked(v);
59445 +}
59446 +#endif
59447 +
59448 static inline long atomic_long_dec_return(atomic_long_t *l)
59449 {
59450 atomic_t *v = (atomic_t *)l;
59451 @@ -255,4 +393,53 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59452
59453 #endif /* BITS_PER_LONG == 64 */
59454
59455 +#ifdef CONFIG_PAX_REFCOUNT
59456 +static inline void pax_refcount_needs_these_functions(void)
59457 +{
59458 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
59459 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59460 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59461 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59462 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
59463 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
59464 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
59465 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59466 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59467 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
59468 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59469 + atomic_clear_mask_unchecked(0, NULL);
59470 + atomic_set_mask_unchecked(0, NULL);
59471 +
59472 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59473 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59474 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
59475 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59476 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59477 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
59478 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59479 +}
59480 +#else
59481 +#define atomic_read_unchecked(v) atomic_read(v)
59482 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59483 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59484 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59485 +#define atomic_inc_unchecked(v) atomic_inc(v)
59486 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
59487 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
59488 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59489 +#define atomic_dec_unchecked(v) atomic_dec(v)
59490 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59491 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59492 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
59493 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
59494 +
59495 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
59496 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59497 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
59498 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59499 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59500 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
59501 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59502 +#endif
59503 +
59504 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59505 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
59506 index 1ced641..c896ee8 100644
59507 --- a/include/asm-generic/atomic.h
59508 +++ b/include/asm-generic/atomic.h
59509 @@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
59510 * Atomically clears the bits set in @mask from @v
59511 */
59512 #ifndef atomic_clear_mask
59513 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
59514 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
59515 {
59516 unsigned long flags;
59517
59518 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59519 index b18ce4f..2ee2843 100644
59520 --- a/include/asm-generic/atomic64.h
59521 +++ b/include/asm-generic/atomic64.h
59522 @@ -16,6 +16,8 @@ typedef struct {
59523 long long counter;
59524 } atomic64_t;
59525
59526 +typedef atomic64_t atomic64_unchecked_t;
59527 +
59528 #define ATOMIC64_INIT(i) { (i) }
59529
59530 extern long long atomic64_read(const atomic64_t *v);
59531 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59532 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59533 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59534
59535 +#define atomic64_read_unchecked(v) atomic64_read(v)
59536 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59537 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59538 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59539 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59540 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
59541 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59542 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
59543 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59544 +
59545 #endif /* _ASM_GENERIC_ATOMIC64_H */
59546 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59547 index 1bfcfe5..e04c5c9 100644
59548 --- a/include/asm-generic/cache.h
59549 +++ b/include/asm-generic/cache.h
59550 @@ -6,7 +6,7 @@
59551 * cache lines need to provide their own cache.h.
59552 */
59553
59554 -#define L1_CACHE_SHIFT 5
59555 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59556 +#define L1_CACHE_SHIFT 5UL
59557 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59558
59559 #endif /* __ASM_GENERIC_CACHE_H */
59560 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59561 index 0d68a1e..b74a761 100644
59562 --- a/include/asm-generic/emergency-restart.h
59563 +++ b/include/asm-generic/emergency-restart.h
59564 @@ -1,7 +1,7 @@
59565 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59566 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59567
59568 -static inline void machine_emergency_restart(void)
59569 +static inline __noreturn void machine_emergency_restart(void)
59570 {
59571 machine_restart(NULL);
59572 }
59573 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59574 index 0232ccb..13d9165 100644
59575 --- a/include/asm-generic/kmap_types.h
59576 +++ b/include/asm-generic/kmap_types.h
59577 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59578 KMAP_D(17) KM_NMI,
59579 KMAP_D(18) KM_NMI_PTE,
59580 KMAP_D(19) KM_KDB,
59581 +KMAP_D(20) KM_CLEARPAGE,
59582 /*
59583 * Remember to update debug_kmap_atomic() when adding new kmap types!
59584 */
59585 -KMAP_D(20) KM_TYPE_NR
59586 +KMAP_D(21) KM_TYPE_NR
59587 };
59588
59589 #undef KMAP_D
59590 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59591 index 9ceb03b..2efbcbd 100644
59592 --- a/include/asm-generic/local.h
59593 +++ b/include/asm-generic/local.h
59594 @@ -39,6 +39,7 @@ typedef struct
59595 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59596 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59597 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59598 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59599
59600 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59601 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59602 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59603 index 725612b..9cc513a 100644
59604 --- a/include/asm-generic/pgtable-nopmd.h
59605 +++ b/include/asm-generic/pgtable-nopmd.h
59606 @@ -1,14 +1,19 @@
59607 #ifndef _PGTABLE_NOPMD_H
59608 #define _PGTABLE_NOPMD_H
59609
59610 -#ifndef __ASSEMBLY__
59611 -
59612 #include <asm-generic/pgtable-nopud.h>
59613
59614 -struct mm_struct;
59615 -
59616 #define __PAGETABLE_PMD_FOLDED
59617
59618 +#define PMD_SHIFT PUD_SHIFT
59619 +#define PTRS_PER_PMD 1
59620 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59621 +#define PMD_MASK (~(PMD_SIZE-1))
59622 +
59623 +#ifndef __ASSEMBLY__
59624 +
59625 +struct mm_struct;
59626 +
59627 /*
59628 * Having the pmd type consist of a pud gets the size right, and allows
59629 * us to conceptually access the pud entry that this pmd is folded into
59630 @@ -16,11 +21,6 @@ struct mm_struct;
59631 */
59632 typedef struct { pud_t pud; } pmd_t;
59633
59634 -#define PMD_SHIFT PUD_SHIFT
59635 -#define PTRS_PER_PMD 1
59636 -#define PMD_SIZE (1UL << PMD_SHIFT)
59637 -#define PMD_MASK (~(PMD_SIZE-1))
59638 -
59639 /*
59640 * The "pud_xxx()" functions here are trivial for a folded two-level
59641 * setup: the pmd is never bad, and a pmd always exists (as it's folded
59642 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
59643 index 810431d..0ec4804f 100644
59644 --- a/include/asm-generic/pgtable-nopud.h
59645 +++ b/include/asm-generic/pgtable-nopud.h
59646 @@ -1,10 +1,15 @@
59647 #ifndef _PGTABLE_NOPUD_H
59648 #define _PGTABLE_NOPUD_H
59649
59650 -#ifndef __ASSEMBLY__
59651 -
59652 #define __PAGETABLE_PUD_FOLDED
59653
59654 +#define PUD_SHIFT PGDIR_SHIFT
59655 +#define PTRS_PER_PUD 1
59656 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59657 +#define PUD_MASK (~(PUD_SIZE-1))
59658 +
59659 +#ifndef __ASSEMBLY__
59660 +
59661 /*
59662 * Having the pud type consist of a pgd gets the size right, and allows
59663 * us to conceptually access the pgd entry that this pud is folded into
59664 @@ -12,11 +17,6 @@
59665 */
59666 typedef struct { pgd_t pgd; } pud_t;
59667
59668 -#define PUD_SHIFT PGDIR_SHIFT
59669 -#define PTRS_PER_PUD 1
59670 -#define PUD_SIZE (1UL << PUD_SHIFT)
59671 -#define PUD_MASK (~(PUD_SIZE-1))
59672 -
59673 /*
59674 * The "pgd_xxx()" functions here are trivial for a folded two-level
59675 * setup: the pud is never bad, and a pud always exists (as it's folded
59676 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59677 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59678
59679 #define pgd_populate(mm, pgd, pud) do { } while (0)
59680 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59681 /*
59682 * (puds are folded into pgds so this doesn't get actually called,
59683 * but the define is needed for a generic inline function.)
59684 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
59685 index c7ec2cd..909d125 100644
59686 --- a/include/asm-generic/pgtable.h
59687 +++ b/include/asm-generic/pgtable.h
59688 @@ -531,6 +531,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
59689 #endif
59690 }
59691
59692 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59693 +static inline unsigned long pax_open_kernel(void) { return 0; }
59694 +#endif
59695 +
59696 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59697 +static inline unsigned long pax_close_kernel(void) { return 0; }
59698 +#endif
59699 +
59700 #endif /* CONFIG_MMU */
59701
59702 #endif /* !__ASSEMBLY__ */
59703 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
59704 index 8aeadf6..f1dc019 100644
59705 --- a/include/asm-generic/vmlinux.lds.h
59706 +++ b/include/asm-generic/vmlinux.lds.h
59707 @@ -218,6 +218,7 @@
59708 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59709 VMLINUX_SYMBOL(__start_rodata) = .; \
59710 *(.rodata) *(.rodata.*) \
59711 + *(.data..read_only) \
59712 *(__vermagic) /* Kernel version magic */ \
59713 . = ALIGN(8); \
59714 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
59715 @@ -716,17 +717,18 @@
59716 * section in the linker script will go there too. @phdr should have
59717 * a leading colon.
59718 *
59719 - * Note that this macros defines __per_cpu_load as an absolute symbol.
59720 + * Note that this macros defines per_cpu_load as an absolute symbol.
59721 * If there is no need to put the percpu section at a predetermined
59722 * address, use PERCPU_SECTION.
59723 */
59724 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
59725 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
59726 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
59727 + per_cpu_load = .; \
59728 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59729 - LOAD_OFFSET) { \
59730 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
59731 PERCPU_INPUT(cacheline) \
59732 } phdr \
59733 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59734 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59735
59736 /**
59737 * PERCPU_SECTION - define output section for percpu area, simple version
59738 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
59739 index dd73104..fde86bd 100644
59740 --- a/include/drm/drmP.h
59741 +++ b/include/drm/drmP.h
59742 @@ -72,6 +72,7 @@
59743 #include <linux/workqueue.h>
59744 #include <linux/poll.h>
59745 #include <asm/pgalloc.h>
59746 +#include <asm/local.h>
59747 #include "drm.h"
59748
59749 #include <linux/idr.h>
59750 @@ -1074,7 +1075,7 @@ struct drm_device {
59751
59752 /** \name Usage Counters */
59753 /*@{ */
59754 - int open_count; /**< Outstanding files open */
59755 + local_t open_count; /**< Outstanding files open */
59756 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
59757 atomic_t vma_count; /**< Outstanding vma areas open */
59758 int buf_use; /**< Buffers in use -- cannot alloc */
59759 @@ -1085,7 +1086,7 @@ struct drm_device {
59760 /*@{ */
59761 unsigned long counters;
59762 enum drm_stat_type types[15];
59763 - atomic_t counts[15];
59764 + atomic_unchecked_t counts[15];
59765 /*@} */
59766
59767 struct list_head filelist;
59768 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
59769 index 37515d1..34fa8b0 100644
59770 --- a/include/drm/drm_crtc_helper.h
59771 +++ b/include/drm/drm_crtc_helper.h
59772 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
59773
59774 /* disable crtc when not in use - more explicit than dpms off */
59775 void (*disable)(struct drm_crtc *crtc);
59776 -};
59777 +} __no_const;
59778
59779 struct drm_encoder_helper_funcs {
59780 void (*dpms)(struct drm_encoder *encoder, int mode);
59781 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
59782 struct drm_connector *connector);
59783 /* disable encoder when not in use - more explicit than dpms off */
59784 void (*disable)(struct drm_encoder *encoder);
59785 -};
59786 +} __no_const;
59787
59788 struct drm_connector_helper_funcs {
59789 int (*get_modes)(struct drm_connector *connector);
59790 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
59791 index d6d1da4..fdd1ac5 100644
59792 --- a/include/drm/ttm/ttm_memory.h
59793 +++ b/include/drm/ttm/ttm_memory.h
59794 @@ -48,7 +48,7 @@
59795
59796 struct ttm_mem_shrink {
59797 int (*do_shrink) (struct ttm_mem_shrink *);
59798 -};
59799 +} __no_const;
59800
59801 /**
59802 * struct ttm_mem_global - Global memory accounting structure.
59803 diff --git a/include/linux/a.out.h b/include/linux/a.out.h
59804 index e86dfca..40cc55f 100644
59805 --- a/include/linux/a.out.h
59806 +++ b/include/linux/a.out.h
59807 @@ -39,6 +39,14 @@ enum machine_type {
59808 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
59809 };
59810
59811 +/* Constants for the N_FLAGS field */
59812 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
59813 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
59814 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
59815 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
59816 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
59817 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
59818 +
59819 #if !defined (N_MAGIC)
59820 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
59821 #endif
59822 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
59823 index 06fd4bb..1caec0d 100644
59824 --- a/include/linux/atmdev.h
59825 +++ b/include/linux/atmdev.h
59826 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
59827 #endif
59828
59829 struct k_atm_aal_stats {
59830 -#define __HANDLE_ITEM(i) atomic_t i
59831 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
59832 __AAL_STAT_ITEMS
59833 #undef __HANDLE_ITEM
59834 };
59835 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
59836 index 366422b..1fa7f84 100644
59837 --- a/include/linux/binfmts.h
59838 +++ b/include/linux/binfmts.h
59839 @@ -89,6 +89,7 @@ struct linux_binfmt {
59840 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
59841 int (*load_shlib)(struct file *);
59842 int (*core_dump)(struct coredump_params *cprm);
59843 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
59844 unsigned long min_coredump; /* minimal dump size */
59845 };
59846
59847 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
59848 index 4d4ac24..2c3ccce 100644
59849 --- a/include/linux/blkdev.h
59850 +++ b/include/linux/blkdev.h
59851 @@ -1376,7 +1376,7 @@ struct block_device_operations {
59852 /* this callback is with swap_lock and sometimes page table lock held */
59853 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
59854 struct module *owner;
59855 -};
59856 +} __do_const;
59857
59858 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
59859 unsigned long);
59860 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
59861 index 4d1a074..88f929a 100644
59862 --- a/include/linux/blktrace_api.h
59863 +++ b/include/linux/blktrace_api.h
59864 @@ -162,7 +162,7 @@ struct blk_trace {
59865 struct dentry *dir;
59866 struct dentry *dropped_file;
59867 struct dentry *msg_file;
59868 - atomic_t dropped;
59869 + atomic_unchecked_t dropped;
59870 };
59871
59872 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
59873 diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
59874 index 83195fb..0b0f77d 100644
59875 --- a/include/linux/byteorder/little_endian.h
59876 +++ b/include/linux/byteorder/little_endian.h
59877 @@ -42,51 +42,51 @@
59878
59879 static inline __le64 __cpu_to_le64p(const __u64 *p)
59880 {
59881 - return (__force __le64)*p;
59882 + return (__force const __le64)*p;
59883 }
59884 static inline __u64 __le64_to_cpup(const __le64 *p)
59885 {
59886 - return (__force __u64)*p;
59887 + return (__force const __u64)*p;
59888 }
59889 static inline __le32 __cpu_to_le32p(const __u32 *p)
59890 {
59891 - return (__force __le32)*p;
59892 + return (__force const __le32)*p;
59893 }
59894 static inline __u32 __le32_to_cpup(const __le32 *p)
59895 {
59896 - return (__force __u32)*p;
59897 + return (__force const __u32)*p;
59898 }
59899 static inline __le16 __cpu_to_le16p(const __u16 *p)
59900 {
59901 - return (__force __le16)*p;
59902 + return (__force const __le16)*p;
59903 }
59904 static inline __u16 __le16_to_cpup(const __le16 *p)
59905 {
59906 - return (__force __u16)*p;
59907 + return (__force const __u16)*p;
59908 }
59909 static inline __be64 __cpu_to_be64p(const __u64 *p)
59910 {
59911 - return (__force __be64)__swab64p(p);
59912 + return (__force const __be64)__swab64p(p);
59913 }
59914 static inline __u64 __be64_to_cpup(const __be64 *p)
59915 {
59916 - return __swab64p((__u64 *)p);
59917 + return __swab64p((const __u64 *)p);
59918 }
59919 static inline __be32 __cpu_to_be32p(const __u32 *p)
59920 {
59921 - return (__force __be32)__swab32p(p);
59922 + return (__force const __be32)__swab32p(p);
59923 }
59924 static inline __u32 __be32_to_cpup(const __be32 *p)
59925 {
59926 - return __swab32p((__u32 *)p);
59927 + return __swab32p((const __u32 *)p);
59928 }
59929 static inline __be16 __cpu_to_be16p(const __u16 *p)
59930 {
59931 - return (__force __be16)__swab16p(p);
59932 + return (__force const __be16)__swab16p(p);
59933 }
59934 static inline __u16 __be16_to_cpup(const __be16 *p)
59935 {
59936 - return __swab16p((__u16 *)p);
59937 + return __swab16p((const __u16 *)p);
59938 }
59939 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
59940 #define __le64_to_cpus(x) do { (void)(x); } while (0)
59941 diff --git a/include/linux/cache.h b/include/linux/cache.h
59942 index 4c57065..4307975 100644
59943 --- a/include/linux/cache.h
59944 +++ b/include/linux/cache.h
59945 @@ -16,6 +16,10 @@
59946 #define __read_mostly
59947 #endif
59948
59949 +#ifndef __read_only
59950 +#define __read_only __read_mostly
59951 +#endif
59952 +
59953 #ifndef ____cacheline_aligned
59954 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
59955 #endif
59956 diff --git a/include/linux/capability.h b/include/linux/capability.h
59957 index 12d52de..b5f7fa7 100644
59958 --- a/include/linux/capability.h
59959 +++ b/include/linux/capability.h
59960 @@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
59961 extern bool capable(int cap);
59962 extern bool ns_capable(struct user_namespace *ns, int cap);
59963 extern bool nsown_capable(int cap);
59964 +extern bool capable_nolog(int cap);
59965 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
59966
59967 /* audit system wants to get cap info from files as well */
59968 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
59969 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
59970 index 42e55de..1cd0e66 100644
59971 --- a/include/linux/cleancache.h
59972 +++ b/include/linux/cleancache.h
59973 @@ -31,7 +31,7 @@ struct cleancache_ops {
59974 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
59975 void (*invalidate_inode)(int, struct cleancache_filekey);
59976 void (*invalidate_fs)(int);
59977 -};
59978 +} __no_const;
59979
59980 extern struct cleancache_ops
59981 cleancache_register_ops(struct cleancache_ops *ops);
59982 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
59983 index 2f40791..a62d196 100644
59984 --- a/include/linux/compiler-gcc4.h
59985 +++ b/include/linux/compiler-gcc4.h
59986 @@ -32,6 +32,16 @@
59987 #define __linktime_error(message) __attribute__((__error__(message)))
59988
59989 #if __GNUC_MINOR__ >= 5
59990 +
59991 +#ifdef CONSTIFY_PLUGIN
59992 +#define __no_const __attribute__((no_const))
59993 +#define __do_const __attribute__((do_const))
59994 +#endif
59995 +
59996 +#ifdef SIZE_OVERFLOW_PLUGIN
59997 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
59998 +#endif
59999 +
60000 /*
60001 * Mark a position in code as unreachable. This can be used to
60002 * suppress control flow warnings after asm blocks that transfer
60003 @@ -47,6 +57,11 @@
60004 #define __noclone __attribute__((__noclone__))
60005
60006 #endif
60007 +
60008 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60009 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60010 +#define __bos0(ptr) __bos((ptr), 0)
60011 +#define __bos1(ptr) __bos((ptr), 1)
60012 #endif
60013
60014 #if __GNUC_MINOR__ > 0
60015 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
60016 index 923d093..726c17f 100644
60017 --- a/include/linux/compiler.h
60018 +++ b/include/linux/compiler.h
60019 @@ -5,31 +5,62 @@
60020
60021 #ifdef __CHECKER__
60022 # define __user __attribute__((noderef, address_space(1)))
60023 +# define __force_user __force __user
60024 # define __kernel __attribute__((address_space(0)))
60025 +# define __force_kernel __force __kernel
60026 # define __safe __attribute__((safe))
60027 # define __force __attribute__((force))
60028 # define __nocast __attribute__((nocast))
60029 # define __iomem __attribute__((noderef, address_space(2)))
60030 +# define __force_iomem __force __iomem
60031 # define __acquires(x) __attribute__((context(x,0,1)))
60032 # define __releases(x) __attribute__((context(x,1,0)))
60033 # define __acquire(x) __context__(x,1)
60034 # define __release(x) __context__(x,-1)
60035 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60036 # define __percpu __attribute__((noderef, address_space(3)))
60037 +# define __force_percpu __force __percpu
60038 #ifdef CONFIG_SPARSE_RCU_POINTER
60039 # define __rcu __attribute__((noderef, address_space(4)))
60040 +# define __force_rcu __force __rcu
60041 #else
60042 # define __rcu
60043 +# define __force_rcu
60044 #endif
60045 extern void __chk_user_ptr(const volatile void __user *);
60046 extern void __chk_io_ptr(const volatile void __iomem *);
60047 +#elif defined(CHECKER_PLUGIN)
60048 +//# define __user
60049 +//# define __force_user
60050 +//# define __kernel
60051 +//# define __force_kernel
60052 +# define __safe
60053 +# define __force
60054 +# define __nocast
60055 +# define __iomem
60056 +# define __force_iomem
60057 +# define __chk_user_ptr(x) (void)0
60058 +# define __chk_io_ptr(x) (void)0
60059 +# define __builtin_warning(x, y...) (1)
60060 +# define __acquires(x)
60061 +# define __releases(x)
60062 +# define __acquire(x) (void)0
60063 +# define __release(x) (void)0
60064 +# define __cond_lock(x,c) (c)
60065 +# define __percpu
60066 +# define __force_percpu
60067 +# define __rcu
60068 +# define __force_rcu
60069 #else
60070 # define __user
60071 +# define __force_user
60072 # define __kernel
60073 +# define __force_kernel
60074 # define __safe
60075 # define __force
60076 # define __nocast
60077 # define __iomem
60078 +# define __force_iomem
60079 # define __chk_user_ptr(x) (void)0
60080 # define __chk_io_ptr(x) (void)0
60081 # define __builtin_warning(x, y...) (1)
60082 @@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60083 # define __release(x) (void)0
60084 # define __cond_lock(x,c) (c)
60085 # define __percpu
60086 +# define __force_percpu
60087 # define __rcu
60088 +# define __force_rcu
60089 #endif
60090
60091 #ifdef __KERNEL__
60092 @@ -264,6 +297,18 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60093 # define __attribute_const__ /* unimplemented */
60094 #endif
60095
60096 +#ifndef __no_const
60097 +# define __no_const
60098 +#endif
60099 +
60100 +#ifndef __do_const
60101 +# define __do_const
60102 +#endif
60103 +
60104 +#ifndef __size_overflow
60105 +# define __size_overflow(...)
60106 +#endif
60107 +
60108 /*
60109 * Tell gcc if a function is cold. The compiler will assume any path
60110 * directly leading to the call is unlikely.
60111 @@ -273,6 +318,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60112 #define __cold
60113 #endif
60114
60115 +#ifndef __alloc_size
60116 +#define __alloc_size(...)
60117 +#endif
60118 +
60119 +#ifndef __bos
60120 +#define __bos(ptr, arg)
60121 +#endif
60122 +
60123 +#ifndef __bos0
60124 +#define __bos0(ptr)
60125 +#endif
60126 +
60127 +#ifndef __bos1
60128 +#define __bos1(ptr)
60129 +#endif
60130 +
60131 /* Simple shorthand for a section definition */
60132 #ifndef __section
60133 # define __section(S) __attribute__ ((__section__(#S)))
60134 @@ -308,6 +369,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60135 * use is to mediate communication between process-level code and irq/NMI
60136 * handlers, all running on the same CPU.
60137 */
60138 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60139 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60140 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60141
60142 #endif /* __LINUX_COMPILER_H */
60143 diff --git a/include/linux/cred.h b/include/linux/cred.h
60144 index adadf71..6af5560 100644
60145 --- a/include/linux/cred.h
60146 +++ b/include/linux/cred.h
60147 @@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60148 static inline void validate_process_creds(void)
60149 {
60150 }
60151 +static inline void validate_task_creds(struct task_struct *task)
60152 +{
60153 +}
60154 #endif
60155
60156 /**
60157 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
60158 index b92eadf..b4ecdc1 100644
60159 --- a/include/linux/crypto.h
60160 +++ b/include/linux/crypto.h
60161 @@ -373,7 +373,7 @@ struct cipher_tfm {
60162 const u8 *key, unsigned int keylen);
60163 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60164 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60165 -};
60166 +} __no_const;
60167
60168 struct hash_tfm {
60169 int (*init)(struct hash_desc *desc);
60170 @@ -394,13 +394,13 @@ struct compress_tfm {
60171 int (*cot_decompress)(struct crypto_tfm *tfm,
60172 const u8 *src, unsigned int slen,
60173 u8 *dst, unsigned int *dlen);
60174 -};
60175 +} __no_const;
60176
60177 struct rng_tfm {
60178 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60179 unsigned int dlen);
60180 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60181 -};
60182 +} __no_const;
60183
60184 #define crt_ablkcipher crt_u.ablkcipher
60185 #define crt_aead crt_u.aead
60186 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60187 index 7925bf0..d5143d2 100644
60188 --- a/include/linux/decompress/mm.h
60189 +++ b/include/linux/decompress/mm.h
60190 @@ -77,7 +77,7 @@ static void free(void *where)
60191 * warnings when not needed (indeed large_malloc / large_free are not
60192 * needed by inflate */
60193
60194 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60195 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60196 #define free(a) kfree(a)
60197
60198 #define large_malloc(a) vmalloc(a)
60199 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
60200 index dfc099e..e583e66 100644
60201 --- a/include/linux/dma-mapping.h
60202 +++ b/include/linux/dma-mapping.h
60203 @@ -51,7 +51,7 @@ struct dma_map_ops {
60204 u64 (*get_required_mask)(struct device *dev);
60205 #endif
60206 int is_phys;
60207 -};
60208 +} __do_const;
60209
60210 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
60211
60212 diff --git a/include/linux/efi.h b/include/linux/efi.h
60213 index ec45ccd..9923c32 100644
60214 --- a/include/linux/efi.h
60215 +++ b/include/linux/efi.h
60216 @@ -635,7 +635,7 @@ struct efivar_operations {
60217 efi_get_variable_t *get_variable;
60218 efi_get_next_variable_t *get_next_variable;
60219 efi_set_variable_t *set_variable;
60220 -};
60221 +} __no_const;
60222
60223 struct efivars {
60224 /*
60225 diff --git a/include/linux/elf.h b/include/linux/elf.h
60226 index 999b4f5..57753b4 100644
60227 --- a/include/linux/elf.h
60228 +++ b/include/linux/elf.h
60229 @@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60230 #define PT_GNU_EH_FRAME 0x6474e550
60231
60232 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60233 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60234 +
60235 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60236 +
60237 +/* Constants for the e_flags field */
60238 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60239 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60240 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60241 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60242 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60243 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60244
60245 /*
60246 * Extended Numbering
60247 @@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60248 #define DT_DEBUG 21
60249 #define DT_TEXTREL 22
60250 #define DT_JMPREL 23
60251 +#define DT_FLAGS 30
60252 + #define DF_TEXTREL 0x00000004
60253 #define DT_ENCODING 32
60254 #define OLD_DT_LOOS 0x60000000
60255 #define DT_LOOS 0x6000000d
60256 @@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60257 #define PF_W 0x2
60258 #define PF_X 0x1
60259
60260 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60261 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60262 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60263 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60264 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60265 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60266 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60267 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60268 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60269 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60270 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60271 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60272 +
60273 typedef struct elf32_phdr{
60274 Elf32_Word p_type;
60275 Elf32_Off p_offset;
60276 @@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60277 #define EI_OSABI 7
60278 #define EI_PAD 8
60279
60280 +#define EI_PAX 14
60281 +
60282 #define ELFMAG0 0x7f /* EI_MAG */
60283 #define ELFMAG1 'E'
60284 #define ELFMAG2 'L'
60285 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60286 #define elf_note elf32_note
60287 #define elf_addr_t Elf32_Off
60288 #define Elf_Half Elf32_Half
60289 +#define elf_dyn Elf32_Dyn
60290
60291 #else
60292
60293 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60294 #define elf_note elf64_note
60295 #define elf_addr_t Elf64_Off
60296 #define Elf_Half Elf64_Half
60297 +#define elf_dyn Elf64_Dyn
60298
60299 #endif
60300
60301 diff --git a/include/linux/filter.h b/include/linux/filter.h
60302 index 8eeb205..d59bfa2 100644
60303 --- a/include/linux/filter.h
60304 +++ b/include/linux/filter.h
60305 @@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60306
60307 struct sk_buff;
60308 struct sock;
60309 +struct bpf_jit_work;
60310
60311 struct sk_filter
60312 {
60313 @@ -141,6 +142,9 @@ struct sk_filter
60314 unsigned int len; /* Number of filter blocks */
60315 unsigned int (*bpf_func)(const struct sk_buff *skb,
60316 const struct sock_filter *filter);
60317 +#ifdef CONFIG_BPF_JIT
60318 + struct bpf_jit_work *work;
60319 +#endif
60320 struct rcu_head rcu;
60321 struct sock_filter insns[0];
60322 };
60323 diff --git a/include/linux/firewire.h b/include/linux/firewire.h
60324 index cdc9b71..ce69fb5 100644
60325 --- a/include/linux/firewire.h
60326 +++ b/include/linux/firewire.h
60327 @@ -413,7 +413,7 @@ struct fw_iso_context {
60328 union {
60329 fw_iso_callback_t sc;
60330 fw_iso_mc_callback_t mc;
60331 - } callback;
60332 + } __no_const callback;
60333 void *callback_data;
60334 };
60335
60336 diff --git a/include/linux/fs.h b/include/linux/fs.h
60337 index 25c40b9..1bfd4f4 100644
60338 --- a/include/linux/fs.h
60339 +++ b/include/linux/fs.h
60340 @@ -1634,7 +1634,8 @@ struct file_operations {
60341 int (*setlease)(struct file *, long, struct file_lock **);
60342 long (*fallocate)(struct file *file, int mode, loff_t offset,
60343 loff_t len);
60344 -};
60345 +} __do_const;
60346 +typedef struct file_operations __no_const file_operations_no_const;
60347
60348 struct inode_operations {
60349 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60350 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60351 index 003dc0f..3c4ea97 100644
60352 --- a/include/linux/fs_struct.h
60353 +++ b/include/linux/fs_struct.h
60354 @@ -6,7 +6,7 @@
60355 #include <linux/seqlock.h>
60356
60357 struct fs_struct {
60358 - int users;
60359 + atomic_t users;
60360 spinlock_t lock;
60361 seqcount_t seq;
60362 int umask;
60363 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
60364 index ce31408..b1ad003 100644
60365 --- a/include/linux/fscache-cache.h
60366 +++ b/include/linux/fscache-cache.h
60367 @@ -102,7 +102,7 @@ struct fscache_operation {
60368 fscache_operation_release_t release;
60369 };
60370
60371 -extern atomic_t fscache_op_debug_id;
60372 +extern atomic_unchecked_t fscache_op_debug_id;
60373 extern void fscache_op_work_func(struct work_struct *work);
60374
60375 extern void fscache_enqueue_operation(struct fscache_operation *);
60376 @@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60377 {
60378 INIT_WORK(&op->work, fscache_op_work_func);
60379 atomic_set(&op->usage, 1);
60380 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60381 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60382 op->processor = processor;
60383 op->release = release;
60384 INIT_LIST_HEAD(&op->pend_link);
60385 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
60386 index a6dfe69..569586df 100644
60387 --- a/include/linux/fsnotify.h
60388 +++ b/include/linux/fsnotify.h
60389 @@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60390 */
60391 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60392 {
60393 - return kstrdup(name, GFP_KERNEL);
60394 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60395 }
60396
60397 /*
60398 diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60399 index 91d0e0a3..035666b 100644
60400 --- a/include/linux/fsnotify_backend.h
60401 +++ b/include/linux/fsnotify_backend.h
60402 @@ -105,6 +105,7 @@ struct fsnotify_ops {
60403 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60404 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60405 };
60406 +typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60407
60408 /*
60409 * A group is a "thing" that wants to receive notification about filesystem
60410 diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
60411 index 176a939..1462211 100644
60412 --- a/include/linux/ftrace_event.h
60413 +++ b/include/linux/ftrace_event.h
60414 @@ -97,7 +97,7 @@ struct trace_event_functions {
60415 trace_print_func raw;
60416 trace_print_func hex;
60417 trace_print_func binary;
60418 -};
60419 +} __no_const;
60420
60421 struct trace_event {
60422 struct hlist_node node;
60423 @@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60424 extern int trace_add_event_call(struct ftrace_event_call *call);
60425 extern void trace_remove_event_call(struct ftrace_event_call *call);
60426
60427 -#define is_signed_type(type) (((type)(-1)) < 0)
60428 +#define is_signed_type(type) (((type)(-1)) < (type)1)
60429
60430 int trace_set_clr_event(const char *system, const char *event, int set);
60431
60432 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
60433 index 017a7fb..33a8507 100644
60434 --- a/include/linux/genhd.h
60435 +++ b/include/linux/genhd.h
60436 @@ -185,7 +185,7 @@ struct gendisk {
60437 struct kobject *slave_dir;
60438
60439 struct timer_rand_state *random;
60440 - atomic_t sync_io; /* RAID */
60441 + atomic_unchecked_t sync_io; /* RAID */
60442 struct disk_events *ev;
60443 #ifdef CONFIG_BLK_DEV_INTEGRITY
60444 struct blk_integrity *integrity;
60445 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
60446 index 581e74b..1dbda57 100644
60447 --- a/include/linux/gfp.h
60448 +++ b/include/linux/gfp.h
60449 @@ -38,6 +38,12 @@ struct vm_area_struct;
60450 #define ___GFP_OTHER_NODE 0x800000u
60451 #define ___GFP_WRITE 0x1000000u
60452
60453 +#ifdef CONFIG_PAX_USERCOPY
60454 +#define ___GFP_USERCOPY 0x2000000u
60455 +#else
60456 +#define ___GFP_USERCOPY 0
60457 +#endif
60458 +
60459 /*
60460 * GFP bitmasks..
60461 *
60462 @@ -87,6 +93,7 @@ struct vm_area_struct;
60463 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
60464 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
60465 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
60466 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
60467
60468 /*
60469 * This may seem redundant, but it's a way of annotating false positives vs.
60470 @@ -94,7 +101,7 @@ struct vm_area_struct;
60471 */
60472 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
60473
60474 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
60475 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
60476 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
60477
60478 /* This equals 0, but use constants in case they ever change */
60479 @@ -148,6 +155,8 @@ struct vm_area_struct;
60480 /* 4GB DMA on some platforms */
60481 #define GFP_DMA32 __GFP_DMA32
60482
60483 +#define GFP_USERCOPY __GFP_USERCOPY
60484 +
60485 /* Convert GFP flags to their corresponding migrate type */
60486 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
60487 {
60488 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60489 new file mode 100644
60490 index 0000000..c938b1f
60491 --- /dev/null
60492 +++ b/include/linux/gracl.h
60493 @@ -0,0 +1,319 @@
60494 +#ifndef GR_ACL_H
60495 +#define GR_ACL_H
60496 +
60497 +#include <linux/grdefs.h>
60498 +#include <linux/resource.h>
60499 +#include <linux/capability.h>
60500 +#include <linux/dcache.h>
60501 +#include <asm/resource.h>
60502 +
60503 +/* Major status information */
60504 +
60505 +#define GR_VERSION "grsecurity 2.9.1"
60506 +#define GRSECURITY_VERSION 0x2901
60507 +
60508 +enum {
60509 + GR_SHUTDOWN = 0,
60510 + GR_ENABLE = 1,
60511 + GR_SPROLE = 2,
60512 + GR_RELOAD = 3,
60513 + GR_SEGVMOD = 4,
60514 + GR_STATUS = 5,
60515 + GR_UNSPROLE = 6,
60516 + GR_PASSSET = 7,
60517 + GR_SPROLEPAM = 8,
60518 +};
60519 +
60520 +/* Password setup definitions
60521 + * kernel/grhash.c */
60522 +enum {
60523 + GR_PW_LEN = 128,
60524 + GR_SALT_LEN = 16,
60525 + GR_SHA_LEN = 32,
60526 +};
60527 +
60528 +enum {
60529 + GR_SPROLE_LEN = 64,
60530 +};
60531 +
60532 +enum {
60533 + GR_NO_GLOB = 0,
60534 + GR_REG_GLOB,
60535 + GR_CREATE_GLOB
60536 +};
60537 +
60538 +#define GR_NLIMITS 32
60539 +
60540 +/* Begin Data Structures */
60541 +
60542 +struct sprole_pw {
60543 + unsigned char *rolename;
60544 + unsigned char salt[GR_SALT_LEN];
60545 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60546 +};
60547 +
60548 +struct name_entry {
60549 + __u32 key;
60550 + ino_t inode;
60551 + dev_t device;
60552 + char *name;
60553 + __u16 len;
60554 + __u8 deleted;
60555 + struct name_entry *prev;
60556 + struct name_entry *next;
60557 +};
60558 +
60559 +struct inodev_entry {
60560 + struct name_entry *nentry;
60561 + struct inodev_entry *prev;
60562 + struct inodev_entry *next;
60563 +};
60564 +
60565 +struct acl_role_db {
60566 + struct acl_role_label **r_hash;
60567 + __u32 r_size;
60568 +};
60569 +
60570 +struct inodev_db {
60571 + struct inodev_entry **i_hash;
60572 + __u32 i_size;
60573 +};
60574 +
60575 +struct name_db {
60576 + struct name_entry **n_hash;
60577 + __u32 n_size;
60578 +};
60579 +
60580 +struct crash_uid {
60581 + uid_t uid;
60582 + unsigned long expires;
60583 +};
60584 +
60585 +struct gr_hash_struct {
60586 + void **table;
60587 + void **nametable;
60588 + void *first;
60589 + __u32 table_size;
60590 + __u32 used_size;
60591 + int type;
60592 +};
60593 +
60594 +/* Userspace Grsecurity ACL data structures */
60595 +
60596 +struct acl_subject_label {
60597 + char *filename;
60598 + ino_t inode;
60599 + dev_t device;
60600 + __u32 mode;
60601 + kernel_cap_t cap_mask;
60602 + kernel_cap_t cap_lower;
60603 + kernel_cap_t cap_invert_audit;
60604 +
60605 + struct rlimit res[GR_NLIMITS];
60606 + __u32 resmask;
60607 +
60608 + __u8 user_trans_type;
60609 + __u8 group_trans_type;
60610 + uid_t *user_transitions;
60611 + gid_t *group_transitions;
60612 + __u16 user_trans_num;
60613 + __u16 group_trans_num;
60614 +
60615 + __u32 sock_families[2];
60616 + __u32 ip_proto[8];
60617 + __u32 ip_type;
60618 + struct acl_ip_label **ips;
60619 + __u32 ip_num;
60620 + __u32 inaddr_any_override;
60621 +
60622 + __u32 crashes;
60623 + unsigned long expires;
60624 +
60625 + struct acl_subject_label *parent_subject;
60626 + struct gr_hash_struct *hash;
60627 + struct acl_subject_label *prev;
60628 + struct acl_subject_label *next;
60629 +
60630 + struct acl_object_label **obj_hash;
60631 + __u32 obj_hash_size;
60632 + __u16 pax_flags;
60633 +};
60634 +
60635 +struct role_allowed_ip {
60636 + __u32 addr;
60637 + __u32 netmask;
60638 +
60639 + struct role_allowed_ip *prev;
60640 + struct role_allowed_ip *next;
60641 +};
60642 +
60643 +struct role_transition {
60644 + char *rolename;
60645 +
60646 + struct role_transition *prev;
60647 + struct role_transition *next;
60648 +};
60649 +
60650 +struct acl_role_label {
60651 + char *rolename;
60652 + uid_t uidgid;
60653 + __u16 roletype;
60654 +
60655 + __u16 auth_attempts;
60656 + unsigned long expires;
60657 +
60658 + struct acl_subject_label *root_label;
60659 + struct gr_hash_struct *hash;
60660 +
60661 + struct acl_role_label *prev;
60662 + struct acl_role_label *next;
60663 +
60664 + struct role_transition *transitions;
60665 + struct role_allowed_ip *allowed_ips;
60666 + uid_t *domain_children;
60667 + __u16 domain_child_num;
60668 +
60669 + umode_t umask;
60670 +
60671 + struct acl_subject_label **subj_hash;
60672 + __u32 subj_hash_size;
60673 +};
60674 +
60675 +struct user_acl_role_db {
60676 + struct acl_role_label **r_table;
60677 + __u32 num_pointers; /* Number of allocations to track */
60678 + __u32 num_roles; /* Number of roles */
60679 + __u32 num_domain_children; /* Number of domain children */
60680 + __u32 num_subjects; /* Number of subjects */
60681 + __u32 num_objects; /* Number of objects */
60682 +};
60683 +
60684 +struct acl_object_label {
60685 + char *filename;
60686 + ino_t inode;
60687 + dev_t device;
60688 + __u32 mode;
60689 +
60690 + struct acl_subject_label *nested;
60691 + struct acl_object_label *globbed;
60692 +
60693 + /* next two structures not used */
60694 +
60695 + struct acl_object_label *prev;
60696 + struct acl_object_label *next;
60697 +};
60698 +
60699 +struct acl_ip_label {
60700 + char *iface;
60701 + __u32 addr;
60702 + __u32 netmask;
60703 + __u16 low, high;
60704 + __u8 mode;
60705 + __u32 type;
60706 + __u32 proto[8];
60707 +
60708 + /* next two structures not used */
60709 +
60710 + struct acl_ip_label *prev;
60711 + struct acl_ip_label *next;
60712 +};
60713 +
60714 +struct gr_arg {
60715 + struct user_acl_role_db role_db;
60716 + unsigned char pw[GR_PW_LEN];
60717 + unsigned char salt[GR_SALT_LEN];
60718 + unsigned char sum[GR_SHA_LEN];
60719 + unsigned char sp_role[GR_SPROLE_LEN];
60720 + struct sprole_pw *sprole_pws;
60721 + dev_t segv_device;
60722 + ino_t segv_inode;
60723 + uid_t segv_uid;
60724 + __u16 num_sprole_pws;
60725 + __u16 mode;
60726 +};
60727 +
60728 +struct gr_arg_wrapper {
60729 + struct gr_arg *arg;
60730 + __u32 version;
60731 + __u32 size;
60732 +};
60733 +
60734 +struct subject_map {
60735 + struct acl_subject_label *user;
60736 + struct acl_subject_label *kernel;
60737 + struct subject_map *prev;
60738 + struct subject_map *next;
60739 +};
60740 +
60741 +struct acl_subj_map_db {
60742 + struct subject_map **s_hash;
60743 + __u32 s_size;
60744 +};
60745 +
60746 +/* End Data Structures Section */
60747 +
60748 +/* Hash functions generated by empirical testing by Brad Spengler
60749 + Makes good use of the low bits of the inode. Generally 0-1 times
60750 + in loop for successful match. 0-3 for unsuccessful match.
60751 + Shift/add algorithm with modulus of table size and an XOR*/
60752 +
60753 +static __inline__ unsigned int
60754 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
60755 +{
60756 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
60757 +}
60758 +
60759 + static __inline__ unsigned int
60760 +shash(const struct acl_subject_label *userp, const unsigned int sz)
60761 +{
60762 + return ((const unsigned long)userp % sz);
60763 +}
60764 +
60765 +static __inline__ unsigned int
60766 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
60767 +{
60768 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
60769 +}
60770 +
60771 +static __inline__ unsigned int
60772 +nhash(const char *name, const __u16 len, const unsigned int sz)
60773 +{
60774 + return full_name_hash((const unsigned char *)name, len) % sz;
60775 +}
60776 +
60777 +#define FOR_EACH_ROLE_START(role) \
60778 + role = role_list; \
60779 + while (role) {
60780 +
60781 +#define FOR_EACH_ROLE_END(role) \
60782 + role = role->prev; \
60783 + }
60784 +
60785 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
60786 + subj = NULL; \
60787 + iter = 0; \
60788 + while (iter < role->subj_hash_size) { \
60789 + if (subj == NULL) \
60790 + subj = role->subj_hash[iter]; \
60791 + if (subj == NULL) { \
60792 + iter++; \
60793 + continue; \
60794 + }
60795 +
60796 +#define FOR_EACH_SUBJECT_END(subj,iter) \
60797 + subj = subj->next; \
60798 + if (subj == NULL) \
60799 + iter++; \
60800 + }
60801 +
60802 +
60803 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
60804 + subj = role->hash->first; \
60805 + while (subj != NULL) {
60806 +
60807 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
60808 + subj = subj->next; \
60809 + }
60810 +
60811 +#endif
60812 +
60813 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
60814 new file mode 100644
60815 index 0000000..323ecf2
60816 --- /dev/null
60817 +++ b/include/linux/gralloc.h
60818 @@ -0,0 +1,9 @@
60819 +#ifndef __GRALLOC_H
60820 +#define __GRALLOC_H
60821 +
60822 +void acl_free_all(void);
60823 +int acl_alloc_stack_init(unsigned long size);
60824 +void *acl_alloc(unsigned long len);
60825 +void *acl_alloc_num(unsigned long num, unsigned long len);
60826 +
60827 +#endif
60828 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
60829 new file mode 100644
60830 index 0000000..b30e9bc
60831 --- /dev/null
60832 +++ b/include/linux/grdefs.h
60833 @@ -0,0 +1,140 @@
60834 +#ifndef GRDEFS_H
60835 +#define GRDEFS_H
60836 +
60837 +/* Begin grsecurity status declarations */
60838 +
60839 +enum {
60840 + GR_READY = 0x01,
60841 + GR_STATUS_INIT = 0x00 // disabled state
60842 +};
60843 +
60844 +/* Begin ACL declarations */
60845 +
60846 +/* Role flags */
60847 +
60848 +enum {
60849 + GR_ROLE_USER = 0x0001,
60850 + GR_ROLE_GROUP = 0x0002,
60851 + GR_ROLE_DEFAULT = 0x0004,
60852 + GR_ROLE_SPECIAL = 0x0008,
60853 + GR_ROLE_AUTH = 0x0010,
60854 + GR_ROLE_NOPW = 0x0020,
60855 + GR_ROLE_GOD = 0x0040,
60856 + GR_ROLE_LEARN = 0x0080,
60857 + GR_ROLE_TPE = 0x0100,
60858 + GR_ROLE_DOMAIN = 0x0200,
60859 + GR_ROLE_PAM = 0x0400,
60860 + GR_ROLE_PERSIST = 0x0800
60861 +};
60862 +
60863 +/* ACL Subject and Object mode flags */
60864 +enum {
60865 + GR_DELETED = 0x80000000
60866 +};
60867 +
60868 +/* ACL Object-only mode flags */
60869 +enum {
60870 + GR_READ = 0x00000001,
60871 + GR_APPEND = 0x00000002,
60872 + GR_WRITE = 0x00000004,
60873 + GR_EXEC = 0x00000008,
60874 + GR_FIND = 0x00000010,
60875 + GR_INHERIT = 0x00000020,
60876 + GR_SETID = 0x00000040,
60877 + GR_CREATE = 0x00000080,
60878 + GR_DELETE = 0x00000100,
60879 + GR_LINK = 0x00000200,
60880 + GR_AUDIT_READ = 0x00000400,
60881 + GR_AUDIT_APPEND = 0x00000800,
60882 + GR_AUDIT_WRITE = 0x00001000,
60883 + GR_AUDIT_EXEC = 0x00002000,
60884 + GR_AUDIT_FIND = 0x00004000,
60885 + GR_AUDIT_INHERIT= 0x00008000,
60886 + GR_AUDIT_SETID = 0x00010000,
60887 + GR_AUDIT_CREATE = 0x00020000,
60888 + GR_AUDIT_DELETE = 0x00040000,
60889 + GR_AUDIT_LINK = 0x00080000,
60890 + GR_PTRACERD = 0x00100000,
60891 + GR_NOPTRACE = 0x00200000,
60892 + GR_SUPPRESS = 0x00400000,
60893 + GR_NOLEARN = 0x00800000,
60894 + GR_INIT_TRANSFER= 0x01000000
60895 +};
60896 +
60897 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
60898 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
60899 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
60900 +
60901 +/* ACL subject-only mode flags */
60902 +enum {
60903 + GR_KILL = 0x00000001,
60904 + GR_VIEW = 0x00000002,
60905 + GR_PROTECTED = 0x00000004,
60906 + GR_LEARN = 0x00000008,
60907 + GR_OVERRIDE = 0x00000010,
60908 + /* just a placeholder, this mode is only used in userspace */
60909 + GR_DUMMY = 0x00000020,
60910 + GR_PROTSHM = 0x00000040,
60911 + GR_KILLPROC = 0x00000080,
60912 + GR_KILLIPPROC = 0x00000100,
60913 + /* just a placeholder, this mode is only used in userspace */
60914 + GR_NOTROJAN = 0x00000200,
60915 + GR_PROTPROCFD = 0x00000400,
60916 + GR_PROCACCT = 0x00000800,
60917 + GR_RELAXPTRACE = 0x00001000,
60918 + GR_NESTED = 0x00002000,
60919 + GR_INHERITLEARN = 0x00004000,
60920 + GR_PROCFIND = 0x00008000,
60921 + GR_POVERRIDE = 0x00010000,
60922 + GR_KERNELAUTH = 0x00020000,
60923 + GR_ATSECURE = 0x00040000,
60924 + GR_SHMEXEC = 0x00080000
60925 +};
60926 +
60927 +enum {
60928 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
60929 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
60930 + GR_PAX_ENABLE_MPROTECT = 0x0004,
60931 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
60932 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
60933 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
60934 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
60935 + GR_PAX_DISABLE_MPROTECT = 0x0400,
60936 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
60937 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
60938 +};
60939 +
60940 +enum {
60941 + GR_ID_USER = 0x01,
60942 + GR_ID_GROUP = 0x02,
60943 +};
60944 +
60945 +enum {
60946 + GR_ID_ALLOW = 0x01,
60947 + GR_ID_DENY = 0x02,
60948 +};
60949 +
60950 +#define GR_CRASH_RES 31
60951 +#define GR_UIDTABLE_MAX 500
60952 +
60953 +/* begin resource learning section */
60954 +enum {
60955 + GR_RLIM_CPU_BUMP = 60,
60956 + GR_RLIM_FSIZE_BUMP = 50000,
60957 + GR_RLIM_DATA_BUMP = 10000,
60958 + GR_RLIM_STACK_BUMP = 1000,
60959 + GR_RLIM_CORE_BUMP = 10000,
60960 + GR_RLIM_RSS_BUMP = 500000,
60961 + GR_RLIM_NPROC_BUMP = 1,
60962 + GR_RLIM_NOFILE_BUMP = 5,
60963 + GR_RLIM_MEMLOCK_BUMP = 50000,
60964 + GR_RLIM_AS_BUMP = 500000,
60965 + GR_RLIM_LOCKS_BUMP = 2,
60966 + GR_RLIM_SIGPENDING_BUMP = 5,
60967 + GR_RLIM_MSGQUEUE_BUMP = 10000,
60968 + GR_RLIM_NICE_BUMP = 1,
60969 + GR_RLIM_RTPRIO_BUMP = 1,
60970 + GR_RLIM_RTTIME_BUMP = 1000000
60971 +};
60972 +
60973 +#endif
60974 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
60975 new file mode 100644
60976 index 0000000..c9292f7
60977 --- /dev/null
60978 +++ b/include/linux/grinternal.h
60979 @@ -0,0 +1,223 @@
60980 +#ifndef __GRINTERNAL_H
60981 +#define __GRINTERNAL_H
60982 +
60983 +#ifdef CONFIG_GRKERNSEC
60984 +
60985 +#include <linux/fs.h>
60986 +#include <linux/mnt_namespace.h>
60987 +#include <linux/nsproxy.h>
60988 +#include <linux/gracl.h>
60989 +#include <linux/grdefs.h>
60990 +#include <linux/grmsg.h>
60991 +
60992 +void gr_add_learn_entry(const char *fmt, ...)
60993 + __attribute__ ((format (printf, 1, 2)));
60994 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
60995 + const struct vfsmount *mnt);
60996 +__u32 gr_check_create(const struct dentry *new_dentry,
60997 + const struct dentry *parent,
60998 + const struct vfsmount *mnt, const __u32 mode);
60999 +int gr_check_protected_task(const struct task_struct *task);
61000 +__u32 to_gr_audit(const __u32 reqmode);
61001 +int gr_set_acls(const int type);
61002 +int gr_apply_subject_to_task(struct task_struct *task);
61003 +int gr_acl_is_enabled(void);
61004 +char gr_roletype_to_char(void);
61005 +
61006 +void gr_handle_alertkill(struct task_struct *task);
61007 +char *gr_to_filename(const struct dentry *dentry,
61008 + const struct vfsmount *mnt);
61009 +char *gr_to_filename1(const struct dentry *dentry,
61010 + const struct vfsmount *mnt);
61011 +char *gr_to_filename2(const struct dentry *dentry,
61012 + const struct vfsmount *mnt);
61013 +char *gr_to_filename3(const struct dentry *dentry,
61014 + const struct vfsmount *mnt);
61015 +
61016 +extern int grsec_enable_ptrace_readexec;
61017 +extern int grsec_enable_harden_ptrace;
61018 +extern int grsec_enable_link;
61019 +extern int grsec_enable_fifo;
61020 +extern int grsec_enable_execve;
61021 +extern int grsec_enable_shm;
61022 +extern int grsec_enable_execlog;
61023 +extern int grsec_enable_signal;
61024 +extern int grsec_enable_audit_ptrace;
61025 +extern int grsec_enable_forkfail;
61026 +extern int grsec_enable_time;
61027 +extern int grsec_enable_rofs;
61028 +extern int grsec_enable_chroot_shmat;
61029 +extern int grsec_enable_chroot_mount;
61030 +extern int grsec_enable_chroot_double;
61031 +extern int grsec_enable_chroot_pivot;
61032 +extern int grsec_enable_chroot_chdir;
61033 +extern int grsec_enable_chroot_chmod;
61034 +extern int grsec_enable_chroot_mknod;
61035 +extern int grsec_enable_chroot_fchdir;
61036 +extern int grsec_enable_chroot_nice;
61037 +extern int grsec_enable_chroot_execlog;
61038 +extern int grsec_enable_chroot_caps;
61039 +extern int grsec_enable_chroot_sysctl;
61040 +extern int grsec_enable_chroot_unix;
61041 +extern int grsec_enable_symlinkown;
61042 +extern int grsec_symlinkown_gid;
61043 +extern int grsec_enable_tpe;
61044 +extern int grsec_tpe_gid;
61045 +extern int grsec_enable_tpe_all;
61046 +extern int grsec_enable_tpe_invert;
61047 +extern int grsec_enable_socket_all;
61048 +extern int grsec_socket_all_gid;
61049 +extern int grsec_enable_socket_client;
61050 +extern int grsec_socket_client_gid;
61051 +extern int grsec_enable_socket_server;
61052 +extern int grsec_socket_server_gid;
61053 +extern int grsec_audit_gid;
61054 +extern int grsec_enable_group;
61055 +extern int grsec_enable_audit_textrel;
61056 +extern int grsec_enable_log_rwxmaps;
61057 +extern int grsec_enable_mount;
61058 +extern int grsec_enable_chdir;
61059 +extern int grsec_resource_logging;
61060 +extern int grsec_enable_blackhole;
61061 +extern int grsec_lastack_retries;
61062 +extern int grsec_enable_brute;
61063 +extern int grsec_lock;
61064 +
61065 +extern spinlock_t grsec_alert_lock;
61066 +extern unsigned long grsec_alert_wtime;
61067 +extern unsigned long grsec_alert_fyet;
61068 +
61069 +extern spinlock_t grsec_audit_lock;
61070 +
61071 +extern rwlock_t grsec_exec_file_lock;
61072 +
61073 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61074 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61075 + (tsk)->exec_file->f_vfsmnt) : "/")
61076 +
61077 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61078 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61079 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61080 +
61081 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61082 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
61083 + (tsk)->exec_file->f_vfsmnt) : "/")
61084 +
61085 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61086 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61087 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
61088 +
61089 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
61090 +
61091 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
61092 +
61093 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61094 + (task)->pid, (cred)->uid, \
61095 + (cred)->euid, (cred)->gid, (cred)->egid, \
61096 + gr_parent_task_fullpath(task), \
61097 + (task)->real_parent->comm, (task)->real_parent->pid, \
61098 + (pcred)->uid, (pcred)->euid, \
61099 + (pcred)->gid, (pcred)->egid
61100 +
61101 +#define GR_CHROOT_CAPS {{ \
61102 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61103 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61104 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61105 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61106 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
61107 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61108 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
61109 +
61110 +#define security_learn(normal_msg,args...) \
61111 +({ \
61112 + read_lock(&grsec_exec_file_lock); \
61113 + gr_add_learn_entry(normal_msg "\n", ## args); \
61114 + read_unlock(&grsec_exec_file_lock); \
61115 +})
61116 +
61117 +enum {
61118 + GR_DO_AUDIT,
61119 + GR_DONT_AUDIT,
61120 + /* used for non-audit messages that we shouldn't kill the task on */
61121 + GR_DONT_AUDIT_GOOD
61122 +};
61123 +
61124 +enum {
61125 + GR_TTYSNIFF,
61126 + GR_RBAC,
61127 + GR_RBAC_STR,
61128 + GR_STR_RBAC,
61129 + GR_RBAC_MODE2,
61130 + GR_RBAC_MODE3,
61131 + GR_FILENAME,
61132 + GR_SYSCTL_HIDDEN,
61133 + GR_NOARGS,
61134 + GR_ONE_INT,
61135 + GR_ONE_INT_TWO_STR,
61136 + GR_ONE_STR,
61137 + GR_STR_INT,
61138 + GR_TWO_STR_INT,
61139 + GR_TWO_INT,
61140 + GR_TWO_U64,
61141 + GR_THREE_INT,
61142 + GR_FIVE_INT_TWO_STR,
61143 + GR_TWO_STR,
61144 + GR_THREE_STR,
61145 + GR_FOUR_STR,
61146 + GR_STR_FILENAME,
61147 + GR_FILENAME_STR,
61148 + GR_FILENAME_TWO_INT,
61149 + GR_FILENAME_TWO_INT_STR,
61150 + GR_TEXTREL,
61151 + GR_PTRACE,
61152 + GR_RESOURCE,
61153 + GR_CAP,
61154 + GR_SIG,
61155 + GR_SIG2,
61156 + GR_CRASH1,
61157 + GR_CRASH2,
61158 + GR_PSACCT,
61159 + GR_RWXMAP
61160 +};
61161 +
61162 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61163 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61164 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61165 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61166 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61167 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61168 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61169 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61170 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61171 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61172 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61173 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61174 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61175 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
61176 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
61177 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61178 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61179 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
61180 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
61181 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61182 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61183 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61184 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61185 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61186 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61187 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61188 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61189 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61190 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61191 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61192 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61193 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61194 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61195 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
61196 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
61197 +
61198 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61199 +
61200 +#endif
61201 +
61202 +#endif
61203 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61204 new file mode 100644
61205 index 0000000..54f4e85
61206 --- /dev/null
61207 +++ b/include/linux/grmsg.h
61208 @@ -0,0 +1,110 @@
61209 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
61210 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
61211 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61212 +#define GR_STOPMOD_MSG "denied modification of module state by "
61213 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61214 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
61215 +#define GR_IOPERM_MSG "denied use of ioperm() by "
61216 +#define GR_IOPL_MSG "denied use of iopl() by "
61217 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61218 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61219 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
61220 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
61221 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
61222 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61223 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
61224 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61225 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61226 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61227 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61228 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61229 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61230 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
61231 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
61232 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61233 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61234 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61235 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61236 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61237 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61238 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61239 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
61240 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
61241 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
61242 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
61243 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
61244 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61245 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61246 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61247 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61248 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61249 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61250 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61251 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
61252 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61253 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61254 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61255 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
61256 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
61257 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61258 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61259 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
61260 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
61261 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61262 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61263 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61264 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61265 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61266 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61267 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61268 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61269 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61270 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61271 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61272 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61273 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61274 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61275 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61276 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61277 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
61278 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61279 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
61280 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
61281 +#define GR_NICE_CHROOT_MSG "denied priority change by "
61282 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61283 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61284 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61285 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61286 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61287 +#define GR_TIME_MSG "time set by "
61288 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61289 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61290 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61291 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
61292 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
61293 +#define GR_BIND_MSG "denied bind() by "
61294 +#define GR_CONNECT_MSG "denied connect() by "
61295 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61296 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61297 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
61298 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61299 +#define GR_CAP_ACL_MSG "use of %s denied for "
61300 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
61301 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
61302 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61303 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61304 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61305 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61306 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61307 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61308 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61309 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
61310 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61311 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
61312 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
61313 +#define GR_VM86_MSG "denied use of vm86 by "
61314 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
61315 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
61316 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
61317 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
61318 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
61319 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61320 new file mode 100644
61321 index 0000000..38bfb04
61322 --- /dev/null
61323 +++ b/include/linux/grsecurity.h
61324 @@ -0,0 +1,233 @@
61325 +#ifndef GR_SECURITY_H
61326 +#define GR_SECURITY_H
61327 +#include <linux/fs.h>
61328 +#include <linux/fs_struct.h>
61329 +#include <linux/binfmts.h>
61330 +#include <linux/gracl.h>
61331 +
61332 +/* notify of brain-dead configs */
61333 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61334 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61335 +#endif
61336 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61337 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61338 +#endif
61339 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61340 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61341 +#endif
61342 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61343 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
61344 +#endif
61345 +
61346 +#include <linux/compat.h>
61347 +
61348 +struct user_arg_ptr {
61349 +#ifdef CONFIG_COMPAT
61350 + bool is_compat;
61351 +#endif
61352 + union {
61353 + const char __user *const __user *native;
61354 +#ifdef CONFIG_COMPAT
61355 + compat_uptr_t __user *compat;
61356 +#endif
61357 + } ptr;
61358 +};
61359 +
61360 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
61361 +void gr_handle_brute_check(void);
61362 +void gr_handle_kernel_exploit(void);
61363 +int gr_process_user_ban(void);
61364 +
61365 +char gr_roletype_to_char(void);
61366 +
61367 +int gr_acl_enable_at_secure(void);
61368 +
61369 +int gr_check_user_change(int real, int effective, int fs);
61370 +int gr_check_group_change(int real, int effective, int fs);
61371 +
61372 +void gr_del_task_from_ip_table(struct task_struct *p);
61373 +
61374 +int gr_pid_is_chrooted(struct task_struct *p);
61375 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
61376 +int gr_handle_chroot_nice(void);
61377 +int gr_handle_chroot_sysctl(const int op);
61378 +int gr_handle_chroot_setpriority(struct task_struct *p,
61379 + const int niceval);
61380 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61381 +int gr_handle_chroot_chroot(const struct dentry *dentry,
61382 + const struct vfsmount *mnt);
61383 +void gr_handle_chroot_chdir(struct path *path);
61384 +int gr_handle_chroot_chmod(const struct dentry *dentry,
61385 + const struct vfsmount *mnt, const int mode);
61386 +int gr_handle_chroot_mknod(const struct dentry *dentry,
61387 + const struct vfsmount *mnt, const int mode);
61388 +int gr_handle_chroot_mount(const struct dentry *dentry,
61389 + const struct vfsmount *mnt,
61390 + const char *dev_name);
61391 +int gr_handle_chroot_pivot(void);
61392 +int gr_handle_chroot_unix(const pid_t pid);
61393 +
61394 +int gr_handle_rawio(const struct inode *inode);
61395 +
61396 +void gr_handle_ioperm(void);
61397 +void gr_handle_iopl(void);
61398 +
61399 +umode_t gr_acl_umask(void);
61400 +
61401 +int gr_tpe_allow(const struct file *file);
61402 +
61403 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61404 +void gr_clear_chroot_entries(struct task_struct *task);
61405 +
61406 +void gr_log_forkfail(const int retval);
61407 +void gr_log_timechange(void);
61408 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61409 +void gr_log_chdir(const struct dentry *dentry,
61410 + const struct vfsmount *mnt);
61411 +void gr_log_chroot_exec(const struct dentry *dentry,
61412 + const struct vfsmount *mnt);
61413 +void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
61414 +void gr_log_remount(const char *devname, const int retval);
61415 +void gr_log_unmount(const char *devname, const int retval);
61416 +void gr_log_mount(const char *from, const char *to, const int retval);
61417 +void gr_log_textrel(struct vm_area_struct *vma);
61418 +void gr_log_rwxmmap(struct file *file);
61419 +void gr_log_rwxmprotect(struct file *file);
61420 +
61421 +int gr_handle_follow_link(const struct inode *parent,
61422 + const struct inode *inode,
61423 + const struct dentry *dentry,
61424 + const struct vfsmount *mnt);
61425 +int gr_handle_fifo(const struct dentry *dentry,
61426 + const struct vfsmount *mnt,
61427 + const struct dentry *dir, const int flag,
61428 + const int acc_mode);
61429 +int gr_handle_hardlink(const struct dentry *dentry,
61430 + const struct vfsmount *mnt,
61431 + struct inode *inode,
61432 + const int mode, const char *to);
61433 +
61434 +int gr_is_capable(const int cap);
61435 +int gr_is_capable_nolog(const int cap);
61436 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61437 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61438 +
61439 +void gr_learn_resource(const struct task_struct *task, const int limit,
61440 + const unsigned long wanted, const int gt);
61441 +void gr_copy_label(struct task_struct *tsk);
61442 +void gr_handle_crash(struct task_struct *task, const int sig);
61443 +int gr_handle_signal(const struct task_struct *p, const int sig);
61444 +int gr_check_crash_uid(const uid_t uid);
61445 +int gr_check_protected_task(const struct task_struct *task);
61446 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
61447 +int gr_acl_handle_mmap(const struct file *file,
61448 + const unsigned long prot);
61449 +int gr_acl_handle_mprotect(const struct file *file,
61450 + const unsigned long prot);
61451 +int gr_check_hidden_task(const struct task_struct *tsk);
61452 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61453 + const struct vfsmount *mnt);
61454 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
61455 + const struct vfsmount *mnt);
61456 +__u32 gr_acl_handle_access(const struct dentry *dentry,
61457 + const struct vfsmount *mnt, const int fmode);
61458 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
61459 + const struct vfsmount *mnt, umode_t *mode);
61460 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
61461 + const struct vfsmount *mnt);
61462 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61463 + const struct vfsmount *mnt);
61464 +int gr_handle_ptrace(struct task_struct *task, const long request);
61465 +int gr_handle_proc_ptrace(struct task_struct *task);
61466 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
61467 + const struct vfsmount *mnt);
61468 +int gr_check_crash_exec(const struct file *filp);
61469 +int gr_acl_is_enabled(void);
61470 +void gr_set_kernel_label(struct task_struct *task);
61471 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
61472 + const gid_t gid);
61473 +int gr_set_proc_label(const struct dentry *dentry,
61474 + const struct vfsmount *mnt,
61475 + const int unsafe_flags);
61476 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61477 + const struct vfsmount *mnt);
61478 +__u32 gr_acl_handle_open(const struct dentry *dentry,
61479 + const struct vfsmount *mnt, int acc_mode);
61480 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
61481 + const struct dentry *p_dentry,
61482 + const struct vfsmount *p_mnt,
61483 + int open_flags, int acc_mode, const int imode);
61484 +void gr_handle_create(const struct dentry *dentry,
61485 + const struct vfsmount *mnt);
61486 +void gr_handle_proc_create(const struct dentry *dentry,
61487 + const struct inode *inode);
61488 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61489 + const struct dentry *parent_dentry,
61490 + const struct vfsmount *parent_mnt,
61491 + const int mode);
61492 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61493 + const struct dentry *parent_dentry,
61494 + const struct vfsmount *parent_mnt);
61495 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61496 + const struct vfsmount *mnt);
61497 +void gr_handle_delete(const ino_t ino, const dev_t dev);
61498 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61499 + const struct vfsmount *mnt);
61500 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61501 + const struct dentry *parent_dentry,
61502 + const struct vfsmount *parent_mnt,
61503 + const char *from);
61504 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61505 + const struct dentry *parent_dentry,
61506 + const struct vfsmount *parent_mnt,
61507 + const struct dentry *old_dentry,
61508 + const struct vfsmount *old_mnt, const char *to);
61509 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
61510 +int gr_acl_handle_rename(struct dentry *new_dentry,
61511 + struct dentry *parent_dentry,
61512 + const struct vfsmount *parent_mnt,
61513 + struct dentry *old_dentry,
61514 + struct inode *old_parent_inode,
61515 + struct vfsmount *old_mnt, const char *newname);
61516 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61517 + struct dentry *old_dentry,
61518 + struct dentry *new_dentry,
61519 + struct vfsmount *mnt, const __u8 replace);
61520 +__u32 gr_check_link(const struct dentry *new_dentry,
61521 + const struct dentry *parent_dentry,
61522 + const struct vfsmount *parent_mnt,
61523 + const struct dentry *old_dentry,
61524 + const struct vfsmount *old_mnt);
61525 +int gr_acl_handle_filldir(const struct file *file, const char *name,
61526 + const unsigned int namelen, const ino_t ino);
61527 +
61528 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
61529 + const struct vfsmount *mnt);
61530 +void gr_acl_handle_exit(void);
61531 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
61532 +int gr_acl_handle_procpidmem(const struct task_struct *task);
61533 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61534 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61535 +void gr_audit_ptrace(struct task_struct *task);
61536 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
61537 +
61538 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61539 +
61540 +#ifdef CONFIG_GRKERNSEC
61541 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
61542 +void gr_handle_vm86(void);
61543 +void gr_handle_mem_readwrite(u64 from, u64 to);
61544 +
61545 +void gr_log_badprocpid(const char *entry);
61546 +
61547 +extern int grsec_enable_dmesg;
61548 +extern int grsec_disable_privio;
61549 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61550 +extern int grsec_enable_chroot_findtask;
61551 +#endif
61552 +#ifdef CONFIG_GRKERNSEC_SETXID
61553 +extern int grsec_enable_setxid;
61554 +#endif
61555 +#endif
61556 +
61557 +#endif
61558 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61559 new file mode 100644
61560 index 0000000..e7ffaaf
61561 --- /dev/null
61562 +++ b/include/linux/grsock.h
61563 @@ -0,0 +1,19 @@
61564 +#ifndef __GRSOCK_H
61565 +#define __GRSOCK_H
61566 +
61567 +extern void gr_attach_curr_ip(const struct sock *sk);
61568 +extern int gr_handle_sock_all(const int family, const int type,
61569 + const int protocol);
61570 +extern int gr_handle_sock_server(const struct sockaddr *sck);
61571 +extern int gr_handle_sock_server_other(const struct sock *sck);
61572 +extern int gr_handle_sock_client(const struct sockaddr *sck);
61573 +extern int gr_search_connect(struct socket * sock,
61574 + struct sockaddr_in * addr);
61575 +extern int gr_search_bind(struct socket * sock,
61576 + struct sockaddr_in * addr);
61577 +extern int gr_search_listen(struct socket * sock);
61578 +extern int gr_search_accept(struct socket * sock);
61579 +extern int gr_search_socket(const int domain, const int type,
61580 + const int protocol);
61581 +
61582 +#endif
61583 diff --git a/include/linux/hid.h b/include/linux/hid.h
61584 index 3a95da6..51986f1 100644
61585 --- a/include/linux/hid.h
61586 +++ b/include/linux/hid.h
61587 @@ -696,7 +696,7 @@ struct hid_ll_driver {
61588 unsigned int code, int value);
61589
61590 int (*parse)(struct hid_device *hdev);
61591 -};
61592 +} __no_const;
61593
61594 #define PM_HINT_FULLON 1<<5
61595 #define PM_HINT_NORMAL 1<<1
61596 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
61597 index d3999b4..1304cb4 100644
61598 --- a/include/linux/highmem.h
61599 +++ b/include/linux/highmem.h
61600 @@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61601 kunmap_atomic(kaddr);
61602 }
61603
61604 +static inline void sanitize_highpage(struct page *page)
61605 +{
61606 + void *kaddr;
61607 + unsigned long flags;
61608 +
61609 + local_irq_save(flags);
61610 + kaddr = kmap_atomic(page);
61611 + clear_page(kaddr);
61612 + kunmap_atomic(kaddr);
61613 + local_irq_restore(flags);
61614 +}
61615 +
61616 static inline void zero_user_segments(struct page *page,
61617 unsigned start1, unsigned end1,
61618 unsigned start2, unsigned end2)
61619 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
61620 index 195d8b3..e20cfab 100644
61621 --- a/include/linux/i2c.h
61622 +++ b/include/linux/i2c.h
61623 @@ -365,6 +365,7 @@ struct i2c_algorithm {
61624 /* To determine what the adapter supports */
61625 u32 (*functionality) (struct i2c_adapter *);
61626 };
61627 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61628
61629 /*
61630 * i2c_adapter is the structure used to identify a physical i2c bus along
61631 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
61632 index d23c3c2..eb63c81 100644
61633 --- a/include/linux/i2o.h
61634 +++ b/include/linux/i2o.h
61635 @@ -565,7 +565,7 @@ struct i2o_controller {
61636 struct i2o_device *exec; /* Executive */
61637 #if BITS_PER_LONG == 64
61638 spinlock_t context_list_lock; /* lock for context_list */
61639 - atomic_t context_list_counter; /* needed for unique contexts */
61640 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61641 struct list_head context_list; /* list of context id's
61642 and pointers */
61643 #endif
61644 diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61645 index 58404b0..439ed95 100644
61646 --- a/include/linux/if_team.h
61647 +++ b/include/linux/if_team.h
61648 @@ -64,6 +64,7 @@ struct team_mode_ops {
61649 void (*port_leave)(struct team *team, struct team_port *port);
61650 void (*port_change_mac)(struct team *team, struct team_port *port);
61651 };
61652 +typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61653
61654 enum team_option_type {
61655 TEAM_OPTION_TYPE_U32,
61656 @@ -112,7 +113,7 @@ struct team {
61657 struct list_head option_list;
61658
61659 const struct team_mode *mode;
61660 - struct team_mode_ops ops;
61661 + team_mode_ops_no_const ops;
61662 long mode_priv[TEAM_MODE_PRIV_LONGS];
61663 };
61664
61665 diff --git a/include/linux/init.h b/include/linux/init.h
61666 index 6b95109..4aca62c 100644
61667 --- a/include/linux/init.h
61668 +++ b/include/linux/init.h
61669 @@ -294,13 +294,13 @@ void __init parse_early_options(char *cmdline);
61670
61671 /* Each module must use one module_init(). */
61672 #define module_init(initfn) \
61673 - static inline initcall_t __inittest(void) \
61674 + static inline __used initcall_t __inittest(void) \
61675 { return initfn; } \
61676 int init_module(void) __attribute__((alias(#initfn)));
61677
61678 /* This is only required if you want to be unloadable. */
61679 #define module_exit(exitfn) \
61680 - static inline exitcall_t __exittest(void) \
61681 + static inline __used exitcall_t __exittest(void) \
61682 { return exitfn; } \
61683 void cleanup_module(void) __attribute__((alias(#exitfn)));
61684
61685 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
61686 index e4baff5..83bb175 100644
61687 --- a/include/linux/init_task.h
61688 +++ b/include/linux/init_task.h
61689 @@ -134,6 +134,12 @@ extern struct cred init_cred;
61690
61691 #define INIT_TASK_COMM "swapper"
61692
61693 +#ifdef CONFIG_X86
61694 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61695 +#else
61696 +#define INIT_TASK_THREAD_INFO
61697 +#endif
61698 +
61699 /*
61700 * INIT_TASK is used to set up the first task table, touch at
61701 * your own risk!. Base=0, limit=0x1fffff (=2MB)
61702 @@ -172,6 +178,7 @@ extern struct cred init_cred;
61703 RCU_INIT_POINTER(.cred, &init_cred), \
61704 .comm = INIT_TASK_COMM, \
61705 .thread = INIT_THREAD, \
61706 + INIT_TASK_THREAD_INFO \
61707 .fs = &init_fs, \
61708 .files = &init_files, \
61709 .signal = &init_signals, \
61710 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
61711 index e6ca56d..8583707 100644
61712 --- a/include/linux/intel-iommu.h
61713 +++ b/include/linux/intel-iommu.h
61714 @@ -296,7 +296,7 @@ struct iommu_flush {
61715 u8 fm, u64 type);
61716 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
61717 unsigned int size_order, u64 type);
61718 -};
61719 +} __no_const;
61720
61721 enum {
61722 SR_DMAR_FECTL_REG,
61723 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
61724 index 2aea5d2..0b82f0c 100644
61725 --- a/include/linux/interrupt.h
61726 +++ b/include/linux/interrupt.h
61727 @@ -439,7 +439,7 @@ enum
61728 /* map softirq index to softirq name. update 'softirq_to_name' in
61729 * kernel/softirq.c when adding a new softirq.
61730 */
61731 -extern char *softirq_to_name[NR_SOFTIRQS];
61732 +extern const char * const softirq_to_name[NR_SOFTIRQS];
61733
61734 /* softirq mask and active fields moved to irq_cpustat_t in
61735 * asm/hardirq.h to get better cache usage. KAO
61736 @@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
61737
61738 struct softirq_action
61739 {
61740 - void (*action)(struct softirq_action *);
61741 + void (*action)(void);
61742 };
61743
61744 asmlinkage void do_softirq(void);
61745 asmlinkage void __do_softirq(void);
61746 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
61747 +extern void open_softirq(int nr, void (*action)(void));
61748 extern void softirq_init(void);
61749 extern void __raise_softirq_irqoff(unsigned int nr);
61750
61751 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
61752 index 3875719..4cd454c 100644
61753 --- a/include/linux/kallsyms.h
61754 +++ b/include/linux/kallsyms.h
61755 @@ -15,7 +15,8 @@
61756
61757 struct module;
61758
61759 -#ifdef CONFIG_KALLSYMS
61760 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
61761 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
61762 /* Lookup the address for a symbol. Returns 0 if not found. */
61763 unsigned long kallsyms_lookup_name(const char *name);
61764
61765 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
61766 /* Stupid that this does nothing, but I didn't create this mess. */
61767 #define __print_symbol(fmt, addr)
61768 #endif /*CONFIG_KALLSYMS*/
61769 +#else /* when included by kallsyms.c, vsnprintf.c, or
61770 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
61771 +extern void __print_symbol(const char *fmt, unsigned long address);
61772 +extern int sprint_backtrace(char *buffer, unsigned long address);
61773 +extern int sprint_symbol(char *buffer, unsigned long address);
61774 +const char *kallsyms_lookup(unsigned long addr,
61775 + unsigned long *symbolsize,
61776 + unsigned long *offset,
61777 + char **modname, char *namebuf);
61778 +#endif
61779
61780 /* This macro allows us to keep printk typechecking */
61781 static __printf(1, 2)
61782 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
61783 index c4d2fc1..5df9c19 100644
61784 --- a/include/linux/kgdb.h
61785 +++ b/include/linux/kgdb.h
61786 @@ -53,7 +53,7 @@ extern int kgdb_connected;
61787 extern int kgdb_io_module_registered;
61788
61789 extern atomic_t kgdb_setting_breakpoint;
61790 -extern atomic_t kgdb_cpu_doing_single_step;
61791 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
61792
61793 extern struct task_struct *kgdb_usethread;
61794 extern struct task_struct *kgdb_contthread;
61795 @@ -252,7 +252,7 @@ struct kgdb_arch {
61796 void (*disable_hw_break)(struct pt_regs *regs);
61797 void (*remove_all_hw_break)(void);
61798 void (*correct_hw_break)(void);
61799 -};
61800 +} __do_const;
61801
61802 /**
61803 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
61804 @@ -277,7 +277,7 @@ struct kgdb_io {
61805 void (*pre_exception) (void);
61806 void (*post_exception) (void);
61807 int is_console;
61808 -};
61809 +} __do_const;
61810
61811 extern struct kgdb_arch arch_kgdb_ops;
61812
61813 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
61814 index dd99c32..da06047 100644
61815 --- a/include/linux/kmod.h
61816 +++ b/include/linux/kmod.h
61817 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
61818 * usually useless though. */
61819 extern __printf(2, 3)
61820 int __request_module(bool wait, const char *name, ...);
61821 +extern __printf(3, 4)
61822 +int ___request_module(bool wait, char *param_name, const char *name, ...);
61823 #define request_module(mod...) __request_module(true, mod)
61824 #define request_module_nowait(mod...) __request_module(false, mod)
61825 #define try_then_request_module(x, mod...) \
61826 diff --git a/include/linux/kref.h b/include/linux/kref.h
61827 index 9c07dce..a92fa71 100644
61828 --- a/include/linux/kref.h
61829 +++ b/include/linux/kref.h
61830 @@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
61831 static inline int kref_sub(struct kref *kref, unsigned int count,
61832 void (*release)(struct kref *kref))
61833 {
61834 - WARN_ON(release == NULL);
61835 + BUG_ON(release == NULL);
61836
61837 if (atomic_sub_and_test((int) count, &kref->refcount)) {
61838 release(kref);
61839 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
61840 index 72cbf08..dd0201d 100644
61841 --- a/include/linux/kvm_host.h
61842 +++ b/include/linux/kvm_host.h
61843 @@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
61844 void vcpu_load(struct kvm_vcpu *vcpu);
61845 void vcpu_put(struct kvm_vcpu *vcpu);
61846
61847 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61848 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
61849 struct module *module);
61850 void kvm_exit(void);
61851
61852 @@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
61853 struct kvm_guest_debug *dbg);
61854 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
61855
61856 -int kvm_arch_init(void *opaque);
61857 +int kvm_arch_init(const void *opaque);
61858 void kvm_arch_exit(void);
61859
61860 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
61861 diff --git a/include/linux/libata.h b/include/linux/libata.h
61862 index 6e887c7..4539601 100644
61863 --- a/include/linux/libata.h
61864 +++ b/include/linux/libata.h
61865 @@ -910,7 +910,7 @@ struct ata_port_operations {
61866 * fields must be pointers.
61867 */
61868 const struct ata_port_operations *inherits;
61869 -};
61870 +} __do_const;
61871
61872 struct ata_port_info {
61873 unsigned long flags;
61874 diff --git a/include/linux/mca.h b/include/linux/mca.h
61875 index 3797270..7765ede 100644
61876 --- a/include/linux/mca.h
61877 +++ b/include/linux/mca.h
61878 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
61879 int region);
61880 void * (*mca_transform_memory)(struct mca_device *,
61881 void *memory);
61882 -};
61883 +} __no_const;
61884
61885 struct mca_bus {
61886 u64 default_dma_mask;
61887 diff --git a/include/linux/memory.h b/include/linux/memory.h
61888 index 1ac7f6e..a5794d0 100644
61889 --- a/include/linux/memory.h
61890 +++ b/include/linux/memory.h
61891 @@ -143,7 +143,7 @@ struct memory_accessor {
61892 size_t count);
61893 ssize_t (*write)(struct memory_accessor *, const char *buf,
61894 off_t offset, size_t count);
61895 -};
61896 +} __no_const;
61897
61898 /*
61899 * Kernel text modification mutex, used for code patching. Users of this lock
61900 diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
61901 index ee96cd5..7823c3a 100644
61902 --- a/include/linux/mfd/abx500.h
61903 +++ b/include/linux/mfd/abx500.h
61904 @@ -455,6 +455,7 @@ struct abx500_ops {
61905 int (*event_registers_startup_state_get) (struct device *, u8 *);
61906 int (*startup_irq_enabled) (struct device *, unsigned int);
61907 };
61908 +typedef struct abx500_ops __no_const abx500_ops_no_const;
61909
61910 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
61911 void abx500_remove_ops(struct device *dev);
61912 diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
61913 index 9b07725..3d55001 100644
61914 --- a/include/linux/mfd/abx500/ux500_chargalg.h
61915 +++ b/include/linux/mfd/abx500/ux500_chargalg.h
61916 @@ -19,7 +19,7 @@ struct ux500_charger_ops {
61917 int (*enable) (struct ux500_charger *, int, int, int);
61918 int (*kick_wd) (struct ux500_charger *);
61919 int (*update_curr) (struct ux500_charger *, int);
61920 -};
61921 +} __no_const;
61922
61923 /**
61924 * struct ux500_charger - power supply ux500 charger sub class
61925 diff --git a/include/linux/mm.h b/include/linux/mm.h
61926 index 74aa71b..4ae97ba 100644
61927 --- a/include/linux/mm.h
61928 +++ b/include/linux/mm.h
61929 @@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
61930
61931 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
61932 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
61933 +
61934 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
61935 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
61936 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
61937 +#else
61938 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
61939 +#endif
61940 +
61941 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
61942 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
61943
61944 @@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
61945 int set_page_dirty_lock(struct page *page);
61946 int clear_page_dirty_for_io(struct page *page);
61947
61948 -/* Is the vma a continuation of the stack vma above it? */
61949 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
61950 -{
61951 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
61952 -}
61953 -
61954 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
61955 - unsigned long addr)
61956 -{
61957 - return (vma->vm_flags & VM_GROWSDOWN) &&
61958 - (vma->vm_start == addr) &&
61959 - !vma_growsdown(vma->vm_prev, addr);
61960 -}
61961 -
61962 -/* Is the vma a continuation of the stack vma below it? */
61963 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
61964 -{
61965 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
61966 -}
61967 -
61968 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
61969 - unsigned long addr)
61970 -{
61971 - return (vma->vm_flags & VM_GROWSUP) &&
61972 - (vma->vm_end == addr) &&
61973 - !vma_growsup(vma->vm_next, addr);
61974 -}
61975 -
61976 extern pid_t
61977 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
61978
61979 @@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
61980 }
61981 #endif
61982
61983 +#ifdef CONFIG_MMU
61984 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
61985 +#else
61986 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
61987 +{
61988 + return __pgprot(0);
61989 +}
61990 +#endif
61991 +
61992 int vma_wants_writenotify(struct vm_area_struct *vma);
61993
61994 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
61995 @@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
61996 {
61997 return 0;
61998 }
61999 +
62000 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
62001 + unsigned long address)
62002 +{
62003 + return 0;
62004 +}
62005 #else
62006 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62007 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62008 #endif
62009
62010 #ifdef __PAGETABLE_PMD_FOLDED
62011 @@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
62012 {
62013 return 0;
62014 }
62015 +
62016 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
62017 + unsigned long address)
62018 +{
62019 + return 0;
62020 +}
62021 #else
62022 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
62023 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
62024 #endif
62025
62026 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
62027 @@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
62028 NULL: pud_offset(pgd, address);
62029 }
62030
62031 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
62032 +{
62033 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
62034 + NULL: pud_offset(pgd, address);
62035 +}
62036 +
62037 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
62038 {
62039 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
62040 NULL: pmd_offset(pud, address);
62041 }
62042 +
62043 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
62044 +{
62045 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
62046 + NULL: pmd_offset(pud, address);
62047 +}
62048 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
62049
62050 #if USE_SPLIT_PTLOCKS
62051 @@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
62052 unsigned long, unsigned long,
62053 unsigned long, unsigned long);
62054 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62055 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62056
62057 /* These take the mm semaphore themselves */
62058 extern unsigned long vm_brk(unsigned long, unsigned long);
62059 @@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
62060 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62061 struct vm_area_struct **pprev);
62062
62063 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
62064 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
62065 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62066 +
62067 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62068 NULL if none. Assume start_addr < end_addr. */
62069 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
62070 @@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
62071 return vma;
62072 }
62073
62074 -#ifdef CONFIG_MMU
62075 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
62076 -#else
62077 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62078 -{
62079 - return __pgprot(0);
62080 -}
62081 -#endif
62082 -
62083 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62084 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62085 unsigned long pfn, unsigned long size, pgprot_t);
62086 @@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
62087 extern int sysctl_memory_failure_early_kill;
62088 extern int sysctl_memory_failure_recovery;
62089 extern void shake_page(struct page *p, int access);
62090 -extern atomic_long_t mce_bad_pages;
62091 +extern atomic_long_unchecked_t mce_bad_pages;
62092 extern int soft_offline_page(struct page *page, int flags);
62093
62094 extern void dump_page(struct page *page);
62095 @@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
62096 static inline bool page_is_guard(struct page *page) { return false; }
62097 #endif /* CONFIG_DEBUG_PAGEALLOC */
62098
62099 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62100 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62101 +#else
62102 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62103 +#endif
62104 +
62105 #endif /* __KERNEL__ */
62106 #endif /* _LINUX_MM_H */
62107 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
62108 index b35752f..41075a0 100644
62109 --- a/include/linux/mm_types.h
62110 +++ b/include/linux/mm_types.h
62111 @@ -262,6 +262,8 @@ struct vm_area_struct {
62112 #ifdef CONFIG_NUMA
62113 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62114 #endif
62115 +
62116 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62117 };
62118
62119 struct core_thread {
62120 @@ -336,7 +338,7 @@ struct mm_struct {
62121 unsigned long def_flags;
62122 unsigned long nr_ptes; /* Page table pages */
62123 unsigned long start_code, end_code, start_data, end_data;
62124 - unsigned long start_brk, brk, start_stack;
62125 + unsigned long brk_gap, start_brk, brk, start_stack;
62126 unsigned long arg_start, arg_end, env_start, env_end;
62127
62128 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
62129 @@ -398,6 +400,24 @@ struct mm_struct {
62130 #ifdef CONFIG_CPUMASK_OFFSTACK
62131 struct cpumask cpumask_allocation;
62132 #endif
62133 +
62134 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62135 + unsigned long pax_flags;
62136 +#endif
62137 +
62138 +#ifdef CONFIG_PAX_DLRESOLVE
62139 + unsigned long call_dl_resolve;
62140 +#endif
62141 +
62142 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62143 + unsigned long call_syscall;
62144 +#endif
62145 +
62146 +#ifdef CONFIG_PAX_ASLR
62147 + unsigned long delta_mmap; /* randomized offset */
62148 + unsigned long delta_stack; /* randomized offset */
62149 +#endif
62150 +
62151 };
62152
62153 static inline void mm_init_cpumask(struct mm_struct *mm)
62154 diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62155 index 1d1b1e1..2a13c78 100644
62156 --- a/include/linux/mmu_notifier.h
62157 +++ b/include/linux/mmu_notifier.h
62158 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
62159 */
62160 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62161 ({ \
62162 - pte_t __pte; \
62163 + pte_t ___pte; \
62164 struct vm_area_struct *___vma = __vma; \
62165 unsigned long ___address = __address; \
62166 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62167 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62168 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62169 - __pte; \
62170 + ___pte; \
62171 })
62172
62173 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
62174 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
62175 index 5f6806b..49db2b2 100644
62176 --- a/include/linux/mmzone.h
62177 +++ b/include/linux/mmzone.h
62178 @@ -380,7 +380,7 @@ struct zone {
62179 unsigned long flags; /* zone flags, see below */
62180
62181 /* Zone statistics */
62182 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62183 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62184
62185 /*
62186 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
62187 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
62188 index 501da4c..ba79bb4 100644
62189 --- a/include/linux/mod_devicetable.h
62190 +++ b/include/linux/mod_devicetable.h
62191 @@ -12,7 +12,7 @@
62192 typedef unsigned long kernel_ulong_t;
62193 #endif
62194
62195 -#define PCI_ANY_ID (~0)
62196 +#define PCI_ANY_ID ((__u16)~0)
62197
62198 struct pci_device_id {
62199 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62200 @@ -131,7 +131,7 @@ struct usb_device_id {
62201 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62202 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62203
62204 -#define HID_ANY_ID (~0)
62205 +#define HID_ANY_ID (~0U)
62206
62207 struct hid_device_id {
62208 __u16 bus;
62209 diff --git a/include/linux/module.h b/include/linux/module.h
62210 index fbcafe2..e5d9587 100644
62211 --- a/include/linux/module.h
62212 +++ b/include/linux/module.h
62213 @@ -17,6 +17,7 @@
62214 #include <linux/moduleparam.h>
62215 #include <linux/tracepoint.h>
62216 #include <linux/export.h>
62217 +#include <linux/fs.h>
62218
62219 #include <linux/percpu.h>
62220 #include <asm/module.h>
62221 @@ -273,19 +274,16 @@ struct module
62222 int (*init)(void);
62223
62224 /* If this is non-NULL, vfree after init() returns */
62225 - void *module_init;
62226 + void *module_init_rx, *module_init_rw;
62227
62228 /* Here is the actual code + data, vfree'd on unload. */
62229 - void *module_core;
62230 + void *module_core_rx, *module_core_rw;
62231
62232 /* Here are the sizes of the init and core sections */
62233 - unsigned int init_size, core_size;
62234 + unsigned int init_size_rw, core_size_rw;
62235
62236 /* The size of the executable code in each section. */
62237 - unsigned int init_text_size, core_text_size;
62238 -
62239 - /* Size of RO sections of the module (text+rodata) */
62240 - unsigned int init_ro_size, core_ro_size;
62241 + unsigned int init_size_rx, core_size_rx;
62242
62243 /* Arch-specific module values */
62244 struct mod_arch_specific arch;
62245 @@ -341,6 +339,10 @@ struct module
62246 #ifdef CONFIG_EVENT_TRACING
62247 struct ftrace_event_call **trace_events;
62248 unsigned int num_trace_events;
62249 + struct file_operations trace_id;
62250 + struct file_operations trace_enable;
62251 + struct file_operations trace_format;
62252 + struct file_operations trace_filter;
62253 #endif
62254 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62255 unsigned int num_ftrace_callsites;
62256 @@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
62257 bool is_module_percpu_address(unsigned long addr);
62258 bool is_module_text_address(unsigned long addr);
62259
62260 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62261 +{
62262 +
62263 +#ifdef CONFIG_PAX_KERNEXEC
62264 + if (ktla_ktva(addr) >= (unsigned long)start &&
62265 + ktla_ktva(addr) < (unsigned long)start + size)
62266 + return 1;
62267 +#endif
62268 +
62269 + return ((void *)addr >= start && (void *)addr < start + size);
62270 +}
62271 +
62272 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62273 +{
62274 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62275 +}
62276 +
62277 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62278 +{
62279 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62280 +}
62281 +
62282 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62283 +{
62284 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62285 +}
62286 +
62287 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62288 +{
62289 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62290 +}
62291 +
62292 static inline int within_module_core(unsigned long addr, struct module *mod)
62293 {
62294 - return (unsigned long)mod->module_core <= addr &&
62295 - addr < (unsigned long)mod->module_core + mod->core_size;
62296 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62297 }
62298
62299 static inline int within_module_init(unsigned long addr, struct module *mod)
62300 {
62301 - return (unsigned long)mod->module_init <= addr &&
62302 - addr < (unsigned long)mod->module_init + mod->init_size;
62303 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62304 }
62305
62306 /* Search for module by name: must hold module_mutex. */
62307 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
62308 index b2be02e..72d2f78 100644
62309 --- a/include/linux/moduleloader.h
62310 +++ b/include/linux/moduleloader.h
62311 @@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
62312
62313 /* Allocator used for allocating struct module, core sections and init
62314 sections. Returns NULL on failure. */
62315 -void *module_alloc(unsigned long size);
62316 +void *module_alloc(unsigned long size) __size_overflow(1);
62317 +
62318 +#ifdef CONFIG_PAX_KERNEXEC
62319 +void *module_alloc_exec(unsigned long size) __size_overflow(1);
62320 +#else
62321 +#define module_alloc_exec(x) module_alloc(x)
62322 +#endif
62323
62324 /* Free memory returned from module_alloc. */
62325 void module_free(struct module *mod, void *module_region);
62326
62327 +#ifdef CONFIG_PAX_KERNEXEC
62328 +void module_free_exec(struct module *mod, void *module_region);
62329 +#else
62330 +#define module_free_exec(x, y) module_free((x), (y))
62331 +#endif
62332 +
62333 /* Apply the given relocation to the (simplified) ELF. Return -error
62334 or 0. */
62335 int apply_relocate(Elf_Shdr *sechdrs,
62336 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
62337 index 944bc18..042d291 100644
62338 --- a/include/linux/moduleparam.h
62339 +++ b/include/linux/moduleparam.h
62340 @@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
62341 * @len is usually just sizeof(string).
62342 */
62343 #define module_param_string(name, string, len, perm) \
62344 - static const struct kparam_string __param_string_##name \
62345 + static const struct kparam_string __param_string_##name __used \
62346 = { len, string }; \
62347 __module_param_call(MODULE_PARAM_PREFIX, name, \
62348 &param_ops_string, \
62349 @@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
62350 */
62351 #define module_param_array_named(name, array, type, nump, perm) \
62352 param_check_##type(name, &(array)[0]); \
62353 - static const struct kparam_array __param_arr_##name \
62354 + static const struct kparam_array __param_arr_##name __used \
62355 = { .max = ARRAY_SIZE(array), .num = nump, \
62356 .ops = &param_ops_##type, \
62357 .elemsize = sizeof(array[0]), .elem = array }; \
62358 diff --git a/include/linux/namei.h b/include/linux/namei.h
62359 index ffc0213..2c1f2cb 100644
62360 --- a/include/linux/namei.h
62361 +++ b/include/linux/namei.h
62362 @@ -24,7 +24,7 @@ struct nameidata {
62363 unsigned seq;
62364 int last_type;
62365 unsigned depth;
62366 - char *saved_names[MAX_NESTED_LINKS + 1];
62367 + const char *saved_names[MAX_NESTED_LINKS + 1];
62368
62369 /* Intent data */
62370 union {
62371 @@ -94,12 +94,12 @@ extern int follow_up(struct path *);
62372 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62373 extern void unlock_rename(struct dentry *, struct dentry *);
62374
62375 -static inline void nd_set_link(struct nameidata *nd, char *path)
62376 +static inline void nd_set_link(struct nameidata *nd, const char *path)
62377 {
62378 nd->saved_names[nd->depth] = path;
62379 }
62380
62381 -static inline char *nd_get_link(struct nameidata *nd)
62382 +static inline const char *nd_get_link(const struct nameidata *nd)
62383 {
62384 return nd->saved_names[nd->depth];
62385 }
62386 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
62387 index 33900a5..2072000 100644
62388 --- a/include/linux/netdevice.h
62389 +++ b/include/linux/netdevice.h
62390 @@ -1003,6 +1003,7 @@ struct net_device_ops {
62391 int (*ndo_neigh_construct)(struct neighbour *n);
62392 void (*ndo_neigh_destroy)(struct neighbour *n);
62393 };
62394 +typedef struct net_device_ops __no_const net_device_ops_no_const;
62395
62396 /*
62397 * The DEVICE structure.
62398 @@ -1064,7 +1065,7 @@ struct net_device {
62399 int iflink;
62400
62401 struct net_device_stats stats;
62402 - atomic_long_t rx_dropped; /* dropped packets by core network
62403 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62404 * Do not use this in drivers.
62405 */
62406
62407 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62408 new file mode 100644
62409 index 0000000..33f4af8
62410 --- /dev/null
62411 +++ b/include/linux/netfilter/xt_gradm.h
62412 @@ -0,0 +1,9 @@
62413 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
62414 +#define _LINUX_NETFILTER_XT_GRADM_H 1
62415 +
62416 +struct xt_gradm_mtinfo {
62417 + __u16 flags;
62418 + __u16 invflags;
62419 +};
62420 +
62421 +#endif
62422 diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62423 index c65a18a..0c05f3a 100644
62424 --- a/include/linux/of_pdt.h
62425 +++ b/include/linux/of_pdt.h
62426 @@ -32,7 +32,7 @@ struct of_pdt_ops {
62427
62428 /* return 0 on success; fill in 'len' with number of bytes in path */
62429 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62430 -};
62431 +} __no_const;
62432
62433 extern void *prom_early_alloc(unsigned long size);
62434
62435 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
62436 index a4c5624..79d6d88 100644
62437 --- a/include/linux/oprofile.h
62438 +++ b/include/linux/oprofile.h
62439 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
62440 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62441 char const * name, ulong * val);
62442
62443 -/** Create a file for read-only access to an atomic_t. */
62444 +/** Create a file for read-only access to an atomic_unchecked_t. */
62445 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62446 - char const * name, atomic_t * val);
62447 + char const * name, atomic_unchecked_t * val);
62448
62449 /** create a directory */
62450 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
62451 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
62452 index ddbb6a9..be1680e 100644
62453 --- a/include/linux/perf_event.h
62454 +++ b/include/linux/perf_event.h
62455 @@ -879,8 +879,8 @@ struct perf_event {
62456
62457 enum perf_event_active_state state;
62458 unsigned int attach_state;
62459 - local64_t count;
62460 - atomic64_t child_count;
62461 + local64_t count; /* PaX: fix it one day */
62462 + atomic64_unchecked_t child_count;
62463
62464 /*
62465 * These are the total time in nanoseconds that the event
62466 @@ -931,8 +931,8 @@ struct perf_event {
62467 * These accumulate total time (in nanoseconds) that children
62468 * events have been enabled and running, respectively.
62469 */
62470 - atomic64_t child_total_time_enabled;
62471 - atomic64_t child_total_time_running;
62472 + atomic64_unchecked_t child_total_time_enabled;
62473 + atomic64_unchecked_t child_total_time_running;
62474
62475 /*
62476 * Protect attach/detach and child_list:
62477 diff --git a/include/linux/personality.h b/include/linux/personality.h
62478 index 8fc7dd1a..c19d89e 100644
62479 --- a/include/linux/personality.h
62480 +++ b/include/linux/personality.h
62481 @@ -44,6 +44,7 @@ enum {
62482 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62483 ADDR_NO_RANDOMIZE | \
62484 ADDR_COMPAT_LAYOUT | \
62485 + ADDR_LIMIT_3GB | \
62486 MMAP_PAGE_ZERO)
62487
62488 /*
62489 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
62490 index e1ac1ce..0675fed 100644
62491 --- a/include/linux/pipe_fs_i.h
62492 +++ b/include/linux/pipe_fs_i.h
62493 @@ -45,9 +45,9 @@ struct pipe_buffer {
62494 struct pipe_inode_info {
62495 wait_queue_head_t wait;
62496 unsigned int nrbufs, curbuf, buffers;
62497 - unsigned int readers;
62498 - unsigned int writers;
62499 - unsigned int waiting_writers;
62500 + atomic_t readers;
62501 + atomic_t writers;
62502 + atomic_t waiting_writers;
62503 unsigned int r_counter;
62504 unsigned int w_counter;
62505 struct page *tmp_page;
62506 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
62507 index 609daae..5392427 100644
62508 --- a/include/linux/pm_runtime.h
62509 +++ b/include/linux/pm_runtime.h
62510 @@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
62511
62512 static inline void pm_runtime_mark_last_busy(struct device *dev)
62513 {
62514 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
62515 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62516 }
62517
62518 #else /* !CONFIG_PM_RUNTIME */
62519 diff --git a/include/linux/poison.h b/include/linux/poison.h
62520 index 2110a81..13a11bb 100644
62521 --- a/include/linux/poison.h
62522 +++ b/include/linux/poison.h
62523 @@ -19,8 +19,8 @@
62524 * under normal circumstances, used to verify that nobody uses
62525 * non-initialized list entries.
62526 */
62527 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62528 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62529 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62530 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
62531
62532 /********** include/linux/timer.h **********/
62533 /*
62534 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
62535 index 5a710b9..0b0dab9 100644
62536 --- a/include/linux/preempt.h
62537 +++ b/include/linux/preempt.h
62538 @@ -126,7 +126,7 @@ struct preempt_ops {
62539 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62540 void (*sched_out)(struct preempt_notifier *notifier,
62541 struct task_struct *next);
62542 -};
62543 +} __no_const;
62544
62545 /**
62546 * preempt_notifier - key for installing preemption notifiers
62547 diff --git a/include/linux/printk.h b/include/linux/printk.h
62548 index 0525927..a5388b6 100644
62549 --- a/include/linux/printk.h
62550 +++ b/include/linux/printk.h
62551 @@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62552 extern int printk_needs_cpu(int cpu);
62553 extern void printk_tick(void);
62554
62555 +extern int kptr_restrict;
62556 +
62557 #ifdef CONFIG_PRINTK
62558 asmlinkage __printf(1, 0)
62559 int vprintk(const char *fmt, va_list args);
62560 @@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62561
62562 extern int printk_delay_msec;
62563 extern int dmesg_restrict;
62564 -extern int kptr_restrict;
62565
62566 void log_buf_kexec_setup(void);
62567 void __init setup_log_buf(int early);
62568 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
62569 index 85c5073..51fac8b 100644
62570 --- a/include/linux/proc_fs.h
62571 +++ b/include/linux/proc_fs.h
62572 @@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
62573 return proc_create_data(name, mode, parent, proc_fops, NULL);
62574 }
62575
62576 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
62577 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62578 +{
62579 +#ifdef CONFIG_GRKERNSEC_PROC_USER
62580 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62581 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62582 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62583 +#else
62584 + return proc_create_data(name, mode, parent, proc_fops, NULL);
62585 +#endif
62586 +}
62587 +
62588 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
62589 umode_t mode, struct proc_dir_entry *base,
62590 read_proc_t *read_proc, void * data)
62591 @@ -258,7 +270,7 @@ union proc_op {
62592 int (*proc_show)(struct seq_file *m,
62593 struct pid_namespace *ns, struct pid *pid,
62594 struct task_struct *task);
62595 -};
62596 +} __no_const;
62597
62598 struct ctl_table_header;
62599 struct ctl_table;
62600 diff --git a/include/linux/random.h b/include/linux/random.h
62601 index 8f74538..de61694 100644
62602 --- a/include/linux/random.h
62603 +++ b/include/linux/random.h
62604 @@ -54,6 +54,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
62605 unsigned int value);
62606 extern void add_interrupt_randomness(int irq);
62607
62608 +#ifdef CONFIG_PAX_LATENT_ENTROPY
62609 +extern void transfer_latent_entropy(void);
62610 +#endif
62611 +
62612 extern void get_random_bytes(void *buf, int nbytes);
62613 void generate_random_uuid(unsigned char uuid_out[16]);
62614
62615 @@ -69,12 +73,17 @@ void srandom32(u32 seed);
62616
62617 u32 prandom32(struct rnd_state *);
62618
62619 +static inline unsigned long pax_get_random_long(void)
62620 +{
62621 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62622 +}
62623 +
62624 /*
62625 * Handle minimum values for seeds
62626 */
62627 static inline u32 __seed(u32 x, u32 m)
62628 {
62629 - return (x < m) ? x + m : x;
62630 + return (x <= m) ? x + m + 1 : x;
62631 }
62632
62633 /**
62634 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62635 index e0879a7..a12f962 100644
62636 --- a/include/linux/reboot.h
62637 +++ b/include/linux/reboot.h
62638 @@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
62639 * Architecture-specific implementations of sys_reboot commands.
62640 */
62641
62642 -extern void machine_restart(char *cmd);
62643 -extern void machine_halt(void);
62644 -extern void machine_power_off(void);
62645 +extern void machine_restart(char *cmd) __noreturn;
62646 +extern void machine_halt(void) __noreturn;
62647 +extern void machine_power_off(void) __noreturn;
62648
62649 extern void machine_shutdown(void);
62650 struct pt_regs;
62651 @@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
62652 */
62653
62654 extern void kernel_restart_prepare(char *cmd);
62655 -extern void kernel_restart(char *cmd);
62656 -extern void kernel_halt(void);
62657 -extern void kernel_power_off(void);
62658 +extern void kernel_restart(char *cmd) __noreturn;
62659 +extern void kernel_halt(void) __noreturn;
62660 +extern void kernel_power_off(void) __noreturn;
62661
62662 extern int C_A_D; /* for sysctl */
62663 void ctrl_alt_del(void);
62664 @@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
62665 * Emergency restart, callable from an interrupt handler.
62666 */
62667
62668 -extern void emergency_restart(void);
62669 +extern void emergency_restart(void) __noreturn;
62670 #include <asm/emergency-restart.h>
62671
62672 #endif
62673 diff --git a/include/linux/relay.h b/include/linux/relay.h
62674 index 91cacc3..b55ff74 100644
62675 --- a/include/linux/relay.h
62676 +++ b/include/linux/relay.h
62677 @@ -160,7 +160,7 @@ struct rchan_callbacks
62678 * The callback should return 0 if successful, negative if not.
62679 */
62680 int (*remove_buf_file)(struct dentry *dentry);
62681 -};
62682 +} __no_const;
62683
62684 /*
62685 * CONFIG_RELAY kernel API, kernel/relay.c
62686 diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
62687 index 6fdf027..ff72610 100644
62688 --- a/include/linux/rfkill.h
62689 +++ b/include/linux/rfkill.h
62690 @@ -147,6 +147,7 @@ struct rfkill_ops {
62691 void (*query)(struct rfkill *rfkill, void *data);
62692 int (*set_block)(void *data, bool blocked);
62693 };
62694 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62695
62696 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62697 /**
62698 diff --git a/include/linux/rio.h b/include/linux/rio.h
62699 index 4d50611..c6858a2 100644
62700 --- a/include/linux/rio.h
62701 +++ b/include/linux/rio.h
62702 @@ -315,7 +315,7 @@ struct rio_ops {
62703 int mbox, void *buffer, size_t len);
62704 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
62705 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
62706 -};
62707 +} __no_const;
62708
62709 #define RIO_RESOURCE_MEM 0x00000100
62710 #define RIO_RESOURCE_DOORBELL 0x00000200
62711 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
62712 index fd07c45..4676b8e 100644
62713 --- a/include/linux/rmap.h
62714 +++ b/include/linux/rmap.h
62715 @@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
62716 void anon_vma_init(void); /* create anon_vma_cachep */
62717 int anon_vma_prepare(struct vm_area_struct *);
62718 void unlink_anon_vmas(struct vm_area_struct *);
62719 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
62720 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
62721 void anon_vma_moveto_tail(struct vm_area_struct *);
62722 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
62723 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
62724
62725 static inline void anon_vma_merge(struct vm_area_struct *vma,
62726 struct vm_area_struct *next)
62727 diff --git a/include/linux/sched.h b/include/linux/sched.h
62728 index 7b06169..da44f01 100644
62729 --- a/include/linux/sched.h
62730 +++ b/include/linux/sched.h
62731 @@ -100,6 +100,7 @@ struct bio_list;
62732 struct fs_struct;
62733 struct perf_event_context;
62734 struct blk_plug;
62735 +struct linux_binprm;
62736
62737 /*
62738 * List of flags we want to share for kernel threads,
62739 @@ -382,10 +383,13 @@ struct user_namespace;
62740 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
62741
62742 extern int sysctl_max_map_count;
62743 +extern unsigned long sysctl_heap_stack_gap;
62744
62745 #include <linux/aio.h>
62746
62747 #ifdef CONFIG_MMU
62748 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
62749 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
62750 extern void arch_pick_mmap_layout(struct mm_struct *mm);
62751 extern unsigned long
62752 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
62753 @@ -643,6 +647,17 @@ struct signal_struct {
62754 #ifdef CONFIG_TASKSTATS
62755 struct taskstats *stats;
62756 #endif
62757 +
62758 +#ifdef CONFIG_GRKERNSEC
62759 + u32 curr_ip;
62760 + u32 saved_ip;
62761 + u32 gr_saddr;
62762 + u32 gr_daddr;
62763 + u16 gr_sport;
62764 + u16 gr_dport;
62765 + u8 used_accept:1;
62766 +#endif
62767 +
62768 #ifdef CONFIG_AUDIT
62769 unsigned audit_tty;
62770 struct tty_audit_buf *tty_audit_buf;
62771 @@ -726,6 +741,11 @@ struct user_struct {
62772 struct key *session_keyring; /* UID's default session keyring */
62773 #endif
62774
62775 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
62776 + unsigned int banned;
62777 + unsigned long ban_expires;
62778 +#endif
62779 +
62780 /* Hash table maintenance information */
62781 struct hlist_node uidhash_node;
62782 uid_t uid;
62783 @@ -1386,8 +1406,8 @@ struct task_struct {
62784 struct list_head thread_group;
62785
62786 struct completion *vfork_done; /* for vfork() */
62787 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
62788 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62789 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
62790 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
62791
62792 cputime_t utime, stime, utimescaled, stimescaled;
62793 cputime_t gtime;
62794 @@ -1403,13 +1423,6 @@ struct task_struct {
62795 struct task_cputime cputime_expires;
62796 struct list_head cpu_timers[3];
62797
62798 -/* process credentials */
62799 - const struct cred __rcu *real_cred; /* objective and real subjective task
62800 - * credentials (COW) */
62801 - const struct cred __rcu *cred; /* effective (overridable) subjective task
62802 - * credentials (COW) */
62803 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62804 -
62805 char comm[TASK_COMM_LEN]; /* executable name excluding path
62806 - access with [gs]et_task_comm (which lock
62807 it with task_lock())
62808 @@ -1426,8 +1439,16 @@ struct task_struct {
62809 #endif
62810 /* CPU-specific state of this task */
62811 struct thread_struct thread;
62812 +/* thread_info moved to task_struct */
62813 +#ifdef CONFIG_X86
62814 + struct thread_info tinfo;
62815 +#endif
62816 /* filesystem information */
62817 struct fs_struct *fs;
62818 +
62819 + const struct cred __rcu *cred; /* effective (overridable) subjective task
62820 + * credentials (COW) */
62821 +
62822 /* open file information */
62823 struct files_struct *files;
62824 /* namespaces */
62825 @@ -1469,6 +1490,11 @@ struct task_struct {
62826 struct rt_mutex_waiter *pi_blocked_on;
62827 #endif
62828
62829 +/* process credentials */
62830 + const struct cred __rcu *real_cred; /* objective and real subjective task
62831 + * credentials (COW) */
62832 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
62833 +
62834 #ifdef CONFIG_DEBUG_MUTEXES
62835 /* mutex deadlock detection */
62836 struct mutex_waiter *blocked_on;
62837 @@ -1585,6 +1611,27 @@ struct task_struct {
62838 unsigned long default_timer_slack_ns;
62839
62840 struct list_head *scm_work_list;
62841 +
62842 +#ifdef CONFIG_GRKERNSEC
62843 + /* grsecurity */
62844 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62845 + u64 exec_id;
62846 +#endif
62847 +#ifdef CONFIG_GRKERNSEC_SETXID
62848 + const struct cred *delayed_cred;
62849 +#endif
62850 + struct dentry *gr_chroot_dentry;
62851 + struct acl_subject_label *acl;
62852 + struct acl_role_label *role;
62853 + struct file *exec_file;
62854 + u16 acl_role_id;
62855 + /* is this the task that authenticated to the special role */
62856 + u8 acl_sp_role;
62857 + u8 is_writable;
62858 + u8 brute;
62859 + u8 gr_is_chrooted;
62860 +#endif
62861 +
62862 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
62863 /* Index of current stored address in ret_stack */
62864 int curr_ret_stack;
62865 @@ -1619,6 +1666,51 @@ struct task_struct {
62866 #endif
62867 };
62868
62869 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
62870 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
62871 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
62872 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
62873 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
62874 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
62875 +
62876 +#ifdef CONFIG_PAX_SOFTMODE
62877 +extern int pax_softmode;
62878 +#endif
62879 +
62880 +extern int pax_check_flags(unsigned long *);
62881 +
62882 +/* if tsk != current then task_lock must be held on it */
62883 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
62884 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
62885 +{
62886 + if (likely(tsk->mm))
62887 + return tsk->mm->pax_flags;
62888 + else
62889 + return 0UL;
62890 +}
62891 +
62892 +/* if tsk != current then task_lock must be held on it */
62893 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
62894 +{
62895 + if (likely(tsk->mm)) {
62896 + tsk->mm->pax_flags = flags;
62897 + return 0;
62898 + }
62899 + return -EINVAL;
62900 +}
62901 +#endif
62902 +
62903 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62904 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
62905 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
62906 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
62907 +#endif
62908 +
62909 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
62910 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
62911 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
62912 +extern __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
62913 +
62914 /* Future-safe accessor for struct task_struct's cpus_allowed. */
62915 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
62916
62917 @@ -2146,7 +2238,9 @@ void yield(void);
62918 extern struct exec_domain default_exec_domain;
62919
62920 union thread_union {
62921 +#ifndef CONFIG_X86
62922 struct thread_info thread_info;
62923 +#endif
62924 unsigned long stack[THREAD_SIZE/sizeof(long)];
62925 };
62926
62927 @@ -2179,6 +2273,7 @@ extern struct pid_namespace init_pid_ns;
62928 */
62929
62930 extern struct task_struct *find_task_by_vpid(pid_t nr);
62931 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
62932 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
62933 struct pid_namespace *ns);
62934
62935 @@ -2322,7 +2417,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
62936 extern void exit_itimers(struct signal_struct *);
62937 extern void flush_itimer_signals(void);
62938
62939 -extern void do_group_exit(int);
62940 +extern __noreturn void do_group_exit(int);
62941
62942 extern void daemonize(const char *, ...);
62943 extern int allow_signal(int);
62944 @@ -2523,13 +2618,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
62945
62946 #endif
62947
62948 -static inline int object_is_on_stack(void *obj)
62949 +static inline int object_starts_on_stack(void *obj)
62950 {
62951 - void *stack = task_stack_page(current);
62952 + const void *stack = task_stack_page(current);
62953
62954 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
62955 }
62956
62957 +#ifdef CONFIG_PAX_USERCOPY
62958 +extern int object_is_on_stack(const void *obj, unsigned long len);
62959 +#endif
62960 +
62961 extern void thread_info_cache_init(void);
62962
62963 #ifdef CONFIG_DEBUG_STACK_USAGE
62964 diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
62965 index 899fbb4..1cb4138 100644
62966 --- a/include/linux/screen_info.h
62967 +++ b/include/linux/screen_info.h
62968 @@ -43,7 +43,8 @@ struct screen_info {
62969 __u16 pages; /* 0x32 */
62970 __u16 vesa_attributes; /* 0x34 */
62971 __u32 capabilities; /* 0x36 */
62972 - __u8 _reserved[6]; /* 0x3a */
62973 + __u16 vesapm_size; /* 0x3a */
62974 + __u8 _reserved[4]; /* 0x3c */
62975 } __attribute__((packed));
62976
62977 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
62978 diff --git a/include/linux/security.h b/include/linux/security.h
62979 index 673afbb..2b7454b 100644
62980 --- a/include/linux/security.h
62981 +++ b/include/linux/security.h
62982 @@ -26,6 +26,7 @@
62983 #include <linux/capability.h>
62984 #include <linux/slab.h>
62985 #include <linux/err.h>
62986 +#include <linux/grsecurity.h>
62987
62988 struct linux_binprm;
62989 struct cred;
62990 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
62991 index fc61854..d7c490b 100644
62992 --- a/include/linux/seq_file.h
62993 +++ b/include/linux/seq_file.h
62994 @@ -25,6 +25,9 @@ struct seq_file {
62995 struct mutex lock;
62996 const struct seq_operations *op;
62997 int poll_event;
62998 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
62999 + u64 exec_id;
63000 +#endif
63001 void *private;
63002 };
63003
63004 @@ -34,6 +37,7 @@ struct seq_operations {
63005 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63006 int (*show) (struct seq_file *m, void *v);
63007 };
63008 +typedef struct seq_operations __no_const seq_operations_no_const;
63009
63010 #define SEQ_SKIP 1
63011
63012 diff --git a/include/linux/shm.h b/include/linux/shm.h
63013 index 92808b8..c28cac4 100644
63014 --- a/include/linux/shm.h
63015 +++ b/include/linux/shm.h
63016 @@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
63017
63018 /* The task created the shm object. NULL if the task is dead. */
63019 struct task_struct *shm_creator;
63020 +#ifdef CONFIG_GRKERNSEC
63021 + time_t shm_createtime;
63022 + pid_t shm_lapid;
63023 +#endif
63024 };
63025
63026 /* shm_mode upper byte flags */
63027 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
63028 index c1bae8d..2dbcd31 100644
63029 --- a/include/linux/skbuff.h
63030 +++ b/include/linux/skbuff.h
63031 @@ -663,7 +663,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
63032 */
63033 static inline int skb_queue_empty(const struct sk_buff_head *list)
63034 {
63035 - return list->next == (struct sk_buff *)list;
63036 + return list->next == (const struct sk_buff *)list;
63037 }
63038
63039 /**
63040 @@ -676,7 +676,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
63041 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63042 const struct sk_buff *skb)
63043 {
63044 - return skb->next == (struct sk_buff *)list;
63045 + return skb->next == (const struct sk_buff *)list;
63046 }
63047
63048 /**
63049 @@ -689,7 +689,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63050 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63051 const struct sk_buff *skb)
63052 {
63053 - return skb->prev == (struct sk_buff *)list;
63054 + return skb->prev == (const struct sk_buff *)list;
63055 }
63056
63057 /**
63058 @@ -1584,7 +1584,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
63059 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63060 */
63061 #ifndef NET_SKB_PAD
63062 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
63063 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
63064 #endif
63065
63066 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
63067 diff --git a/include/linux/slab.h b/include/linux/slab.h
63068 index a595dce..a0116fb 100644
63069 --- a/include/linux/slab.h
63070 +++ b/include/linux/slab.h
63071 @@ -11,12 +11,20 @@
63072
63073 #include <linux/gfp.h>
63074 #include <linux/types.h>
63075 +#include <linux/err.h>
63076
63077 /*
63078 * Flags to pass to kmem_cache_create().
63079 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63080 */
63081 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63082 +
63083 +#ifdef CONFIG_PAX_USERCOPY
63084 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63085 +#else
63086 +#define SLAB_USERCOPY 0x00000000UL
63087 +#endif
63088 +
63089 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63090 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63091 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63092 @@ -87,10 +95,13 @@
63093 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63094 * Both make kfree a no-op.
63095 */
63096 -#define ZERO_SIZE_PTR ((void *)16)
63097 +#define ZERO_SIZE_PTR \
63098 +({ \
63099 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63100 + (void *)(-MAX_ERRNO-1L); \
63101 +})
63102
63103 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63104 - (unsigned long)ZERO_SIZE_PTR)
63105 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
63106
63107 /*
63108 * struct kmem_cache related prototypes
63109 @@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
63110 void kfree(const void *);
63111 void kzfree(const void *);
63112 size_t ksize(const void *);
63113 +void check_object_size(const void *ptr, unsigned long n, bool to);
63114 +bool is_usercopy_alloc(const void *ptr);
63115
63116 /*
63117 * Allocator specific definitions. These are mainly used to establish optimized
63118 @@ -240,6 +253,7 @@ size_t ksize(const void *);
63119 * for general use, and so are not documented here. For a full list of
63120 * potential flags, always refer to linux/gfp.h.
63121 */
63122 +static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
63123 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
63124 {
63125 if (size != 0 && n > ULONG_MAX / size)
63126 @@ -298,7 +312,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
63127 */
63128 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63129 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63130 -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63131 +extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63132 #define kmalloc_track_caller(size, flags) \
63133 __kmalloc_track_caller(size, flags, _RET_IP_)
63134 #else
63135 @@ -317,7 +331,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63136 */
63137 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63138 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63139 -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63140 +extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63141 #define kmalloc_node_track_caller(size, flags, node) \
63142 __kmalloc_node_track_caller(size, flags, node, \
63143 _RET_IP_)
63144 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
63145 index fbd1117..5affe9e 100644
63146 --- a/include/linux/slab_def.h
63147 +++ b/include/linux/slab_def.h
63148 @@ -66,10 +66,10 @@ struct kmem_cache {
63149 unsigned long node_allocs;
63150 unsigned long node_frees;
63151 unsigned long node_overflow;
63152 - atomic_t allochit;
63153 - atomic_t allocmiss;
63154 - atomic_t freehit;
63155 - atomic_t freemiss;
63156 + atomic_unchecked_t allochit;
63157 + atomic_unchecked_t allocmiss;
63158 + atomic_unchecked_t freehit;
63159 + atomic_unchecked_t freemiss;
63160
63161 /*
63162 * If debugging is enabled, then the allocator can add additional
63163 @@ -103,11 +103,16 @@ struct cache_sizes {
63164 #ifdef CONFIG_ZONE_DMA
63165 struct kmem_cache *cs_dmacachep;
63166 #endif
63167 +
63168 +#ifdef CONFIG_PAX_USERCOPY
63169 + struct kmem_cache *cs_usercopycachep;
63170 +#endif
63171 +
63172 };
63173 extern struct cache_sizes malloc_sizes[];
63174
63175 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63176 -void *__kmalloc(size_t size, gfp_t flags);
63177 +void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63178
63179 #ifdef CONFIG_TRACING
63180 extern void *kmem_cache_alloc_trace(size_t size,
63181 @@ -150,6 +155,13 @@ found:
63182 cachep = malloc_sizes[i].cs_dmacachep;
63183 else
63184 #endif
63185 +
63186 +#ifdef CONFIG_PAX_USERCOPY
63187 + if (flags & GFP_USERCOPY)
63188 + cachep = malloc_sizes[i].cs_usercopycachep;
63189 + else
63190 +#endif
63191 +
63192 cachep = malloc_sizes[i].cs_cachep;
63193
63194 ret = kmem_cache_alloc_trace(size, cachep, flags);
63195 @@ -160,7 +172,7 @@ found:
63196 }
63197
63198 #ifdef CONFIG_NUMA
63199 -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63200 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63201 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63202
63203 #ifdef CONFIG_TRACING
63204 @@ -203,6 +215,13 @@ found:
63205 cachep = malloc_sizes[i].cs_dmacachep;
63206 else
63207 #endif
63208 +
63209 +#ifdef CONFIG_PAX_USERCOPY
63210 + if (flags & GFP_USERCOPY)
63211 + cachep = malloc_sizes[i].cs_usercopycachep;
63212 + else
63213 +#endif
63214 +
63215 cachep = malloc_sizes[i].cs_cachep;
63216
63217 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
63218 diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
63219 index 0ec00b3..39cb7fc 100644
63220 --- a/include/linux/slob_def.h
63221 +++ b/include/linux/slob_def.h
63222 @@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
63223 return kmem_cache_alloc_node(cachep, flags, -1);
63224 }
63225
63226 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63227 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63228
63229 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63230 {
63231 @@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63232 return __kmalloc_node(size, flags, -1);
63233 }
63234
63235 +static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63236 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
63237 {
63238 return kmalloc(size, flags);
63239 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
63240 index c2f8c8b..be9e036 100644
63241 --- a/include/linux/slub_def.h
63242 +++ b/include/linux/slub_def.h
63243 @@ -92,7 +92,7 @@ struct kmem_cache {
63244 struct kmem_cache_order_objects max;
63245 struct kmem_cache_order_objects min;
63246 gfp_t allocflags; /* gfp flags to use on each alloc */
63247 - int refcount; /* Refcount for slab cache destroy */
63248 + atomic_t refcount; /* Refcount for slab cache destroy */
63249 void (*ctor)(void *);
63250 int inuse; /* Offset to metadata */
63251 int align; /* Alignment */
63252 @@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
63253 * Sorry that the following has to be that ugly but some versions of GCC
63254 * have trouble with constant propagation and loops.
63255 */
63256 +static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
63257 static __always_inline int kmalloc_index(size_t size)
63258 {
63259 if (!size)
63260 @@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
63261 }
63262
63263 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63264 -void *__kmalloc(size_t size, gfp_t flags);
63265 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
63266
63267 static __always_inline void *
63268 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
63269 @@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
63270 }
63271 #endif
63272
63273 +static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
63274 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
63275 {
63276 unsigned int order = get_order(size);
63277 @@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
63278 }
63279
63280 #ifdef CONFIG_NUMA
63281 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
63282 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63283 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63284
63285 #ifdef CONFIG_TRACING
63286 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63287 index de8832d..0147b46 100644
63288 --- a/include/linux/sonet.h
63289 +++ b/include/linux/sonet.h
63290 @@ -61,7 +61,7 @@ struct sonet_stats {
63291 #include <linux/atomic.h>
63292
63293 struct k_sonet_stats {
63294 -#define __HANDLE_ITEM(i) atomic_t i
63295 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
63296 __SONET_ITEMS
63297 #undef __HANDLE_ITEM
63298 };
63299 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
63300 index 523547e..2cb7140 100644
63301 --- a/include/linux/sunrpc/clnt.h
63302 +++ b/include/linux/sunrpc/clnt.h
63303 @@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
63304 {
63305 switch (sap->sa_family) {
63306 case AF_INET:
63307 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
63308 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63309 case AF_INET6:
63310 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63311 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63312 }
63313 return 0;
63314 }
63315 @@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
63316 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63317 const struct sockaddr *src)
63318 {
63319 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63320 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63321 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63322
63323 dsin->sin_family = ssin->sin_family;
63324 @@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
63325 if (sa->sa_family != AF_INET6)
63326 return 0;
63327
63328 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63329 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63330 }
63331
63332 #endif /* __KERNEL__ */
63333 diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
63334 index dc0c3cc..8503fb6 100644
63335 --- a/include/linux/sunrpc/sched.h
63336 +++ b/include/linux/sunrpc/sched.h
63337 @@ -106,6 +106,7 @@ struct rpc_call_ops {
63338 void (*rpc_count_stats)(struct rpc_task *, void *);
63339 void (*rpc_release)(void *);
63340 };
63341 +typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63342
63343 struct rpc_task_setup {
63344 struct rpc_task *task;
63345 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
63346 index 0b8e3e6..33e0a01 100644
63347 --- a/include/linux/sunrpc/svc_rdma.h
63348 +++ b/include/linux/sunrpc/svc_rdma.h
63349 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63350 extern unsigned int svcrdma_max_requests;
63351 extern unsigned int svcrdma_max_req_size;
63352
63353 -extern atomic_t rdma_stat_recv;
63354 -extern atomic_t rdma_stat_read;
63355 -extern atomic_t rdma_stat_write;
63356 -extern atomic_t rdma_stat_sq_starve;
63357 -extern atomic_t rdma_stat_rq_starve;
63358 -extern atomic_t rdma_stat_rq_poll;
63359 -extern atomic_t rdma_stat_rq_prod;
63360 -extern atomic_t rdma_stat_sq_poll;
63361 -extern atomic_t rdma_stat_sq_prod;
63362 +extern atomic_unchecked_t rdma_stat_recv;
63363 +extern atomic_unchecked_t rdma_stat_read;
63364 +extern atomic_unchecked_t rdma_stat_write;
63365 +extern atomic_unchecked_t rdma_stat_sq_starve;
63366 +extern atomic_unchecked_t rdma_stat_rq_starve;
63367 +extern atomic_unchecked_t rdma_stat_rq_poll;
63368 +extern atomic_unchecked_t rdma_stat_rq_prod;
63369 +extern atomic_unchecked_t rdma_stat_sq_poll;
63370 +extern atomic_unchecked_t rdma_stat_sq_prod;
63371
63372 #define RPCRDMA_VERSION 1
63373
63374 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
63375 index c34b4c8..a65b67d 100644
63376 --- a/include/linux/sysctl.h
63377 +++ b/include/linux/sysctl.h
63378 @@ -155,7 +155,11 @@ enum
63379 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63380 };
63381
63382 -
63383 +#ifdef CONFIG_PAX_SOFTMODE
63384 +enum {
63385 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63386 +};
63387 +#endif
63388
63389 /* CTL_VM names: */
63390 enum
63391 @@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
63392
63393 extern int proc_dostring(struct ctl_table *, int,
63394 void __user *, size_t *, loff_t *);
63395 +extern int proc_dostring_modpriv(struct ctl_table *, int,
63396 + void __user *, size_t *, loff_t *);
63397 extern int proc_dointvec(struct ctl_table *, int,
63398 void __user *, size_t *, loff_t *);
63399 extern int proc_dointvec_minmax(struct ctl_table *, int,
63400 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63401 index ff7dc08..893e1bd 100644
63402 --- a/include/linux/tty_ldisc.h
63403 +++ b/include/linux/tty_ldisc.h
63404 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
63405
63406 struct module *owner;
63407
63408 - int refcount;
63409 + atomic_t refcount;
63410 };
63411
63412 struct tty_ldisc {
63413 diff --git a/include/linux/types.h b/include/linux/types.h
63414 index 7f480db..175c256 100644
63415 --- a/include/linux/types.h
63416 +++ b/include/linux/types.h
63417 @@ -220,10 +220,26 @@ typedef struct {
63418 int counter;
63419 } atomic_t;
63420
63421 +#ifdef CONFIG_PAX_REFCOUNT
63422 +typedef struct {
63423 + int counter;
63424 +} atomic_unchecked_t;
63425 +#else
63426 +typedef atomic_t atomic_unchecked_t;
63427 +#endif
63428 +
63429 #ifdef CONFIG_64BIT
63430 typedef struct {
63431 long counter;
63432 } atomic64_t;
63433 +
63434 +#ifdef CONFIG_PAX_REFCOUNT
63435 +typedef struct {
63436 + long counter;
63437 +} atomic64_unchecked_t;
63438 +#else
63439 +typedef atomic64_t atomic64_unchecked_t;
63440 +#endif
63441 #endif
63442
63443 struct list_head {
63444 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
63445 index 5ca0951..ab496a5 100644
63446 --- a/include/linux/uaccess.h
63447 +++ b/include/linux/uaccess.h
63448 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
63449 long ret; \
63450 mm_segment_t old_fs = get_fs(); \
63451 \
63452 - set_fs(KERNEL_DS); \
63453 pagefault_disable(); \
63454 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
63455 - pagefault_enable(); \
63456 + set_fs(KERNEL_DS); \
63457 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
63458 set_fs(old_fs); \
63459 + pagefault_enable(); \
63460 ret; \
63461 })
63462
63463 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63464 index 99c1b4d..bb94261 100644
63465 --- a/include/linux/unaligned/access_ok.h
63466 +++ b/include/linux/unaligned/access_ok.h
63467 @@ -6,32 +6,32 @@
63468
63469 static inline u16 get_unaligned_le16(const void *p)
63470 {
63471 - return le16_to_cpup((__le16 *)p);
63472 + return le16_to_cpup((const __le16 *)p);
63473 }
63474
63475 static inline u32 get_unaligned_le32(const void *p)
63476 {
63477 - return le32_to_cpup((__le32 *)p);
63478 + return le32_to_cpup((const __le32 *)p);
63479 }
63480
63481 static inline u64 get_unaligned_le64(const void *p)
63482 {
63483 - return le64_to_cpup((__le64 *)p);
63484 + return le64_to_cpup((const __le64 *)p);
63485 }
63486
63487 static inline u16 get_unaligned_be16(const void *p)
63488 {
63489 - return be16_to_cpup((__be16 *)p);
63490 + return be16_to_cpup((const __be16 *)p);
63491 }
63492
63493 static inline u32 get_unaligned_be32(const void *p)
63494 {
63495 - return be32_to_cpup((__be32 *)p);
63496 + return be32_to_cpup((const __be32 *)p);
63497 }
63498
63499 static inline u64 get_unaligned_be64(const void *p)
63500 {
63501 - return be64_to_cpup((__be64 *)p);
63502 + return be64_to_cpup((const __be64 *)p);
63503 }
63504
63505 static inline void put_unaligned_le16(u16 val, void *p)
63506 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
63507 index 547e59c..db6ad19 100644
63508 --- a/include/linux/usb/renesas_usbhs.h
63509 +++ b/include/linux/usb/renesas_usbhs.h
63510 @@ -39,7 +39,7 @@ enum {
63511 */
63512 struct renesas_usbhs_driver_callback {
63513 int (*notify_hotplug)(struct platform_device *pdev);
63514 -};
63515 +} __no_const;
63516
63517 /*
63518 * callback functions for platform
63519 @@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
63520 * VBUS control is needed for Host
63521 */
63522 int (*set_vbus)(struct platform_device *pdev, int enable);
63523 -};
63524 +} __no_const;
63525
63526 /*
63527 * parameters for renesas usbhs
63528 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
63529 index 6f8fbcf..8259001 100644
63530 --- a/include/linux/vermagic.h
63531 +++ b/include/linux/vermagic.h
63532 @@ -25,9 +25,35 @@
63533 #define MODULE_ARCH_VERMAGIC ""
63534 #endif
63535
63536 +#ifdef CONFIG_PAX_REFCOUNT
63537 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
63538 +#else
63539 +#define MODULE_PAX_REFCOUNT ""
63540 +#endif
63541 +
63542 +#ifdef CONSTIFY_PLUGIN
63543 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63544 +#else
63545 +#define MODULE_CONSTIFY_PLUGIN ""
63546 +#endif
63547 +
63548 +#ifdef STACKLEAK_PLUGIN
63549 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63550 +#else
63551 +#define MODULE_STACKLEAK_PLUGIN ""
63552 +#endif
63553 +
63554 +#ifdef CONFIG_GRKERNSEC
63555 +#define MODULE_GRSEC "GRSEC "
63556 +#else
63557 +#define MODULE_GRSEC ""
63558 +#endif
63559 +
63560 #define VERMAGIC_STRING \
63561 UTS_RELEASE " " \
63562 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63563 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63564 - MODULE_ARCH_VERMAGIC
63565 + MODULE_ARCH_VERMAGIC \
63566 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63567 + MODULE_GRSEC
63568
63569 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
63570 index dcdfc2b..ec79ab5 100644
63571 --- a/include/linux/vmalloc.h
63572 +++ b/include/linux/vmalloc.h
63573 @@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
63574 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63575 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
63576 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
63577 +
63578 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
63579 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
63580 +#endif
63581 +
63582 /* bits [20..32] reserved for arch specific ioremap internals */
63583
63584 /*
63585 @@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63586 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
63587 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63588 unsigned long start, unsigned long end, gfp_t gfp_mask,
63589 - pgprot_t prot, int node, void *caller);
63590 + pgprot_t prot, int node, void *caller) __size_overflow(1);
63591 extern void vfree(const void *addr);
63592
63593 extern void *vmap(struct page **pages, unsigned int count,
63594 @@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63595 extern void free_vm_area(struct vm_struct *area);
63596
63597 /* for /dev/kmem */
63598 -extern long vread(char *buf, char *addr, unsigned long count);
63599 -extern long vwrite(char *buf, char *addr, unsigned long count);
63600 +extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63601 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63602
63603 /*
63604 * Internals. Dont't use..
63605 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63606 index 65efb92..137adbb 100644
63607 --- a/include/linux/vmstat.h
63608 +++ b/include/linux/vmstat.h
63609 @@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
63610 /*
63611 * Zone based page accounting with per cpu differentials.
63612 */
63613 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63614 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63615
63616 static inline void zone_page_state_add(long x, struct zone *zone,
63617 enum zone_stat_item item)
63618 {
63619 - atomic_long_add(x, &zone->vm_stat[item]);
63620 - atomic_long_add(x, &vm_stat[item]);
63621 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63622 + atomic_long_add_unchecked(x, &vm_stat[item]);
63623 }
63624
63625 static inline unsigned long global_page_state(enum zone_stat_item item)
63626 {
63627 - long x = atomic_long_read(&vm_stat[item]);
63628 + long x = atomic_long_read_unchecked(&vm_stat[item]);
63629 #ifdef CONFIG_SMP
63630 if (x < 0)
63631 x = 0;
63632 @@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
63633 static inline unsigned long zone_page_state(struct zone *zone,
63634 enum zone_stat_item item)
63635 {
63636 - long x = atomic_long_read(&zone->vm_stat[item]);
63637 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63638 #ifdef CONFIG_SMP
63639 if (x < 0)
63640 x = 0;
63641 @@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
63642 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63643 enum zone_stat_item item)
63644 {
63645 - long x = atomic_long_read(&zone->vm_stat[item]);
63646 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63647
63648 #ifdef CONFIG_SMP
63649 int cpu;
63650 @@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
63651
63652 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63653 {
63654 - atomic_long_inc(&zone->vm_stat[item]);
63655 - atomic_long_inc(&vm_stat[item]);
63656 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
63657 + atomic_long_inc_unchecked(&vm_stat[item]);
63658 }
63659
63660 static inline void __inc_zone_page_state(struct page *page,
63661 @@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
63662
63663 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63664 {
63665 - atomic_long_dec(&zone->vm_stat[item]);
63666 - atomic_long_dec(&vm_stat[item]);
63667 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
63668 + atomic_long_dec_unchecked(&vm_stat[item]);
63669 }
63670
63671 static inline void __dec_zone_page_state(struct page *page,
63672 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63673 index e5d1220..ef6e406 100644
63674 --- a/include/linux/xattr.h
63675 +++ b/include/linux/xattr.h
63676 @@ -57,6 +57,11 @@
63677 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63678 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63679
63680 +/* User namespace */
63681 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63682 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
63683 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63684 +
63685 #ifdef __KERNEL__
63686
63687 #include <linux/types.h>
63688 diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63689 index 4aeff96..b378cdc 100644
63690 --- a/include/media/saa7146_vv.h
63691 +++ b/include/media/saa7146_vv.h
63692 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
63693 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63694
63695 /* the extension can override this */
63696 - struct v4l2_ioctl_ops ops;
63697 + v4l2_ioctl_ops_no_const ops;
63698 /* pointer to the saa7146 core ops */
63699 const struct v4l2_ioctl_ops *core_ops;
63700
63701 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
63702 index 96d2221..2292f89 100644
63703 --- a/include/media/v4l2-dev.h
63704 +++ b/include/media/v4l2-dev.h
63705 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
63706
63707
63708 struct v4l2_file_operations {
63709 - struct module *owner;
63710 + struct module * const owner;
63711 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
63712 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
63713 unsigned int (*poll) (struct file *, struct poll_table_struct *);
63714 @@ -71,6 +71,7 @@ struct v4l2_file_operations {
63715 int (*open) (struct file *);
63716 int (*release) (struct file *);
63717 };
63718 +typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
63719
63720 /*
63721 * Newer version of video_device, handled by videodev2.c
63722 diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
63723 index 3cb939c..f23c6bb 100644
63724 --- a/include/media/v4l2-ioctl.h
63725 +++ b/include/media/v4l2-ioctl.h
63726 @@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
63727 long (*vidioc_default) (struct file *file, void *fh,
63728 bool valid_prio, int cmd, void *arg);
63729 };
63730 -
63731 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
63732
63733 /* v4l debugging and diagnostics */
63734
63735 diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
63736 index 6db8ecf..8c23861 100644
63737 --- a/include/net/caif/caif_hsi.h
63738 +++ b/include/net/caif/caif_hsi.h
63739 @@ -98,7 +98,7 @@ struct cfhsi_drv {
63740 void (*rx_done_cb) (struct cfhsi_drv *drv);
63741 void (*wake_up_cb) (struct cfhsi_drv *drv);
63742 void (*wake_down_cb) (struct cfhsi_drv *drv);
63743 -};
63744 +} __no_const;
63745
63746 /* Structure implemented by HSI device. */
63747 struct cfhsi_dev {
63748 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
63749 index 9e5425b..8136ffc 100644
63750 --- a/include/net/caif/cfctrl.h
63751 +++ b/include/net/caif/cfctrl.h
63752 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
63753 void (*radioset_rsp)(void);
63754 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
63755 struct cflayer *client_layer);
63756 -};
63757 +} __no_const;
63758
63759 /* Link Setup Parameters for CAIF-Links. */
63760 struct cfctrl_link_param {
63761 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
63762 struct cfctrl {
63763 struct cfsrvl serv;
63764 struct cfctrl_rsp res;
63765 - atomic_t req_seq_no;
63766 - atomic_t rsp_seq_no;
63767 + atomic_unchecked_t req_seq_no;
63768 + atomic_unchecked_t rsp_seq_no;
63769 struct list_head list;
63770 /* Protects from simultaneous access to first_req list */
63771 spinlock_t info_list_lock;
63772 diff --git a/include/net/flow.h b/include/net/flow.h
63773 index 6c469db..7743b8e 100644
63774 --- a/include/net/flow.h
63775 +++ b/include/net/flow.h
63776 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
63777
63778 extern void flow_cache_flush(void);
63779 extern void flow_cache_flush_deferred(void);
63780 -extern atomic_t flow_cache_genid;
63781 +extern atomic_unchecked_t flow_cache_genid;
63782
63783 #endif
63784 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
63785 index 2040bff..f4c0733 100644
63786 --- a/include/net/inetpeer.h
63787 +++ b/include/net/inetpeer.h
63788 @@ -51,8 +51,8 @@ struct inet_peer {
63789 */
63790 union {
63791 struct {
63792 - atomic_t rid; /* Frag reception counter */
63793 - atomic_t ip_id_count; /* IP ID for the next packet */
63794 + atomic_unchecked_t rid; /* Frag reception counter */
63795 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
63796 __u32 tcp_ts;
63797 __u32 tcp_ts_stamp;
63798 };
63799 @@ -118,11 +118,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
63800 more++;
63801 inet_peer_refcheck(p);
63802 do {
63803 - old = atomic_read(&p->ip_id_count);
63804 + old = atomic_read_unchecked(&p->ip_id_count);
63805 new = old + more;
63806 if (!new)
63807 new = 1;
63808 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
63809 + } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
63810 return new;
63811 }
63812
63813 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
63814 index 10422ef..662570f 100644
63815 --- a/include/net/ip_fib.h
63816 +++ b/include/net/ip_fib.h
63817 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
63818
63819 #define FIB_RES_SADDR(net, res) \
63820 ((FIB_RES_NH(res).nh_saddr_genid == \
63821 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
63822 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
63823 FIB_RES_NH(res).nh_saddr : \
63824 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
63825 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
63826 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
63827 index 72522f0..2965e05 100644
63828 --- a/include/net/ip_vs.h
63829 +++ b/include/net/ip_vs.h
63830 @@ -510,7 +510,7 @@ struct ip_vs_conn {
63831 struct ip_vs_conn *control; /* Master control connection */
63832 atomic_t n_control; /* Number of controlled ones */
63833 struct ip_vs_dest *dest; /* real server */
63834 - atomic_t in_pkts; /* incoming packet counter */
63835 + atomic_unchecked_t in_pkts; /* incoming packet counter */
63836
63837 /* packet transmitter for different forwarding methods. If it
63838 mangles the packet, it must return NF_DROP or better NF_STOLEN,
63839 @@ -648,7 +648,7 @@ struct ip_vs_dest {
63840 __be16 port; /* port number of the server */
63841 union nf_inet_addr addr; /* IP address of the server */
63842 volatile unsigned flags; /* dest status flags */
63843 - atomic_t conn_flags; /* flags to copy to conn */
63844 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
63845 atomic_t weight; /* server weight */
63846
63847 atomic_t refcnt; /* reference counter */
63848 @@ -1356,7 +1356,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
63849 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
63850
63851 if (!ct || !nf_ct_is_untracked(ct)) {
63852 - nf_reset(skb);
63853 + nf_conntrack_put(skb->nfct);
63854 skb->nfct = &nf_ct_untracked_get()->ct_general;
63855 skb->nfctinfo = IP_CT_NEW;
63856 nf_conntrack_get(skb->nfct);
63857 diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
63858 index 69b610a..fe3962c 100644
63859 --- a/include/net/irda/ircomm_core.h
63860 +++ b/include/net/irda/ircomm_core.h
63861 @@ -51,7 +51,7 @@ typedef struct {
63862 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
63863 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
63864 struct ircomm_info *);
63865 -} call_t;
63866 +} __no_const call_t;
63867
63868 struct ircomm_cb {
63869 irda_queue_t queue;
63870 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
63871 index 59ba38bc..d515662 100644
63872 --- a/include/net/irda/ircomm_tty.h
63873 +++ b/include/net/irda/ircomm_tty.h
63874 @@ -35,6 +35,7 @@
63875 #include <linux/termios.h>
63876 #include <linux/timer.h>
63877 #include <linux/tty.h> /* struct tty_struct */
63878 +#include <asm/local.h>
63879
63880 #include <net/irda/irias_object.h>
63881 #include <net/irda/ircomm_core.h>
63882 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
63883 unsigned short close_delay;
63884 unsigned short closing_wait; /* time to wait before closing */
63885
63886 - int open_count;
63887 - int blocked_open; /* # of blocked opens */
63888 + local_t open_count;
63889 + local_t blocked_open; /* # of blocked opens */
63890
63891 /* Protect concurent access to :
63892 * o self->open_count
63893 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
63894 index cc7c197..9f2da2a 100644
63895 --- a/include/net/iucv/af_iucv.h
63896 +++ b/include/net/iucv/af_iucv.h
63897 @@ -141,7 +141,7 @@ struct iucv_sock {
63898 struct iucv_sock_list {
63899 struct hlist_head head;
63900 rwlock_t lock;
63901 - atomic_t autobind_name;
63902 + atomic_unchecked_t autobind_name;
63903 };
63904
63905 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
63906 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
63907 index 34c996f..bb3b4d4 100644
63908 --- a/include/net/neighbour.h
63909 +++ b/include/net/neighbour.h
63910 @@ -123,7 +123,7 @@ struct neigh_ops {
63911 void (*error_report)(struct neighbour *, struct sk_buff *);
63912 int (*output)(struct neighbour *, struct sk_buff *);
63913 int (*connected_output)(struct neighbour *, struct sk_buff *);
63914 -};
63915 +} __do_const;
63916
63917 struct pneigh_entry {
63918 struct pneigh_entry *next;
63919 diff --git a/include/net/netlink.h b/include/net/netlink.h
63920 index f394fe5..fd073f9 100644
63921 --- a/include/net/netlink.h
63922 +++ b/include/net/netlink.h
63923 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
63924 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
63925 {
63926 if (mark)
63927 - skb_trim(skb, (unsigned char *) mark - skb->data);
63928 + skb_trim(skb, (const unsigned char *) mark - skb->data);
63929 }
63930
63931 /**
63932 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
63933 index bbd023a..97c6d0d 100644
63934 --- a/include/net/netns/ipv4.h
63935 +++ b/include/net/netns/ipv4.h
63936 @@ -57,8 +57,8 @@ struct netns_ipv4 {
63937 unsigned int sysctl_ping_group_range[2];
63938 long sysctl_tcp_mem[3];
63939
63940 - atomic_t rt_genid;
63941 - atomic_t dev_addr_genid;
63942 + atomic_unchecked_t rt_genid;
63943 + atomic_unchecked_t dev_addr_genid;
63944
63945 #ifdef CONFIG_IP_MROUTE
63946 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63947 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
63948 index a2ef814..31a8e3f 100644
63949 --- a/include/net/sctp/sctp.h
63950 +++ b/include/net/sctp/sctp.h
63951 @@ -318,9 +318,9 @@ do { \
63952
63953 #else /* SCTP_DEBUG */
63954
63955 -#define SCTP_DEBUG_PRINTK(whatever...)
63956 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
63957 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
63958 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
63959 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
63960 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
63961 #define SCTP_ENABLE_DEBUG
63962 #define SCTP_DISABLE_DEBUG
63963 #define SCTP_ASSERT(expr, str, func)
63964 diff --git a/include/net/sock.h b/include/net/sock.h
63965 index 5a0a58a..2e3d4d0 100644
63966 --- a/include/net/sock.h
63967 +++ b/include/net/sock.h
63968 @@ -302,7 +302,7 @@ struct sock {
63969 #ifdef CONFIG_RPS
63970 __u32 sk_rxhash;
63971 #endif
63972 - atomic_t sk_drops;
63973 + atomic_unchecked_t sk_drops;
63974 int sk_rcvbuf;
63975
63976 struct sk_filter __rcu *sk_filter;
63977 @@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
63978 }
63979
63980 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
63981 - char __user *from, char *to,
63982 + char __user *from, unsigned char *to,
63983 int copy, int offset)
63984 {
63985 if (skb->ip_summed == CHECKSUM_NONE) {
63986 diff --git a/include/net/tcp.h b/include/net/tcp.h
63987 index f75a04d..702cf06 100644
63988 --- a/include/net/tcp.h
63989 +++ b/include/net/tcp.h
63990 @@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
63991 char *name;
63992 sa_family_t family;
63993 const struct file_operations *seq_fops;
63994 - struct seq_operations seq_ops;
63995 + seq_operations_no_const seq_ops;
63996 };
63997
63998 struct tcp_iter_state {
63999 diff --git a/include/net/udp.h b/include/net/udp.h
64000 index 5d606d9..e879f7b 100644
64001 --- a/include/net/udp.h
64002 +++ b/include/net/udp.h
64003 @@ -244,7 +244,7 @@ struct udp_seq_afinfo {
64004 sa_family_t family;
64005 struct udp_table *udp_table;
64006 const struct file_operations *seq_fops;
64007 - struct seq_operations seq_ops;
64008 + seq_operations_no_const seq_ops;
64009 };
64010
64011 struct udp_iter_state {
64012 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
64013 index 96239e7..c85b032 100644
64014 --- a/include/net/xfrm.h
64015 +++ b/include/net/xfrm.h
64016 @@ -505,7 +505,7 @@ struct xfrm_policy {
64017 struct timer_list timer;
64018
64019 struct flow_cache_object flo;
64020 - atomic_t genid;
64021 + atomic_unchecked_t genid;
64022 u32 priority;
64023 u32 index;
64024 struct xfrm_mark mark;
64025 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
64026 index 1a046b1..ee0bef0 100644
64027 --- a/include/rdma/iw_cm.h
64028 +++ b/include/rdma/iw_cm.h
64029 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
64030 int backlog);
64031
64032 int (*destroy_listen)(struct iw_cm_id *cm_id);
64033 -};
64034 +} __no_const;
64035
64036 /**
64037 * iw_create_cm_id - Create an IW CM identifier.
64038 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
64039 index 8f9dfba..610ab6c 100644
64040 --- a/include/scsi/libfc.h
64041 +++ b/include/scsi/libfc.h
64042 @@ -756,6 +756,7 @@ struct libfc_function_template {
64043 */
64044 void (*disc_stop_final) (struct fc_lport *);
64045 };
64046 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64047
64048 /**
64049 * struct fc_disc - Discovery context
64050 @@ -861,7 +862,7 @@ struct fc_lport {
64051 struct fc_vport *vport;
64052
64053 /* Operational Information */
64054 - struct libfc_function_template tt;
64055 + libfc_function_template_no_const tt;
64056 u8 link_up;
64057 u8 qfull;
64058 enum fc_lport_state state;
64059 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
64060 index ba96988..ecf2eb9 100644
64061 --- a/include/scsi/scsi_device.h
64062 +++ b/include/scsi/scsi_device.h
64063 @@ -163,9 +163,9 @@ struct scsi_device {
64064 unsigned int max_device_blocked; /* what device_blocked counts down from */
64065 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64066
64067 - atomic_t iorequest_cnt;
64068 - atomic_t iodone_cnt;
64069 - atomic_t ioerr_cnt;
64070 + atomic_unchecked_t iorequest_cnt;
64071 + atomic_unchecked_t iodone_cnt;
64072 + atomic_unchecked_t ioerr_cnt;
64073
64074 struct device sdev_gendev,
64075 sdev_dev;
64076 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
64077 index 719faf1..d1154d4 100644
64078 --- a/include/scsi/scsi_transport_fc.h
64079 +++ b/include/scsi/scsi_transport_fc.h
64080 @@ -739,7 +739,7 @@ struct fc_function_template {
64081 unsigned long show_host_system_hostname:1;
64082
64083 unsigned long disable_target_scan:1;
64084 -};
64085 +} __do_const;
64086
64087
64088 /**
64089 diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64090 index 030b87c..98a6954 100644
64091 --- a/include/sound/ak4xxx-adda.h
64092 +++ b/include/sound/ak4xxx-adda.h
64093 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64094 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64095 unsigned char val);
64096 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64097 -};
64098 +} __no_const;
64099
64100 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64101
64102 diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64103 index 8c05e47..2b5df97 100644
64104 --- a/include/sound/hwdep.h
64105 +++ b/include/sound/hwdep.h
64106 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64107 struct snd_hwdep_dsp_status *status);
64108 int (*dsp_load)(struct snd_hwdep *hw,
64109 struct snd_hwdep_dsp_image *image);
64110 -};
64111 +} __no_const;
64112
64113 struct snd_hwdep {
64114 struct snd_card *card;
64115 diff --git a/include/sound/info.h b/include/sound/info.h
64116 index 9ca1a49..aba1728 100644
64117 --- a/include/sound/info.h
64118 +++ b/include/sound/info.h
64119 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
64120 struct snd_info_buffer *buffer);
64121 void (*write)(struct snd_info_entry *entry,
64122 struct snd_info_buffer *buffer);
64123 -};
64124 +} __no_const;
64125
64126 struct snd_info_entry_ops {
64127 int (*open)(struct snd_info_entry *entry,
64128 diff --git a/include/sound/pcm.h b/include/sound/pcm.h
64129 index 0d11128..814178e 100644
64130 --- a/include/sound/pcm.h
64131 +++ b/include/sound/pcm.h
64132 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
64133 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64134 int (*ack)(struct snd_pcm_substream *substream);
64135 };
64136 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
64137
64138 /*
64139 *
64140 diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64141 index af1b49e..a5d55a5 100644
64142 --- a/include/sound/sb16_csp.h
64143 +++ b/include/sound/sb16_csp.h
64144 @@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64145 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64146 int (*csp_stop) (struct snd_sb_csp * p);
64147 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64148 -};
64149 +} __no_const;
64150
64151 /*
64152 * CSP private data
64153 diff --git a/include/sound/soc.h b/include/sound/soc.h
64154 index 2ebf787..0276839 100644
64155 --- a/include/sound/soc.h
64156 +++ b/include/sound/soc.h
64157 @@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
64158 /* platform IO - used for platform DAPM */
64159 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64160 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
64161 -};
64162 +} __do_const;
64163
64164 struct snd_soc_platform {
64165 const char *name;
64166 @@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
64167 struct snd_soc_dai_link *dai_link;
64168 struct mutex pcm_mutex;
64169 enum snd_soc_pcm_subclass pcm_subclass;
64170 - struct snd_pcm_ops ops;
64171 + snd_pcm_ops_no_const ops;
64172
64173 unsigned int complete:1;
64174 unsigned int dev_registered:1;
64175 diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
64176 index 4119966..1a4671c 100644
64177 --- a/include/sound/ymfpci.h
64178 +++ b/include/sound/ymfpci.h
64179 @@ -358,7 +358,7 @@ struct snd_ymfpci {
64180 spinlock_t reg_lock;
64181 spinlock_t voice_lock;
64182 wait_queue_head_t interrupt_sleep;
64183 - atomic_t interrupt_sleep_count;
64184 + atomic_unchecked_t interrupt_sleep_count;
64185 struct snd_info_entry *proc_entry;
64186 const struct firmware *dsp_microcode;
64187 const struct firmware *controller_microcode;
64188 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
64189 index aaccc5f..092d568 100644
64190 --- a/include/target/target_core_base.h
64191 +++ b/include/target/target_core_base.h
64192 @@ -447,7 +447,7 @@ struct t10_reservation_ops {
64193 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64194 int (*t10_pr_register)(struct se_cmd *);
64195 int (*t10_pr_clear)(struct se_cmd *);
64196 -};
64197 +} __no_const;
64198
64199 struct t10_reservation {
64200 /* Reservation effects all target ports */
64201 @@ -576,7 +576,7 @@ struct se_cmd {
64202 atomic_t t_se_count;
64203 atomic_t t_task_cdbs_left;
64204 atomic_t t_task_cdbs_ex_left;
64205 - atomic_t t_task_cdbs_sent;
64206 + atomic_unchecked_t t_task_cdbs_sent;
64207 unsigned int transport_state;
64208 #define CMD_T_ABORTED (1 << 0)
64209 #define CMD_T_ACTIVE (1 << 1)
64210 @@ -802,7 +802,7 @@ struct se_device {
64211 spinlock_t stats_lock;
64212 /* Active commands on this virtual SE device */
64213 atomic_t simple_cmds;
64214 - atomic_t dev_ordered_id;
64215 + atomic_unchecked_t dev_ordered_id;
64216 atomic_t execute_tasks;
64217 atomic_t dev_ordered_sync;
64218 atomic_t dev_qf_count;
64219 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
64220 new file mode 100644
64221 index 0000000..2efe49d
64222 --- /dev/null
64223 +++ b/include/trace/events/fs.h
64224 @@ -0,0 +1,53 @@
64225 +#undef TRACE_SYSTEM
64226 +#define TRACE_SYSTEM fs
64227 +
64228 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
64229 +#define _TRACE_FS_H
64230 +
64231 +#include <linux/fs.h>
64232 +#include <linux/tracepoint.h>
64233 +
64234 +TRACE_EVENT(do_sys_open,
64235 +
64236 + TP_PROTO(char *filename, int flags, int mode),
64237 +
64238 + TP_ARGS(filename, flags, mode),
64239 +
64240 + TP_STRUCT__entry(
64241 + __string( filename, filename )
64242 + __field( int, flags )
64243 + __field( int, mode )
64244 + ),
64245 +
64246 + TP_fast_assign(
64247 + __assign_str(filename, filename);
64248 + __entry->flags = flags;
64249 + __entry->mode = mode;
64250 + ),
64251 +
64252 + TP_printk("\"%s\" %x %o",
64253 + __get_str(filename), __entry->flags, __entry->mode)
64254 +);
64255 +
64256 +TRACE_EVENT(open_exec,
64257 +
64258 + TP_PROTO(const char *filename),
64259 +
64260 + TP_ARGS(filename),
64261 +
64262 + TP_STRUCT__entry(
64263 + __string( filename, filename )
64264 + ),
64265 +
64266 + TP_fast_assign(
64267 + __assign_str(filename, filename);
64268 + ),
64269 +
64270 + TP_printk("\"%s\"",
64271 + __get_str(filename))
64272 +);
64273 +
64274 +#endif /* _TRACE_FS_H */
64275 +
64276 +/* This part must be outside protection */
64277 +#include <trace/define_trace.h>
64278 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64279 index 1c09820..7f5ec79 100644
64280 --- a/include/trace/events/irq.h
64281 +++ b/include/trace/events/irq.h
64282 @@ -36,7 +36,7 @@ struct softirq_action;
64283 */
64284 TRACE_EVENT(irq_handler_entry,
64285
64286 - TP_PROTO(int irq, struct irqaction *action),
64287 + TP_PROTO(int irq, const struct irqaction *action),
64288
64289 TP_ARGS(irq, action),
64290
64291 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
64292 */
64293 TRACE_EVENT(irq_handler_exit,
64294
64295 - TP_PROTO(int irq, struct irqaction *action, int ret),
64296 + TP_PROTO(int irq, const struct irqaction *action, int ret),
64297
64298 TP_ARGS(irq, action, ret),
64299
64300 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
64301 index f9466fa..f4e2b81 100644
64302 --- a/include/video/udlfb.h
64303 +++ b/include/video/udlfb.h
64304 @@ -53,10 +53,10 @@ struct dlfb_data {
64305 u32 pseudo_palette[256];
64306 int blank_mode; /*one of FB_BLANK_ */
64307 /* blit-only rendering path metrics, exposed through sysfs */
64308 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64309 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64310 - atomic_t bytes_sent; /* to usb, after compression including overhead */
64311 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64312 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64313 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64314 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64315 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64316 };
64317
64318 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
64319 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64320 index 0993a22..32ba2fe 100644
64321 --- a/include/video/uvesafb.h
64322 +++ b/include/video/uvesafb.h
64323 @@ -177,6 +177,7 @@ struct uvesafb_par {
64324 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64325 u8 pmi_setpal; /* PMI for palette changes */
64326 u16 *pmi_base; /* protected mode interface location */
64327 + u8 *pmi_code; /* protected mode code location */
64328 void *pmi_start;
64329 void *pmi_pal;
64330 u8 *vbe_state_orig; /*
64331 diff --git a/init/Kconfig b/init/Kconfig
64332 index 6cfd71d..16006e6 100644
64333 --- a/init/Kconfig
64334 +++ b/init/Kconfig
64335 @@ -790,6 +790,7 @@ endif # CGROUPS
64336
64337 config CHECKPOINT_RESTORE
64338 bool "Checkpoint/restore support" if EXPERT
64339 + depends on !GRKERNSEC
64340 default n
64341 help
64342 Enables additional kernel features in a sake of checkpoint/restore.
64343 @@ -1240,7 +1241,7 @@ config SLUB_DEBUG
64344
64345 config COMPAT_BRK
64346 bool "Disable heap randomization"
64347 - default y
64348 + default n
64349 help
64350 Randomizing heap placement makes heap exploits harder, but it
64351 also breaks ancient binaries (including anything libc5 based).
64352 @@ -1423,7 +1424,7 @@ config INIT_ALL_POSSIBLE
64353 config STOP_MACHINE
64354 bool
64355 default y
64356 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
64357 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
64358 help
64359 Need stop_machine() primitive.
64360
64361 diff --git a/init/do_mounts.c b/init/do_mounts.c
64362 index 42b0707..c06eef4 100644
64363 --- a/init/do_mounts.c
64364 +++ b/init/do_mounts.c
64365 @@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
64366 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64367 {
64368 struct super_block *s;
64369 - int err = sys_mount(name, "/root", fs, flags, data);
64370 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
64371 if (err)
64372 return err;
64373
64374 - sys_chdir((const char __user __force *)"/root");
64375 + sys_chdir((const char __force_user *)"/root");
64376 s = current->fs->pwd.dentry->d_sb;
64377 ROOT_DEV = s->s_dev;
64378 printk(KERN_INFO
64379 @@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
64380 va_start(args, fmt);
64381 vsprintf(buf, fmt, args);
64382 va_end(args);
64383 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64384 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64385 if (fd >= 0) {
64386 sys_ioctl(fd, FDEJECT, 0);
64387 sys_close(fd);
64388 }
64389 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64390 - fd = sys_open("/dev/console", O_RDWR, 0);
64391 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
64392 if (fd >= 0) {
64393 sys_ioctl(fd, TCGETS, (long)&termios);
64394 termios.c_lflag &= ~ICANON;
64395 sys_ioctl(fd, TCSETSF, (long)&termios);
64396 - sys_read(fd, &c, 1);
64397 + sys_read(fd, (char __user *)&c, 1);
64398 termios.c_lflag |= ICANON;
64399 sys_ioctl(fd, TCSETSF, (long)&termios);
64400 sys_close(fd);
64401 @@ -555,6 +555,6 @@ void __init prepare_namespace(void)
64402 mount_root();
64403 out:
64404 devtmpfs_mount("dev");
64405 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64406 - sys_chroot((const char __user __force *)".");
64407 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64408 + sys_chroot((const char __force_user *)".");
64409 }
64410 diff --git a/init/do_mounts.h b/init/do_mounts.h
64411 index f5b978a..69dbfe8 100644
64412 --- a/init/do_mounts.h
64413 +++ b/init/do_mounts.h
64414 @@ -15,15 +15,15 @@ extern int root_mountflags;
64415
64416 static inline int create_dev(char *name, dev_t dev)
64417 {
64418 - sys_unlink(name);
64419 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
64420 + sys_unlink((char __force_user *)name);
64421 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
64422 }
64423
64424 #if BITS_PER_LONG == 32
64425 static inline u32 bstat(char *name)
64426 {
64427 struct stat64 stat;
64428 - if (sys_stat64(name, &stat) != 0)
64429 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64430 return 0;
64431 if (!S_ISBLK(stat.st_mode))
64432 return 0;
64433 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64434 static inline u32 bstat(char *name)
64435 {
64436 struct stat stat;
64437 - if (sys_newstat(name, &stat) != 0)
64438 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
64439 return 0;
64440 if (!S_ISBLK(stat.st_mode))
64441 return 0;
64442 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
64443 index 9047330..de0d1fb 100644
64444 --- a/init/do_mounts_initrd.c
64445 +++ b/init/do_mounts_initrd.c
64446 @@ -43,13 +43,13 @@ static void __init handle_initrd(void)
64447 create_dev("/dev/root.old", Root_RAM0);
64448 /* mount initrd on rootfs' /root */
64449 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64450 - sys_mkdir("/old", 0700);
64451 - root_fd = sys_open("/", 0, 0);
64452 - old_fd = sys_open("/old", 0, 0);
64453 + sys_mkdir((const char __force_user *)"/old", 0700);
64454 + root_fd = sys_open((const char __force_user *)"/", 0, 0);
64455 + old_fd = sys_open((const char __force_user *)"/old", 0, 0);
64456 /* move initrd over / and chdir/chroot in initrd root */
64457 - sys_chdir("/root");
64458 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
64459 - sys_chroot(".");
64460 + sys_chdir((const char __force_user *)"/root");
64461 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64462 + sys_chroot((const char __force_user *)".");
64463
64464 /*
64465 * In case that a resume from disk is carried out by linuxrc or one of
64466 @@ -66,15 +66,15 @@ static void __init handle_initrd(void)
64467
64468 /* move initrd to rootfs' /old */
64469 sys_fchdir(old_fd);
64470 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
64471 + sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
64472 /* switch root and cwd back to / of rootfs */
64473 sys_fchdir(root_fd);
64474 - sys_chroot(".");
64475 + sys_chroot((const char __force_user *)".");
64476 sys_close(old_fd);
64477 sys_close(root_fd);
64478
64479 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64480 - sys_chdir("/old");
64481 + sys_chdir((const char __force_user *)"/old");
64482 return;
64483 }
64484
64485 @@ -82,17 +82,17 @@ static void __init handle_initrd(void)
64486 mount_root();
64487
64488 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64489 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
64490 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
64491 if (!error)
64492 printk("okay\n");
64493 else {
64494 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
64495 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
64496 if (error == -ENOENT)
64497 printk("/initrd does not exist. Ignored.\n");
64498 else
64499 printk("failed\n");
64500 printk(KERN_NOTICE "Unmounting old root\n");
64501 - sys_umount("/old", MNT_DETACH);
64502 + sys_umount((char __force_user *)"/old", MNT_DETACH);
64503 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64504 if (fd < 0) {
64505 error = fd;
64506 @@ -115,11 +115,11 @@ int __init initrd_load(void)
64507 * mounted in the normal path.
64508 */
64509 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64510 - sys_unlink("/initrd.image");
64511 + sys_unlink((const char __force_user *)"/initrd.image");
64512 handle_initrd();
64513 return 1;
64514 }
64515 }
64516 - sys_unlink("/initrd.image");
64517 + sys_unlink((const char __force_user *)"/initrd.image");
64518 return 0;
64519 }
64520 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64521 index 32c4799..c27ee74 100644
64522 --- a/init/do_mounts_md.c
64523 +++ b/init/do_mounts_md.c
64524 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64525 partitioned ? "_d" : "", minor,
64526 md_setup_args[ent].device_names);
64527
64528 - fd = sys_open(name, 0, 0);
64529 + fd = sys_open((char __force_user *)name, 0, 0);
64530 if (fd < 0) {
64531 printk(KERN_ERR "md: open failed - cannot start "
64532 "array %s\n", name);
64533 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64534 * array without it
64535 */
64536 sys_close(fd);
64537 - fd = sys_open(name, 0, 0);
64538 + fd = sys_open((char __force_user *)name, 0, 0);
64539 sys_ioctl(fd, BLKRRPART, 0);
64540 }
64541 sys_close(fd);
64542 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64543
64544 wait_for_device_probe();
64545
64546 - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64547 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64548 if (fd >= 0) {
64549 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64550 sys_close(fd);
64551 diff --git a/init/initramfs.c b/init/initramfs.c
64552 index 8216c30..25e8e32 100644
64553 --- a/init/initramfs.c
64554 +++ b/init/initramfs.c
64555 @@ -74,7 +74,7 @@ static void __init free_hash(void)
64556 }
64557 }
64558
64559 -static long __init do_utime(char __user *filename, time_t mtime)
64560 +static long __init do_utime(__force char __user *filename, time_t mtime)
64561 {
64562 struct timespec t[2];
64563
64564 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
64565 struct dir_entry *de, *tmp;
64566 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64567 list_del(&de->list);
64568 - do_utime(de->name, de->mtime);
64569 + do_utime((char __force_user *)de->name, de->mtime);
64570 kfree(de->name);
64571 kfree(de);
64572 }
64573 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
64574 if (nlink >= 2) {
64575 char *old = find_link(major, minor, ino, mode, collected);
64576 if (old)
64577 - return (sys_link(old, collected) < 0) ? -1 : 1;
64578 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
64579 }
64580 return 0;
64581 }
64582 @@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
64583 {
64584 struct stat st;
64585
64586 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
64587 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
64588 if (S_ISDIR(st.st_mode))
64589 - sys_rmdir(path);
64590 + sys_rmdir((char __force_user *)path);
64591 else
64592 - sys_unlink(path);
64593 + sys_unlink((char __force_user *)path);
64594 }
64595 }
64596
64597 @@ -305,7 +305,7 @@ static int __init do_name(void)
64598 int openflags = O_WRONLY|O_CREAT;
64599 if (ml != 1)
64600 openflags |= O_TRUNC;
64601 - wfd = sys_open(collected, openflags, mode);
64602 + wfd = sys_open((char __force_user *)collected, openflags, mode);
64603
64604 if (wfd >= 0) {
64605 sys_fchown(wfd, uid, gid);
64606 @@ -317,17 +317,17 @@ static int __init do_name(void)
64607 }
64608 }
64609 } else if (S_ISDIR(mode)) {
64610 - sys_mkdir(collected, mode);
64611 - sys_chown(collected, uid, gid);
64612 - sys_chmod(collected, mode);
64613 + sys_mkdir((char __force_user *)collected, mode);
64614 + sys_chown((char __force_user *)collected, uid, gid);
64615 + sys_chmod((char __force_user *)collected, mode);
64616 dir_add(collected, mtime);
64617 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64618 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64619 if (maybe_link() == 0) {
64620 - sys_mknod(collected, mode, rdev);
64621 - sys_chown(collected, uid, gid);
64622 - sys_chmod(collected, mode);
64623 - do_utime(collected, mtime);
64624 + sys_mknod((char __force_user *)collected, mode, rdev);
64625 + sys_chown((char __force_user *)collected, uid, gid);
64626 + sys_chmod((char __force_user *)collected, mode);
64627 + do_utime((char __force_user *)collected, mtime);
64628 }
64629 }
64630 return 0;
64631 @@ -336,15 +336,15 @@ static int __init do_name(void)
64632 static int __init do_copy(void)
64633 {
64634 if (count >= body_len) {
64635 - sys_write(wfd, victim, body_len);
64636 + sys_write(wfd, (char __force_user *)victim, body_len);
64637 sys_close(wfd);
64638 - do_utime(vcollected, mtime);
64639 + do_utime((char __force_user *)vcollected, mtime);
64640 kfree(vcollected);
64641 eat(body_len);
64642 state = SkipIt;
64643 return 0;
64644 } else {
64645 - sys_write(wfd, victim, count);
64646 + sys_write(wfd, (char __force_user *)victim, count);
64647 body_len -= count;
64648 eat(count);
64649 return 1;
64650 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
64651 {
64652 collected[N_ALIGN(name_len) + body_len] = '\0';
64653 clean_path(collected, 0);
64654 - sys_symlink(collected + N_ALIGN(name_len), collected);
64655 - sys_lchown(collected, uid, gid);
64656 - do_utime(collected, mtime);
64657 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64658 + sys_lchown((char __force_user *)collected, uid, gid);
64659 + do_utime((char __force_user *)collected, mtime);
64660 state = SkipIt;
64661 next_state = Reset;
64662 return 0;
64663 diff --git a/init/main.c b/init/main.c
64664 index b08c5f7..bf65a52 100644
64665 --- a/init/main.c
64666 +++ b/init/main.c
64667 @@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
64668 extern void tc_init(void);
64669 #endif
64670
64671 +extern void grsecurity_init(void);
64672 +
64673 /*
64674 * Debug helper: via this flag we know that we are in 'early bootup code'
64675 * where only the boot processor is running with IRQ disabled. This means
64676 @@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
64677
64678 __setup("reset_devices", set_reset_devices);
64679
64680 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
64681 +extern char pax_enter_kernel_user[];
64682 +extern char pax_exit_kernel_user[];
64683 +extern pgdval_t clone_pgd_mask;
64684 +#endif
64685 +
64686 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
64687 +static int __init setup_pax_nouderef(char *str)
64688 +{
64689 +#ifdef CONFIG_X86_32
64690 + unsigned int cpu;
64691 + struct desc_struct *gdt;
64692 +
64693 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
64694 + gdt = get_cpu_gdt_table(cpu);
64695 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64696 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64697 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64698 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
64699 + }
64700 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
64701 +#else
64702 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64703 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
64704 + clone_pgd_mask = ~(pgdval_t)0UL;
64705 +#endif
64706 +
64707 + return 0;
64708 +}
64709 +early_param("pax_nouderef", setup_pax_nouderef);
64710 +#endif
64711 +
64712 +#ifdef CONFIG_PAX_SOFTMODE
64713 +int pax_softmode;
64714 +
64715 +static int __init setup_pax_softmode(char *str)
64716 +{
64717 + get_option(&str, &pax_softmode);
64718 + return 1;
64719 +}
64720 +__setup("pax_softmode=", setup_pax_softmode);
64721 +#endif
64722 +
64723 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
64724 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
64725 static const char *panic_later, *panic_param;
64726 @@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
64727 {
64728 int count = preempt_count();
64729 int ret;
64730 + const char *msg1 = "", *msg2 = "";
64731
64732 if (initcall_debug)
64733 ret = do_one_initcall_debug(fn);
64734 @@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
64735 sprintf(msgbuf, "error code %d ", ret);
64736
64737 if (preempt_count() != count) {
64738 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
64739 + msg1 = " preemption imbalance";
64740 preempt_count() = count;
64741 }
64742 if (irqs_disabled()) {
64743 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
64744 + msg2 = " disabled interrupts";
64745 local_irq_enable();
64746 }
64747 - if (msgbuf[0]) {
64748 - printk("initcall %pF returned with %s\n", fn, msgbuf);
64749 + if (msgbuf[0] || *msg1 || *msg2) {
64750 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
64751 }
64752
64753 return ret;
64754 @@ -747,8 +793,14 @@ static void __init do_initcall_level(int level)
64755 level, level,
64756 repair_env_string);
64757
64758 - for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
64759 + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
64760 do_one_initcall(*fn);
64761 +
64762 +#ifdef CONFIG_PAX_LATENT_ENTROPY
64763 + transfer_latent_entropy();
64764 +#endif
64765 +
64766 + }
64767 }
64768
64769 static void __init do_initcalls(void)
64770 @@ -782,8 +834,14 @@ static void __init do_pre_smp_initcalls(void)
64771 {
64772 initcall_t *fn;
64773
64774 - for (fn = __initcall_start; fn < __initcall0_start; fn++)
64775 + for (fn = __initcall_start; fn < __initcall0_start; fn++) {
64776 do_one_initcall(*fn);
64777 +
64778 +#ifdef CONFIG_PAX_LATENT_ENTROPY
64779 + transfer_latent_entropy();
64780 +#endif
64781 +
64782 + }
64783 }
64784
64785 static void run_init_process(const char *init_filename)
64786 @@ -865,7 +923,7 @@ static int __init kernel_init(void * unused)
64787 do_basic_setup();
64788
64789 /* Open the /dev/console on the rootfs, this should never fail */
64790 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
64791 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
64792 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
64793
64794 (void) sys_dup(0);
64795 @@ -878,11 +936,13 @@ static int __init kernel_init(void * unused)
64796 if (!ramdisk_execute_command)
64797 ramdisk_execute_command = "/init";
64798
64799 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
64800 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
64801 ramdisk_execute_command = NULL;
64802 prepare_namespace();
64803 }
64804
64805 + grsecurity_init();
64806 +
64807 /*
64808 * Ok, we have completed the initial bootup, and
64809 * we're essentially up and running. Get rid of the
64810 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
64811 index 28bd64d..c66b72a 100644
64812 --- a/ipc/mqueue.c
64813 +++ b/ipc/mqueue.c
64814 @@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
64815 mq_bytes = (mq_msg_tblsz +
64816 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
64817
64818 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
64819 spin_lock(&mq_lock);
64820 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
64821 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
64822 diff --git a/ipc/msg.c b/ipc/msg.c
64823 index 7385de2..a8180e08 100644
64824 --- a/ipc/msg.c
64825 +++ b/ipc/msg.c
64826 @@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
64827 return security_msg_queue_associate(msq, msgflg);
64828 }
64829
64830 +static struct ipc_ops msg_ops = {
64831 + .getnew = newque,
64832 + .associate = msg_security,
64833 + .more_checks = NULL
64834 +};
64835 +
64836 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
64837 {
64838 struct ipc_namespace *ns;
64839 - struct ipc_ops msg_ops;
64840 struct ipc_params msg_params;
64841
64842 ns = current->nsproxy->ipc_ns;
64843
64844 - msg_ops.getnew = newque;
64845 - msg_ops.associate = msg_security;
64846 - msg_ops.more_checks = NULL;
64847 -
64848 msg_params.key = key;
64849 msg_params.flg = msgflg;
64850
64851 diff --git a/ipc/sem.c b/ipc/sem.c
64852 index 5215a81..cfc0cac 100644
64853 --- a/ipc/sem.c
64854 +++ b/ipc/sem.c
64855 @@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
64856 return 0;
64857 }
64858
64859 +static struct ipc_ops sem_ops = {
64860 + .getnew = newary,
64861 + .associate = sem_security,
64862 + .more_checks = sem_more_checks
64863 +};
64864 +
64865 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64866 {
64867 struct ipc_namespace *ns;
64868 - struct ipc_ops sem_ops;
64869 struct ipc_params sem_params;
64870
64871 ns = current->nsproxy->ipc_ns;
64872 @@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
64873 if (nsems < 0 || nsems > ns->sc_semmsl)
64874 return -EINVAL;
64875
64876 - sem_ops.getnew = newary;
64877 - sem_ops.associate = sem_security;
64878 - sem_ops.more_checks = sem_more_checks;
64879 -
64880 sem_params.key = key;
64881 sem_params.flg = semflg;
64882 sem_params.u.nsems = nsems;
64883 diff --git a/ipc/shm.c b/ipc/shm.c
64884 index 406c5b2..bc66d67 100644
64885 --- a/ipc/shm.c
64886 +++ b/ipc/shm.c
64887 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
64888 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
64889 #endif
64890
64891 +#ifdef CONFIG_GRKERNSEC
64892 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64893 + const time_t shm_createtime, const uid_t cuid,
64894 + const int shmid);
64895 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64896 + const time_t shm_createtime);
64897 +#endif
64898 +
64899 void shm_init_ns(struct ipc_namespace *ns)
64900 {
64901 ns->shm_ctlmax = SHMMAX;
64902 @@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
64903 shp->shm_lprid = 0;
64904 shp->shm_atim = shp->shm_dtim = 0;
64905 shp->shm_ctim = get_seconds();
64906 +#ifdef CONFIG_GRKERNSEC
64907 + {
64908 + struct timespec timeval;
64909 + do_posix_clock_monotonic_gettime(&timeval);
64910 +
64911 + shp->shm_createtime = timeval.tv_sec;
64912 + }
64913 +#endif
64914 shp->shm_segsz = size;
64915 shp->shm_nattch = 0;
64916 shp->shm_file = file;
64917 @@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
64918 return 0;
64919 }
64920
64921 +static struct ipc_ops shm_ops = {
64922 + .getnew = newseg,
64923 + .associate = shm_security,
64924 + .more_checks = shm_more_checks
64925 +};
64926 +
64927 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
64928 {
64929 struct ipc_namespace *ns;
64930 - struct ipc_ops shm_ops;
64931 struct ipc_params shm_params;
64932
64933 ns = current->nsproxy->ipc_ns;
64934
64935 - shm_ops.getnew = newseg;
64936 - shm_ops.associate = shm_security;
64937 - shm_ops.more_checks = shm_more_checks;
64938 -
64939 shm_params.key = key;
64940 shm_params.flg = shmflg;
64941 shm_params.u.size = size;
64942 @@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64943 f_mode = FMODE_READ | FMODE_WRITE;
64944 }
64945 if (shmflg & SHM_EXEC) {
64946 +
64947 +#ifdef CONFIG_PAX_MPROTECT
64948 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
64949 + goto out;
64950 +#endif
64951 +
64952 prot |= PROT_EXEC;
64953 acc_mode |= S_IXUGO;
64954 }
64955 @@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
64956 if (err)
64957 goto out_unlock;
64958
64959 +#ifdef CONFIG_GRKERNSEC
64960 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
64961 + shp->shm_perm.cuid, shmid) ||
64962 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
64963 + err = -EACCES;
64964 + goto out_unlock;
64965 + }
64966 +#endif
64967 +
64968 path = shp->shm_file->f_path;
64969 path_get(&path);
64970 shp->shm_nattch++;
64971 +#ifdef CONFIG_GRKERNSEC
64972 + shp->shm_lapid = current->pid;
64973 +#endif
64974 size = i_size_read(path.dentry->d_inode);
64975 shm_unlock(shp);
64976
64977 diff --git a/kernel/acct.c b/kernel/acct.c
64978 index 02e6167..54824f7 100644
64979 --- a/kernel/acct.c
64980 +++ b/kernel/acct.c
64981 @@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
64982 */
64983 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
64984 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
64985 - file->f_op->write(file, (char *)&ac,
64986 + file->f_op->write(file, (char __force_user *)&ac,
64987 sizeof(acct_t), &file->f_pos);
64988 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
64989 set_fs(fs);
64990 diff --git a/kernel/audit.c b/kernel/audit.c
64991 index 1c7f2c6..9ba5359 100644
64992 --- a/kernel/audit.c
64993 +++ b/kernel/audit.c
64994 @@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
64995 3) suppressed due to audit_rate_limit
64996 4) suppressed due to audit_backlog_limit
64997 */
64998 -static atomic_t audit_lost = ATOMIC_INIT(0);
64999 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65000
65001 /* The netlink socket. */
65002 static struct sock *audit_sock;
65003 @@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
65004 unsigned long now;
65005 int print;
65006
65007 - atomic_inc(&audit_lost);
65008 + atomic_inc_unchecked(&audit_lost);
65009
65010 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65011
65012 @@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
65013 printk(KERN_WARNING
65014 "audit: audit_lost=%d audit_rate_limit=%d "
65015 "audit_backlog_limit=%d\n",
65016 - atomic_read(&audit_lost),
65017 + atomic_read_unchecked(&audit_lost),
65018 audit_rate_limit,
65019 audit_backlog_limit);
65020 audit_panic(message);
65021 @@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
65022 status_set.pid = audit_pid;
65023 status_set.rate_limit = audit_rate_limit;
65024 status_set.backlog_limit = audit_backlog_limit;
65025 - status_set.lost = atomic_read(&audit_lost);
65026 + status_set.lost = atomic_read_unchecked(&audit_lost);
65027 status_set.backlog = skb_queue_len(&audit_skb_queue);
65028 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65029 &status_set, sizeof(status_set));
65030 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
65031 index af1de0f..06dfe57 100644
65032 --- a/kernel/auditsc.c
65033 +++ b/kernel/auditsc.c
65034 @@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
65035 }
65036
65037 /* global counter which is incremented every time something logs in */
65038 -static atomic_t session_id = ATOMIC_INIT(0);
65039 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65040
65041 /**
65042 * audit_set_loginuid - set current task's audit_context loginuid
65043 @@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65044 return -EPERM;
65045 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
65046
65047 - sessionid = atomic_inc_return(&session_id);
65048 + sessionid = atomic_inc_return_unchecked(&session_id);
65049 if (context && context->in_syscall) {
65050 struct audit_buffer *ab;
65051
65052 diff --git a/kernel/capability.c b/kernel/capability.c
65053 index 3f1adb6..c564db0 100644
65054 --- a/kernel/capability.c
65055 +++ b/kernel/capability.c
65056 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
65057 * before modification is attempted and the application
65058 * fails.
65059 */
65060 + if (tocopy > ARRAY_SIZE(kdata))
65061 + return -EFAULT;
65062 +
65063 if (copy_to_user(dataptr, kdata, tocopy
65064 * sizeof(struct __user_cap_data_struct))) {
65065 return -EFAULT;
65066 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65067 int ret;
65068
65069 rcu_read_lock();
65070 - ret = security_capable(__task_cred(t), ns, cap);
65071 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65072 + gr_task_is_capable(t, __task_cred(t), cap);
65073 rcu_read_unlock();
65074
65075 - return (ret == 0);
65076 + return ret;
65077 }
65078
65079 /**
65080 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65081 int ret;
65082
65083 rcu_read_lock();
65084 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
65085 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65086 rcu_read_unlock();
65087
65088 - return (ret == 0);
65089 + return ret;
65090 }
65091
65092 /**
65093 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
65094 BUG();
65095 }
65096
65097 - if (security_capable(current_cred(), ns, cap) == 0) {
65098 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
65099 current->flags |= PF_SUPERPRIV;
65100 return true;
65101 }
65102 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
65103 }
65104 EXPORT_SYMBOL(ns_capable);
65105
65106 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
65107 +{
65108 + if (unlikely(!cap_valid(cap))) {
65109 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65110 + BUG();
65111 + }
65112 +
65113 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
65114 + current->flags |= PF_SUPERPRIV;
65115 + return true;
65116 + }
65117 + return false;
65118 +}
65119 +EXPORT_SYMBOL(ns_capable_nolog);
65120 +
65121 /**
65122 * capable - Determine if the current task has a superior capability in effect
65123 * @cap: The capability to be tested for
65124 @@ -408,6 +427,12 @@ bool capable(int cap)
65125 }
65126 EXPORT_SYMBOL(capable);
65127
65128 +bool capable_nolog(int cap)
65129 +{
65130 + return ns_capable_nolog(&init_user_ns, cap);
65131 +}
65132 +EXPORT_SYMBOL(capable_nolog);
65133 +
65134 /**
65135 * nsown_capable - Check superior capability to one's own user_ns
65136 * @cap: The capability in question
65137 diff --git a/kernel/compat.c b/kernel/compat.c
65138 index d2c67aa..a629b2e 100644
65139 --- a/kernel/compat.c
65140 +++ b/kernel/compat.c
65141 @@ -13,6 +13,7 @@
65142
65143 #include <linux/linkage.h>
65144 #include <linux/compat.h>
65145 +#include <linux/module.h>
65146 #include <linux/errno.h>
65147 #include <linux/time.h>
65148 #include <linux/signal.h>
65149 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
65150 mm_segment_t oldfs;
65151 long ret;
65152
65153 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65154 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65155 oldfs = get_fs();
65156 set_fs(KERNEL_DS);
65157 ret = hrtimer_nanosleep_restart(restart);
65158 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
65159 oldfs = get_fs();
65160 set_fs(KERNEL_DS);
65161 ret = hrtimer_nanosleep(&tu,
65162 - rmtp ? (struct timespec __user *)&rmt : NULL,
65163 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
65164 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65165 set_fs(oldfs);
65166
65167 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
65168 mm_segment_t old_fs = get_fs();
65169
65170 set_fs(KERNEL_DS);
65171 - ret = sys_sigpending((old_sigset_t __user *) &s);
65172 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
65173 set_fs(old_fs);
65174 if (ret == 0)
65175 ret = put_user(s, set);
65176 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
65177 mm_segment_t old_fs = get_fs();
65178
65179 set_fs(KERNEL_DS);
65180 - ret = sys_old_getrlimit(resource, &r);
65181 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65182 set_fs(old_fs);
65183
65184 if (!ret) {
65185 @@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
65186 mm_segment_t old_fs = get_fs();
65187
65188 set_fs(KERNEL_DS);
65189 - ret = sys_getrusage(who, (struct rusage __user *) &r);
65190 + ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65191 set_fs(old_fs);
65192
65193 if (ret)
65194 @@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
65195 set_fs (KERNEL_DS);
65196 ret = sys_wait4(pid,
65197 (stat_addr ?
65198 - (unsigned int __user *) &status : NULL),
65199 - options, (struct rusage __user *) &r);
65200 + (unsigned int __force_user *) &status : NULL),
65201 + options, (struct rusage __force_user *) &r);
65202 set_fs (old_fs);
65203
65204 if (ret > 0) {
65205 @@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
65206 memset(&info, 0, sizeof(info));
65207
65208 set_fs(KERNEL_DS);
65209 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65210 - uru ? (struct rusage __user *)&ru : NULL);
65211 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65212 + uru ? (struct rusage __force_user *)&ru : NULL);
65213 set_fs(old_fs);
65214
65215 if ((ret < 0) || (info.si_signo == 0))
65216 @@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
65217 oldfs = get_fs();
65218 set_fs(KERNEL_DS);
65219 err = sys_timer_settime(timer_id, flags,
65220 - (struct itimerspec __user *) &newts,
65221 - (struct itimerspec __user *) &oldts);
65222 + (struct itimerspec __force_user *) &newts,
65223 + (struct itimerspec __force_user *) &oldts);
65224 set_fs(oldfs);
65225 if (!err && old && put_compat_itimerspec(old, &oldts))
65226 return -EFAULT;
65227 @@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
65228 oldfs = get_fs();
65229 set_fs(KERNEL_DS);
65230 err = sys_timer_gettime(timer_id,
65231 - (struct itimerspec __user *) &ts);
65232 + (struct itimerspec __force_user *) &ts);
65233 set_fs(oldfs);
65234 if (!err && put_compat_itimerspec(setting, &ts))
65235 return -EFAULT;
65236 @@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
65237 oldfs = get_fs();
65238 set_fs(KERNEL_DS);
65239 err = sys_clock_settime(which_clock,
65240 - (struct timespec __user *) &ts);
65241 + (struct timespec __force_user *) &ts);
65242 set_fs(oldfs);
65243 return err;
65244 }
65245 @@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
65246 oldfs = get_fs();
65247 set_fs(KERNEL_DS);
65248 err = sys_clock_gettime(which_clock,
65249 - (struct timespec __user *) &ts);
65250 + (struct timespec __force_user *) &ts);
65251 set_fs(oldfs);
65252 if (!err && put_compat_timespec(&ts, tp))
65253 return -EFAULT;
65254 @@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
65255
65256 oldfs = get_fs();
65257 set_fs(KERNEL_DS);
65258 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65259 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65260 set_fs(oldfs);
65261
65262 err = compat_put_timex(utp, &txc);
65263 @@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
65264 oldfs = get_fs();
65265 set_fs(KERNEL_DS);
65266 err = sys_clock_getres(which_clock,
65267 - (struct timespec __user *) &ts);
65268 + (struct timespec __force_user *) &ts);
65269 set_fs(oldfs);
65270 if (!err && tp && put_compat_timespec(&ts, tp))
65271 return -EFAULT;
65272 @@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
65273 long err;
65274 mm_segment_t oldfs;
65275 struct timespec tu;
65276 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65277 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65278
65279 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65280 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65281 oldfs = get_fs();
65282 set_fs(KERNEL_DS);
65283 err = clock_nanosleep_restart(restart);
65284 @@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
65285 oldfs = get_fs();
65286 set_fs(KERNEL_DS);
65287 err = sys_clock_nanosleep(which_clock, flags,
65288 - (struct timespec __user *) &in,
65289 - (struct timespec __user *) &out);
65290 + (struct timespec __force_user *) &in,
65291 + (struct timespec __force_user *) &out);
65292 set_fs(oldfs);
65293
65294 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
65295 diff --git a/kernel/configs.c b/kernel/configs.c
65296 index 42e8fa0..9e7406b 100644
65297 --- a/kernel/configs.c
65298 +++ b/kernel/configs.c
65299 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
65300 struct proc_dir_entry *entry;
65301
65302 /* create the current config file */
65303 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65304 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65305 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65306 + &ikconfig_file_ops);
65307 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65308 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65309 + &ikconfig_file_ops);
65310 +#endif
65311 +#else
65312 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65313 &ikconfig_file_ops);
65314 +#endif
65315 +
65316 if (!entry)
65317 return -ENOMEM;
65318
65319 diff --git a/kernel/cred.c b/kernel/cred.c
65320 index e70683d..27761b6 100644
65321 --- a/kernel/cred.c
65322 +++ b/kernel/cred.c
65323 @@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
65324 validate_creds(cred);
65325 put_cred(cred);
65326 }
65327 +
65328 +#ifdef CONFIG_GRKERNSEC_SETXID
65329 + cred = (struct cred *) tsk->delayed_cred;
65330 + if (cred) {
65331 + tsk->delayed_cred = NULL;
65332 + validate_creds(cred);
65333 + put_cred(cred);
65334 + }
65335 +#endif
65336 }
65337
65338 /**
65339 @@ -473,7 +482,7 @@ error_put:
65340 * Always returns 0 thus allowing this function to be tail-called at the end
65341 * of, say, sys_setgid().
65342 */
65343 -int commit_creds(struct cred *new)
65344 +static int __commit_creds(struct cred *new)
65345 {
65346 struct task_struct *task = current;
65347 const struct cred *old = task->real_cred;
65348 @@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
65349
65350 get_cred(new); /* we will require a ref for the subj creds too */
65351
65352 + gr_set_role_label(task, new->uid, new->gid);
65353 +
65354 /* dumpability changes */
65355 if (old->euid != new->euid ||
65356 old->egid != new->egid ||
65357 @@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
65358 put_cred(old);
65359 return 0;
65360 }
65361 +#ifdef CONFIG_GRKERNSEC_SETXID
65362 +extern int set_user(struct cred *new);
65363 +
65364 +void gr_delayed_cred_worker(void)
65365 +{
65366 + const struct cred *new = current->delayed_cred;
65367 + struct cred *ncred;
65368 +
65369 + current->delayed_cred = NULL;
65370 +
65371 + if (current_uid() && new != NULL) {
65372 + // from doing get_cred on it when queueing this
65373 + put_cred(new);
65374 + return;
65375 + } else if (new == NULL)
65376 + return;
65377 +
65378 + ncred = prepare_creds();
65379 + if (!ncred)
65380 + goto die;
65381 + // uids
65382 + ncred->uid = new->uid;
65383 + ncred->euid = new->euid;
65384 + ncred->suid = new->suid;
65385 + ncred->fsuid = new->fsuid;
65386 + // gids
65387 + ncred->gid = new->gid;
65388 + ncred->egid = new->egid;
65389 + ncred->sgid = new->sgid;
65390 + ncred->fsgid = new->fsgid;
65391 + // groups
65392 + if (set_groups(ncred, new->group_info) < 0) {
65393 + abort_creds(ncred);
65394 + goto die;
65395 + }
65396 + // caps
65397 + ncred->securebits = new->securebits;
65398 + ncred->cap_inheritable = new->cap_inheritable;
65399 + ncred->cap_permitted = new->cap_permitted;
65400 + ncred->cap_effective = new->cap_effective;
65401 + ncred->cap_bset = new->cap_bset;
65402 +
65403 + if (set_user(ncred)) {
65404 + abort_creds(ncred);
65405 + goto die;
65406 + }
65407 +
65408 + // from doing get_cred on it when queueing this
65409 + put_cred(new);
65410 +
65411 + __commit_creds(ncred);
65412 + return;
65413 +die:
65414 + // from doing get_cred on it when queueing this
65415 + put_cred(new);
65416 + do_group_exit(SIGKILL);
65417 +}
65418 +#endif
65419 +
65420 +int commit_creds(struct cred *new)
65421 +{
65422 +#ifdef CONFIG_GRKERNSEC_SETXID
65423 + int ret;
65424 + int schedule_it = 0;
65425 + struct task_struct *t;
65426 +
65427 + /* we won't get called with tasklist_lock held for writing
65428 + and interrupts disabled as the cred struct in that case is
65429 + init_cred
65430 + */
65431 + if (grsec_enable_setxid && !current_is_single_threaded() &&
65432 + !current_uid() && new->uid) {
65433 + schedule_it = 1;
65434 + }
65435 + ret = __commit_creds(new);
65436 + if (schedule_it) {
65437 + rcu_read_lock();
65438 + read_lock(&tasklist_lock);
65439 + for (t = next_thread(current); t != current;
65440 + t = next_thread(t)) {
65441 + if (t->delayed_cred == NULL) {
65442 + t->delayed_cred = get_cred(new);
65443 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
65444 + set_tsk_need_resched(t);
65445 + }
65446 + }
65447 + read_unlock(&tasklist_lock);
65448 + rcu_read_unlock();
65449 + }
65450 + return ret;
65451 +#else
65452 + return __commit_creds(new);
65453 +#endif
65454 +}
65455 +
65456 EXPORT_SYMBOL(commit_creds);
65457
65458 /**
65459 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
65460 index 0557f24..1a00d9a 100644
65461 --- a/kernel/debug/debug_core.c
65462 +++ b/kernel/debug/debug_core.c
65463 @@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
65464 */
65465 static atomic_t masters_in_kgdb;
65466 static atomic_t slaves_in_kgdb;
65467 -static atomic_t kgdb_break_tasklet_var;
65468 +static atomic_unchecked_t kgdb_break_tasklet_var;
65469 atomic_t kgdb_setting_breakpoint;
65470
65471 struct task_struct *kgdb_usethread;
65472 @@ -132,7 +132,7 @@ int kgdb_single_step;
65473 static pid_t kgdb_sstep_pid;
65474
65475 /* to keep track of the CPU which is doing the single stepping*/
65476 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65477 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65478
65479 /*
65480 * If you are debugging a problem where roundup (the collection of
65481 @@ -540,7 +540,7 @@ return_normal:
65482 * kernel will only try for the value of sstep_tries before
65483 * giving up and continuing on.
65484 */
65485 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65486 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65487 (kgdb_info[cpu].task &&
65488 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65489 atomic_set(&kgdb_active, -1);
65490 @@ -634,8 +634,8 @@ cpu_master_loop:
65491 }
65492
65493 kgdb_restore:
65494 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65495 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65496 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65497 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65498 if (kgdb_info[sstep_cpu].task)
65499 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65500 else
65501 @@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
65502 static void kgdb_tasklet_bpt(unsigned long ing)
65503 {
65504 kgdb_breakpoint();
65505 - atomic_set(&kgdb_break_tasklet_var, 0);
65506 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65507 }
65508
65509 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65510
65511 void kgdb_schedule_breakpoint(void)
65512 {
65513 - if (atomic_read(&kgdb_break_tasklet_var) ||
65514 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65515 atomic_read(&kgdb_active) != -1 ||
65516 atomic_read(&kgdb_setting_breakpoint))
65517 return;
65518 - atomic_inc(&kgdb_break_tasklet_var);
65519 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
65520 tasklet_schedule(&kgdb_tasklet_breakpoint);
65521 }
65522 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
65523 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
65524 index 67b847d..93834dd 100644
65525 --- a/kernel/debug/kdb/kdb_main.c
65526 +++ b/kernel/debug/kdb/kdb_main.c
65527 @@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
65528 list_for_each_entry(mod, kdb_modules, list) {
65529
65530 kdb_printf("%-20s%8u 0x%p ", mod->name,
65531 - mod->core_size, (void *)mod);
65532 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
65533 #ifdef CONFIG_MODULE_UNLOAD
65534 kdb_printf("%4ld ", module_refcount(mod));
65535 #endif
65536 @@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
65537 kdb_printf(" (Loading)");
65538 else
65539 kdb_printf(" (Live)");
65540 - kdb_printf(" 0x%p", mod->module_core);
65541 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65542
65543 #ifdef CONFIG_MODULE_UNLOAD
65544 {
65545 diff --git a/kernel/events/core.c b/kernel/events/core.c
65546 index fd126f8..70b755b 100644
65547 --- a/kernel/events/core.c
65548 +++ b/kernel/events/core.c
65549 @@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
65550 return 0;
65551 }
65552
65553 -static atomic64_t perf_event_id;
65554 +static atomic64_unchecked_t perf_event_id;
65555
65556 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65557 enum event_type_t event_type);
65558 @@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
65559
65560 static inline u64 perf_event_count(struct perf_event *event)
65561 {
65562 - return local64_read(&event->count) + atomic64_read(&event->child_count);
65563 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65564 }
65565
65566 static u64 perf_event_read(struct perf_event *event)
65567 @@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
65568 mutex_lock(&event->child_mutex);
65569 total += perf_event_read(event);
65570 *enabled += event->total_time_enabled +
65571 - atomic64_read(&event->child_total_time_enabled);
65572 + atomic64_read_unchecked(&event->child_total_time_enabled);
65573 *running += event->total_time_running +
65574 - atomic64_read(&event->child_total_time_running);
65575 + atomic64_read_unchecked(&event->child_total_time_running);
65576
65577 list_for_each_entry(child, &event->child_list, child_list) {
65578 total += perf_event_read(child);
65579 @@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
65580 userpg->offset -= local64_read(&event->hw.prev_count);
65581
65582 userpg->time_enabled = enabled +
65583 - atomic64_read(&event->child_total_time_enabled);
65584 + atomic64_read_unchecked(&event->child_total_time_enabled);
65585
65586 userpg->time_running = running +
65587 - atomic64_read(&event->child_total_time_running);
65588 + atomic64_read_unchecked(&event->child_total_time_running);
65589
65590 arch_perf_update_userpage(userpg, now);
65591
65592 @@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
65593 values[n++] = perf_event_count(event);
65594 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65595 values[n++] = enabled +
65596 - atomic64_read(&event->child_total_time_enabled);
65597 + atomic64_read_unchecked(&event->child_total_time_enabled);
65598 }
65599 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65600 values[n++] = running +
65601 - atomic64_read(&event->child_total_time_running);
65602 + atomic64_read_unchecked(&event->child_total_time_running);
65603 }
65604 if (read_format & PERF_FORMAT_ID)
65605 values[n++] = primary_event_id(event);
65606 @@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
65607 * need to add enough zero bytes after the string to handle
65608 * the 64bit alignment we do later.
65609 */
65610 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65611 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
65612 if (!buf) {
65613 name = strncpy(tmp, "//enomem", sizeof(tmp));
65614 goto got_name;
65615 }
65616 - name = d_path(&file->f_path, buf, PATH_MAX);
65617 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65618 if (IS_ERR(name)) {
65619 name = strncpy(tmp, "//toolong", sizeof(tmp));
65620 goto got_name;
65621 @@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
65622 event->parent = parent_event;
65623
65624 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65625 - event->id = atomic64_inc_return(&perf_event_id);
65626 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
65627
65628 event->state = PERF_EVENT_STATE_INACTIVE;
65629
65630 @@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
65631 /*
65632 * Add back the child's count to the parent's count:
65633 */
65634 - atomic64_add(child_val, &parent_event->child_count);
65635 - atomic64_add(child_event->total_time_enabled,
65636 + atomic64_add_unchecked(child_val, &parent_event->child_count);
65637 + atomic64_add_unchecked(child_event->total_time_enabled,
65638 &parent_event->child_total_time_enabled);
65639 - atomic64_add(child_event->total_time_running,
65640 + atomic64_add_unchecked(child_event->total_time_running,
65641 &parent_event->child_total_time_running);
65642
65643 /*
65644 diff --git a/kernel/exit.c b/kernel/exit.c
65645 index 9d81012..d7911f1 100644
65646 --- a/kernel/exit.c
65647 +++ b/kernel/exit.c
65648 @@ -59,6 +59,10 @@
65649 #include <asm/pgtable.h>
65650 #include <asm/mmu_context.h>
65651
65652 +#ifdef CONFIG_GRKERNSEC
65653 +extern rwlock_t grsec_exec_file_lock;
65654 +#endif
65655 +
65656 static void exit_mm(struct task_struct * tsk);
65657
65658 static void __unhash_process(struct task_struct *p, bool group_dead)
65659 @@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
65660 struct task_struct *leader;
65661 int zap_leader;
65662 repeat:
65663 +#ifdef CONFIG_NET
65664 + gr_del_task_from_ip_table(p);
65665 +#endif
65666 +
65667 /* don't need to get the RCU readlock here - the process is dead and
65668 * can't be modifying its own credentials. But shut RCU-lockdep up */
65669 rcu_read_lock();
65670 @@ -382,7 +390,7 @@ int allow_signal(int sig)
65671 * know it'll be handled, so that they don't get converted to
65672 * SIGKILL or just silently dropped.
65673 */
65674 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65675 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65676 recalc_sigpending();
65677 spin_unlock_irq(&current->sighand->siglock);
65678 return 0;
65679 @@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
65680 vsnprintf(current->comm, sizeof(current->comm), name, args);
65681 va_end(args);
65682
65683 +#ifdef CONFIG_GRKERNSEC
65684 + write_lock(&grsec_exec_file_lock);
65685 + if (current->exec_file) {
65686 + fput(current->exec_file);
65687 + current->exec_file = NULL;
65688 + }
65689 + write_unlock(&grsec_exec_file_lock);
65690 +#endif
65691 +
65692 + gr_set_kernel_label(current);
65693 +
65694 /*
65695 * If we were started as result of loading a module, close all of the
65696 * user space pages. We don't need them, and if we didn't close them
65697 @@ -901,6 +920,8 @@ void do_exit(long code)
65698 struct task_struct *tsk = current;
65699 int group_dead;
65700
65701 + set_fs(USER_DS);
65702 +
65703 profile_task_exit(tsk);
65704
65705 WARN_ON(blk_needs_flush_plug(tsk));
65706 @@ -917,7 +938,6 @@ void do_exit(long code)
65707 * mm_release()->clear_child_tid() from writing to a user-controlled
65708 * kernel address.
65709 */
65710 - set_fs(USER_DS);
65711
65712 ptrace_event(PTRACE_EVENT_EXIT, code);
65713
65714 @@ -978,6 +998,9 @@ void do_exit(long code)
65715 tsk->exit_code = code;
65716 taskstats_exit(tsk, group_dead);
65717
65718 + gr_acl_handle_psacct(tsk, code);
65719 + gr_acl_handle_exit();
65720 +
65721 exit_mm(tsk);
65722
65723 if (group_dead)
65724 @@ -1094,7 +1117,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
65725 * Take down every thread in the group. This is called by fatal signals
65726 * as well as by sys_exit_group (below).
65727 */
65728 -void
65729 +__noreturn void
65730 do_group_exit(int exit_code)
65731 {
65732 struct signal_struct *sig = current->signal;
65733 diff --git a/kernel/fork.c b/kernel/fork.c
65734 index 8163333..aee97f3 100644
65735 --- a/kernel/fork.c
65736 +++ b/kernel/fork.c
65737 @@ -274,19 +274,24 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
65738 }
65739
65740 err = arch_dup_task_struct(tsk, orig);
65741 - if (err)
65742 - goto out;
65743
65744 + /*
65745 + * We defer looking at err, because we will need this setup
65746 + * for the clean up path to work correctly.
65747 + */
65748 tsk->stack = ti;
65749 -
65750 setup_thread_stack(tsk, orig);
65751 +
65752 + if (err)
65753 + goto out;
65754 +
65755 clear_user_return_notifier(tsk);
65756 clear_tsk_need_resched(tsk);
65757 stackend = end_of_stack(tsk);
65758 *stackend = STACK_END_MAGIC; /* for overflow detection */
65759
65760 #ifdef CONFIG_CC_STACKPROTECTOR
65761 - tsk->stack_canary = get_random_int();
65762 + tsk->stack_canary = pax_get_random_long();
65763 #endif
65764
65765 /*
65766 @@ -310,13 +315,78 @@ out:
65767 }
65768
65769 #ifdef CONFIG_MMU
65770 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
65771 +{
65772 + struct vm_area_struct *tmp;
65773 + unsigned long charge;
65774 + struct mempolicy *pol;
65775 + struct file *file;
65776 +
65777 + charge = 0;
65778 + if (mpnt->vm_flags & VM_ACCOUNT) {
65779 + unsigned long len;
65780 + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65781 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65782 + goto fail_nomem;
65783 + charge = len;
65784 + }
65785 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65786 + if (!tmp)
65787 + goto fail_nomem;
65788 + *tmp = *mpnt;
65789 + tmp->vm_mm = mm;
65790 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
65791 + pol = mpol_dup(vma_policy(mpnt));
65792 + if (IS_ERR(pol))
65793 + goto fail_nomem_policy;
65794 + vma_set_policy(tmp, pol);
65795 + if (anon_vma_fork(tmp, mpnt))
65796 + goto fail_nomem_anon_vma_fork;
65797 + tmp->vm_flags &= ~VM_LOCKED;
65798 + tmp->vm_next = tmp->vm_prev = NULL;
65799 + tmp->vm_mirror = NULL;
65800 + file = tmp->vm_file;
65801 + if (file) {
65802 + struct inode *inode = file->f_path.dentry->d_inode;
65803 + struct address_space *mapping = file->f_mapping;
65804 +
65805 + get_file(file);
65806 + if (tmp->vm_flags & VM_DENYWRITE)
65807 + atomic_dec(&inode->i_writecount);
65808 + mutex_lock(&mapping->i_mmap_mutex);
65809 + if (tmp->vm_flags & VM_SHARED)
65810 + mapping->i_mmap_writable++;
65811 + flush_dcache_mmap_lock(mapping);
65812 + /* insert tmp into the share list, just after mpnt */
65813 + vma_prio_tree_add(tmp, mpnt);
65814 + flush_dcache_mmap_unlock(mapping);
65815 + mutex_unlock(&mapping->i_mmap_mutex);
65816 + }
65817 +
65818 + /*
65819 + * Clear hugetlb-related page reserves for children. This only
65820 + * affects MAP_PRIVATE mappings. Faults generated by the child
65821 + * are not guaranteed to succeed, even if read-only
65822 + */
65823 + if (is_vm_hugetlb_page(tmp))
65824 + reset_vma_resv_huge_pages(tmp);
65825 +
65826 + return tmp;
65827 +
65828 +fail_nomem_anon_vma_fork:
65829 + mpol_put(pol);
65830 +fail_nomem_policy:
65831 + kmem_cache_free(vm_area_cachep, tmp);
65832 +fail_nomem:
65833 + vm_unacct_memory(charge);
65834 + return NULL;
65835 +}
65836 +
65837 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65838 {
65839 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
65840 struct rb_node **rb_link, *rb_parent;
65841 int retval;
65842 - unsigned long charge;
65843 - struct mempolicy *pol;
65844
65845 down_write(&oldmm->mmap_sem);
65846 flush_cache_dup_mm(oldmm);
65847 @@ -328,8 +398,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65848 mm->locked_vm = 0;
65849 mm->mmap = NULL;
65850 mm->mmap_cache = NULL;
65851 - mm->free_area_cache = oldmm->mmap_base;
65852 - mm->cached_hole_size = ~0UL;
65853 + mm->free_area_cache = oldmm->free_area_cache;
65854 + mm->cached_hole_size = oldmm->cached_hole_size;
65855 mm->map_count = 0;
65856 cpumask_clear(mm_cpumask(mm));
65857 mm->mm_rb = RB_ROOT;
65858 @@ -345,8 +415,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65859
65860 prev = NULL;
65861 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
65862 - struct file *file;
65863 -
65864 if (mpnt->vm_flags & VM_DONTCOPY) {
65865 long pages = vma_pages(mpnt);
65866 mm->total_vm -= pages;
65867 @@ -354,54 +422,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65868 -pages);
65869 continue;
65870 }
65871 - charge = 0;
65872 - if (mpnt->vm_flags & VM_ACCOUNT) {
65873 - unsigned long len;
65874 - len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
65875 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
65876 - goto fail_nomem;
65877 - charge = len;
65878 + tmp = dup_vma(mm, oldmm, mpnt);
65879 + if (!tmp) {
65880 + retval = -ENOMEM;
65881 + goto out;
65882 }
65883 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65884 - if (!tmp)
65885 - goto fail_nomem;
65886 - *tmp = *mpnt;
65887 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
65888 - pol = mpol_dup(vma_policy(mpnt));
65889 - retval = PTR_ERR(pol);
65890 - if (IS_ERR(pol))
65891 - goto fail_nomem_policy;
65892 - vma_set_policy(tmp, pol);
65893 - tmp->vm_mm = mm;
65894 - if (anon_vma_fork(tmp, mpnt))
65895 - goto fail_nomem_anon_vma_fork;
65896 - tmp->vm_flags &= ~VM_LOCKED;
65897 - tmp->vm_next = tmp->vm_prev = NULL;
65898 - file = tmp->vm_file;
65899 - if (file) {
65900 - struct inode *inode = file->f_path.dentry->d_inode;
65901 - struct address_space *mapping = file->f_mapping;
65902 -
65903 - get_file(file);
65904 - if (tmp->vm_flags & VM_DENYWRITE)
65905 - atomic_dec(&inode->i_writecount);
65906 - mutex_lock(&mapping->i_mmap_mutex);
65907 - if (tmp->vm_flags & VM_SHARED)
65908 - mapping->i_mmap_writable++;
65909 - flush_dcache_mmap_lock(mapping);
65910 - /* insert tmp into the share list, just after mpnt */
65911 - vma_prio_tree_add(tmp, mpnt);
65912 - flush_dcache_mmap_unlock(mapping);
65913 - mutex_unlock(&mapping->i_mmap_mutex);
65914 - }
65915 -
65916 - /*
65917 - * Clear hugetlb-related page reserves for children. This only
65918 - * affects MAP_PRIVATE mappings. Faults generated by the child
65919 - * are not guaranteed to succeed, even if read-only
65920 - */
65921 - if (is_vm_hugetlb_page(tmp))
65922 - reset_vma_resv_huge_pages(tmp);
65923
65924 /*
65925 * Link in the new vma and copy the page table entries.
65926 @@ -424,6 +449,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
65927 if (retval)
65928 goto out;
65929 }
65930 +
65931 +#ifdef CONFIG_PAX_SEGMEXEC
65932 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
65933 + struct vm_area_struct *mpnt_m;
65934 +
65935 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
65936 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
65937 +
65938 + if (!mpnt->vm_mirror)
65939 + continue;
65940 +
65941 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
65942 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
65943 + mpnt->vm_mirror = mpnt_m;
65944 + } else {
65945 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
65946 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
65947 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
65948 + mpnt->vm_mirror->vm_mirror = mpnt;
65949 + }
65950 + }
65951 + BUG_ON(mpnt_m);
65952 + }
65953 +#endif
65954 +
65955 /* a new mm has just been created */
65956 arch_dup_mmap(oldmm, mm);
65957 retval = 0;
65958 @@ -432,14 +482,6 @@ out:
65959 flush_tlb_mm(oldmm);
65960 up_write(&oldmm->mmap_sem);
65961 return retval;
65962 -fail_nomem_anon_vma_fork:
65963 - mpol_put(pol);
65964 -fail_nomem_policy:
65965 - kmem_cache_free(vm_area_cachep, tmp);
65966 -fail_nomem:
65967 - retval = -ENOMEM;
65968 - vm_unacct_memory(charge);
65969 - goto out;
65970 }
65971
65972 static inline int mm_alloc_pgd(struct mm_struct *mm)
65973 @@ -676,8 +718,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
65974 return ERR_PTR(err);
65975
65976 mm = get_task_mm(task);
65977 - if (mm && mm != current->mm &&
65978 - !ptrace_may_access(task, mode)) {
65979 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
65980 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
65981 mmput(mm);
65982 mm = ERR_PTR(-EACCES);
65983 }
65984 @@ -899,13 +941,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
65985 spin_unlock(&fs->lock);
65986 return -EAGAIN;
65987 }
65988 - fs->users++;
65989 + atomic_inc(&fs->users);
65990 spin_unlock(&fs->lock);
65991 return 0;
65992 }
65993 tsk->fs = copy_fs_struct(fs);
65994 if (!tsk->fs)
65995 return -ENOMEM;
65996 + gr_set_chroot_entries(tsk, &tsk->fs->root);
65997 return 0;
65998 }
65999
66000 @@ -1172,6 +1215,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66001 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66002 #endif
66003 retval = -EAGAIN;
66004 +
66005 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66006 +
66007 if (atomic_read(&p->real_cred->user->processes) >=
66008 task_rlimit(p, RLIMIT_NPROC)) {
66009 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
66010 @@ -1392,6 +1438,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66011 /* Need tasklist lock for parent etc handling! */
66012 write_lock_irq(&tasklist_lock);
66013
66014 + /* synchronizes with gr_set_acls() */
66015 + gr_copy_label(p);
66016 +
66017 /* CLONE_PARENT re-uses the old parent */
66018 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
66019 p->real_parent = current->real_parent;
66020 @@ -1502,6 +1551,8 @@ bad_fork_cleanup_count:
66021 bad_fork_free:
66022 free_task(p);
66023 fork_out:
66024 + gr_log_forkfail(retval);
66025 +
66026 return ERR_PTR(retval);
66027 }
66028
66029 @@ -1602,6 +1653,8 @@ long do_fork(unsigned long clone_flags,
66030 if (clone_flags & CLONE_PARENT_SETTID)
66031 put_user(nr, parent_tidptr);
66032
66033 + gr_handle_brute_check();
66034 +
66035 if (clone_flags & CLONE_VFORK) {
66036 p->vfork_done = &vfork;
66037 init_completion(&vfork);
66038 @@ -1700,7 +1753,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
66039 return 0;
66040
66041 /* don't need lock here; in the worst case we'll do useless copy */
66042 - if (fs->users == 1)
66043 + if (atomic_read(&fs->users) == 1)
66044 return 0;
66045
66046 *new_fsp = copy_fs_struct(fs);
66047 @@ -1789,7 +1842,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
66048 fs = current->fs;
66049 spin_lock(&fs->lock);
66050 current->fs = new_fs;
66051 - if (--fs->users)
66052 + gr_set_chroot_entries(current, &current->fs->root);
66053 + if (atomic_dec_return(&fs->users))
66054 new_fs = NULL;
66055 else
66056 new_fs = fs;
66057 diff --git a/kernel/futex.c b/kernel/futex.c
66058 index e2b0fb9..db818ac 100644
66059 --- a/kernel/futex.c
66060 +++ b/kernel/futex.c
66061 @@ -54,6 +54,7 @@
66062 #include <linux/mount.h>
66063 #include <linux/pagemap.h>
66064 #include <linux/syscalls.h>
66065 +#include <linux/ptrace.h>
66066 #include <linux/signal.h>
66067 #include <linux/export.h>
66068 #include <linux/magic.h>
66069 @@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
66070 struct page *page, *page_head;
66071 int err, ro = 0;
66072
66073 +#ifdef CONFIG_PAX_SEGMEXEC
66074 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66075 + return -EFAULT;
66076 +#endif
66077 +
66078 /*
66079 * The futex address must be "naturally" aligned.
66080 */
66081 @@ -2711,6 +2717,7 @@ static int __init futex_init(void)
66082 {
66083 u32 curval;
66084 int i;
66085 + mm_segment_t oldfs;
66086
66087 /*
66088 * This will fail and we want it. Some arch implementations do
66089 @@ -2722,8 +2729,11 @@ static int __init futex_init(void)
66090 * implementation, the non-functional ones will return
66091 * -ENOSYS.
66092 */
66093 + oldfs = get_fs();
66094 + set_fs(USER_DS);
66095 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
66096 futex_cmpxchg_enabled = 1;
66097 + set_fs(oldfs);
66098
66099 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
66100 plist_head_init(&futex_queues[i].chain);
66101 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66102 index 9b22d03..6295b62 100644
66103 --- a/kernel/gcov/base.c
66104 +++ b/kernel/gcov/base.c
66105 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
66106 }
66107
66108 #ifdef CONFIG_MODULES
66109 -static inline int within(void *addr, void *start, unsigned long size)
66110 -{
66111 - return ((addr >= start) && (addr < start + size));
66112 -}
66113 -
66114 /* Update list and generate events when modules are unloaded. */
66115 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66116 void *data)
66117 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66118 prev = NULL;
66119 /* Remove entries located in module from linked list. */
66120 for (info = gcov_info_head; info; info = info->next) {
66121 - if (within(info, mod->module_core, mod->core_size)) {
66122 + if (within_module_core_rw((unsigned long)info, mod)) {
66123 if (prev)
66124 prev->next = info->next;
66125 else
66126 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
66127 index 6db7a5e..25b6648 100644
66128 --- a/kernel/hrtimer.c
66129 +++ b/kernel/hrtimer.c
66130 @@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
66131 local_irq_restore(flags);
66132 }
66133
66134 -static void run_hrtimer_softirq(struct softirq_action *h)
66135 +static void run_hrtimer_softirq(void)
66136 {
66137 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
66138
66139 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
66140 index 4304919..408c4c0 100644
66141 --- a/kernel/jump_label.c
66142 +++ b/kernel/jump_label.c
66143 @@ -13,6 +13,7 @@
66144 #include <linux/sort.h>
66145 #include <linux/err.h>
66146 #include <linux/static_key.h>
66147 +#include <linux/mm.h>
66148
66149 #ifdef HAVE_JUMP_LABEL
66150
66151 @@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
66152
66153 size = (((unsigned long)stop - (unsigned long)start)
66154 / sizeof(struct jump_entry));
66155 + pax_open_kernel();
66156 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66157 + pax_close_kernel();
66158 }
66159
66160 static void jump_label_update(struct static_key *key, int enable);
66161 @@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
66162 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66163 struct jump_entry *iter;
66164
66165 + pax_open_kernel();
66166 for (iter = iter_start; iter < iter_stop; iter++) {
66167 if (within_module_init(iter->code, mod))
66168 iter->code = 0;
66169 }
66170 + pax_close_kernel();
66171 }
66172
66173 static int
66174 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
66175 index 079f1d3..4e80e69 100644
66176 --- a/kernel/kallsyms.c
66177 +++ b/kernel/kallsyms.c
66178 @@ -11,6 +11,9 @@
66179 * Changed the compression method from stem compression to "table lookup"
66180 * compression (see scripts/kallsyms.c for a more complete description)
66181 */
66182 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66183 +#define __INCLUDED_BY_HIDESYM 1
66184 +#endif
66185 #include <linux/kallsyms.h>
66186 #include <linux/module.h>
66187 #include <linux/init.h>
66188 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
66189
66190 static inline int is_kernel_inittext(unsigned long addr)
66191 {
66192 + if (system_state != SYSTEM_BOOTING)
66193 + return 0;
66194 +
66195 if (addr >= (unsigned long)_sinittext
66196 && addr <= (unsigned long)_einittext)
66197 return 1;
66198 return 0;
66199 }
66200
66201 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66202 +#ifdef CONFIG_MODULES
66203 +static inline int is_module_text(unsigned long addr)
66204 +{
66205 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66206 + return 1;
66207 +
66208 + addr = ktla_ktva(addr);
66209 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66210 +}
66211 +#else
66212 +static inline int is_module_text(unsigned long addr)
66213 +{
66214 + return 0;
66215 +}
66216 +#endif
66217 +#endif
66218 +
66219 static inline int is_kernel_text(unsigned long addr)
66220 {
66221 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
66222 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
66223
66224 static inline int is_kernel(unsigned long addr)
66225 {
66226 +
66227 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66228 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
66229 + return 1;
66230 +
66231 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66232 +#else
66233 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
66234 +#endif
66235 +
66236 return 1;
66237 return in_gate_area_no_mm(addr);
66238 }
66239
66240 static int is_ksym_addr(unsigned long addr)
66241 {
66242 +
66243 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66244 + if (is_module_text(addr))
66245 + return 0;
66246 +#endif
66247 +
66248 if (all_var)
66249 return is_kernel(addr);
66250
66251 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
66252
66253 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66254 {
66255 - iter->name[0] = '\0';
66256 iter->nameoff = get_symbol_offset(new_pos);
66257 iter->pos = new_pos;
66258 }
66259 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
66260 {
66261 struct kallsym_iter *iter = m->private;
66262
66263 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66264 + if (current_uid())
66265 + return 0;
66266 +#endif
66267 +
66268 /* Some debugging symbols have no name. Ignore them. */
66269 if (!iter->name[0])
66270 return 0;
66271 @@ -515,11 +558,22 @@ static int s_show(struct seq_file *m, void *p)
66272 */
66273 type = iter->exported ? toupper(iter->type) :
66274 tolower(iter->type);
66275 +
66276 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66277 + seq_printf(m, "%pP %c %s\t[%s]\n", (void *)iter->value,
66278 + type, iter->name, iter->module_name);
66279 +#else
66280 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
66281 type, iter->name, iter->module_name);
66282 +#endif
66283 } else
66284 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66285 + seq_printf(m, "%pP %c %s\n", (void *)iter->value,
66286 + iter->type, iter->name);
66287 +#else
66288 seq_printf(m, "%pK %c %s\n", (void *)iter->value,
66289 iter->type, iter->name);
66290 +#endif
66291 return 0;
66292 }
66293
66294 @@ -540,7 +594,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
66295 struct kallsym_iter *iter;
66296 int ret;
66297
66298 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66299 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66300 if (!iter)
66301 return -ENOMEM;
66302 reset_iter(iter, 0);
66303 diff --git a/kernel/kexec.c b/kernel/kexec.c
66304 index 4e2e472..cd0c7ae 100644
66305 --- a/kernel/kexec.c
66306 +++ b/kernel/kexec.c
66307 @@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
66308 unsigned long flags)
66309 {
66310 struct compat_kexec_segment in;
66311 - struct kexec_segment out, __user *ksegments;
66312 + struct kexec_segment out;
66313 + struct kexec_segment __user *ksegments;
66314 unsigned long i, result;
66315
66316 /* Don't allow clients that don't understand the native
66317 diff --git a/kernel/kmod.c b/kernel/kmod.c
66318 index 05698a7..a4c1e3a 100644
66319 --- a/kernel/kmod.c
66320 +++ b/kernel/kmod.c
66321 @@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
66322 kfree(info->argv);
66323 }
66324
66325 -static int call_modprobe(char *module_name, int wait)
66326 +static int call_modprobe(char *module_name, char *module_param, int wait)
66327 {
66328 static char *envp[] = {
66329 "HOME=/",
66330 @@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
66331 NULL
66332 };
66333
66334 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
66335 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
66336 if (!argv)
66337 goto out;
66338
66339 @@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
66340 argv[1] = "-q";
66341 argv[2] = "--";
66342 argv[3] = module_name; /* check free_modprobe_argv() */
66343 - argv[4] = NULL;
66344 + argv[4] = module_param;
66345 + argv[5] = NULL;
66346
66347 return call_usermodehelper_fns(modprobe_path, argv, envp,
66348 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
66349 @@ -112,9 +113,8 @@ out:
66350 * If module auto-loading support is disabled then this function
66351 * becomes a no-operation.
66352 */
66353 -int __request_module(bool wait, const char *fmt, ...)
66354 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66355 {
66356 - va_list args;
66357 char module_name[MODULE_NAME_LEN];
66358 unsigned int max_modprobes;
66359 int ret;
66360 @@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
66361 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66362 static int kmod_loop_msg;
66363
66364 - va_start(args, fmt);
66365 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66366 - va_end(args);
66367 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66368 if (ret >= MODULE_NAME_LEN)
66369 return -ENAMETOOLONG;
66370
66371 @@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
66372 if (ret)
66373 return ret;
66374
66375 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66376 + if (!current_uid()) {
66377 + /* hack to workaround consolekit/udisks stupidity */
66378 + read_lock(&tasklist_lock);
66379 + if (!strcmp(current->comm, "mount") &&
66380 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66381 + read_unlock(&tasklist_lock);
66382 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66383 + return -EPERM;
66384 + }
66385 + read_unlock(&tasklist_lock);
66386 + }
66387 +#endif
66388 +
66389 /* If modprobe needs a service that is in a module, we get a recursive
66390 * loop. Limit the number of running kmod threads to max_threads/2 or
66391 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
66392 @@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
66393
66394 trace_module_request(module_name, wait, _RET_IP_);
66395
66396 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66397 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66398
66399 atomic_dec(&kmod_concurrent);
66400 return ret;
66401 }
66402 +
66403 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66404 +{
66405 + va_list args;
66406 + int ret;
66407 +
66408 + va_start(args, fmt);
66409 + ret = ____request_module(wait, module_param, fmt, args);
66410 + va_end(args);
66411 +
66412 + return ret;
66413 +}
66414 +
66415 +int __request_module(bool wait, const char *fmt, ...)
66416 +{
66417 + va_list args;
66418 + int ret;
66419 +
66420 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66421 + if (current_uid()) {
66422 + char module_param[MODULE_NAME_LEN];
66423 +
66424 + memset(module_param, 0, sizeof(module_param));
66425 +
66426 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66427 +
66428 + va_start(args, fmt);
66429 + ret = ____request_module(wait, module_param, fmt, args);
66430 + va_end(args);
66431 +
66432 + return ret;
66433 + }
66434 +#endif
66435 +
66436 + va_start(args, fmt);
66437 + ret = ____request_module(wait, NULL, fmt, args);
66438 + va_end(args);
66439 +
66440 + return ret;
66441 +}
66442 +
66443 EXPORT_SYMBOL(__request_module);
66444 #endif /* CONFIG_MODULES */
66445
66446 @@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
66447 *
66448 * Thus the __user pointer cast is valid here.
66449 */
66450 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
66451 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66452
66453 /*
66454 * If ret is 0, either ____call_usermodehelper failed and the
66455 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
66456 index c62b854..cb67968 100644
66457 --- a/kernel/kprobes.c
66458 +++ b/kernel/kprobes.c
66459 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
66460 * kernel image and loaded module images reside. This is required
66461 * so x86_64 can correctly handle the %rip-relative fixups.
66462 */
66463 - kip->insns = module_alloc(PAGE_SIZE);
66464 + kip->insns = module_alloc_exec(PAGE_SIZE);
66465 if (!kip->insns) {
66466 kfree(kip);
66467 return NULL;
66468 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
66469 */
66470 if (!list_is_singular(&kip->list)) {
66471 list_del(&kip->list);
66472 - module_free(NULL, kip->insns);
66473 + module_free_exec(NULL, kip->insns);
66474 kfree(kip);
66475 }
66476 return 1;
66477 @@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
66478 {
66479 int i, err = 0;
66480 unsigned long offset = 0, size = 0;
66481 - char *modname, namebuf[128];
66482 + char *modname, namebuf[KSYM_NAME_LEN];
66483 const char *symbol_name;
66484 void *addr;
66485 struct kprobe_blackpoint *kb;
66486 @@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
66487 const char *sym = NULL;
66488 unsigned int i = *(loff_t *) v;
66489 unsigned long offset = 0;
66490 - char *modname, namebuf[128];
66491 + char *modname, namebuf[KSYM_NAME_LEN];
66492
66493 head = &kprobe_table[i];
66494 preempt_disable();
66495 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66496 index 4e316e1..5501eef 100644
66497 --- a/kernel/ksysfs.c
66498 +++ b/kernel/ksysfs.c
66499 @@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66500 {
66501 if (count+1 > UEVENT_HELPER_PATH_LEN)
66502 return -ENOENT;
66503 + if (!capable(CAP_SYS_ADMIN))
66504 + return -EPERM;
66505 memcpy(uevent_helper, buf, count);
66506 uevent_helper[count] = '\0';
66507 if (count && uevent_helper[count-1] == '\n')
66508 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
66509 index ea9ee45..67ebc8f 100644
66510 --- a/kernel/lockdep.c
66511 +++ b/kernel/lockdep.c
66512 @@ -590,6 +590,10 @@ static int static_obj(void *obj)
66513 end = (unsigned long) &_end,
66514 addr = (unsigned long) obj;
66515
66516 +#ifdef CONFIG_PAX_KERNEXEC
66517 + start = ktla_ktva(start);
66518 +#endif
66519 +
66520 /*
66521 * static variable?
66522 */
66523 @@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
66524 if (!static_obj(lock->key)) {
66525 debug_locks_off();
66526 printk("INFO: trying to register non-static key.\n");
66527 + printk("lock:%pS key:%pS.\n", lock, lock->key);
66528 printk("the code is fine but needs lockdep annotation.\n");
66529 printk("turning off the locking correctness validator.\n");
66530 dump_stack();
66531 @@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
66532 if (!class)
66533 return 0;
66534 }
66535 - atomic_inc((atomic_t *)&class->ops);
66536 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66537 if (very_verbose(class)) {
66538 printk("\nacquire class [%p] %s", class->key, class->name);
66539 if (class->name_version > 1)
66540 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
66541 index 91c32a0..b2c71c5 100644
66542 --- a/kernel/lockdep_proc.c
66543 +++ b/kernel/lockdep_proc.c
66544 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
66545
66546 static void print_name(struct seq_file *m, struct lock_class *class)
66547 {
66548 - char str[128];
66549 + char str[KSYM_NAME_LEN];
66550 const char *name = class->name;
66551
66552 if (!name) {
66553 diff --git a/kernel/module.c b/kernel/module.c
66554 index 78ac6ec..e87db0e 100644
66555 --- a/kernel/module.c
66556 +++ b/kernel/module.c
66557 @@ -58,6 +58,7 @@
66558 #include <linux/jump_label.h>
66559 #include <linux/pfn.h>
66560 #include <linux/bsearch.h>
66561 +#include <linux/grsecurity.h>
66562
66563 #define CREATE_TRACE_POINTS
66564 #include <trace/events/module.h>
66565 @@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
66566
66567 /* Bounds of module allocation, for speeding __module_address.
66568 * Protected by module_mutex. */
66569 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66570 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66571 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66572
66573 int register_module_notifier(struct notifier_block * nb)
66574 {
66575 @@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66576 return true;
66577
66578 list_for_each_entry_rcu(mod, &modules, list) {
66579 - struct symsearch arr[] = {
66580 + struct symsearch modarr[] = {
66581 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66582 NOT_GPL_ONLY, false },
66583 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
66584 @@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
66585 #endif
66586 };
66587
66588 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66589 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66590 return true;
66591 }
66592 return false;
66593 @@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
66594 static int percpu_modalloc(struct module *mod,
66595 unsigned long size, unsigned long align)
66596 {
66597 - if (align > PAGE_SIZE) {
66598 + if (align-1 >= PAGE_SIZE) {
66599 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
66600 mod->name, align, PAGE_SIZE);
66601 align = PAGE_SIZE;
66602 @@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
66603 static ssize_t show_coresize(struct module_attribute *mattr,
66604 struct module_kobject *mk, char *buffer)
66605 {
66606 - return sprintf(buffer, "%u\n", mk->mod->core_size);
66607 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66608 }
66609
66610 static struct module_attribute modinfo_coresize =
66611 @@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
66612 static ssize_t show_initsize(struct module_attribute *mattr,
66613 struct module_kobject *mk, char *buffer)
66614 {
66615 - return sprintf(buffer, "%u\n", mk->mod->init_size);
66616 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66617 }
66618
66619 static struct module_attribute modinfo_initsize =
66620 @@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
66621 */
66622 #ifdef CONFIG_SYSFS
66623
66624 -#ifdef CONFIG_KALLSYMS
66625 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66626 static inline bool sect_empty(const Elf_Shdr *sect)
66627 {
66628 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
66629 @@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
66630
66631 static void unset_module_core_ro_nx(struct module *mod)
66632 {
66633 - set_page_attributes(mod->module_core + mod->core_text_size,
66634 - mod->module_core + mod->core_size,
66635 + set_page_attributes(mod->module_core_rw,
66636 + mod->module_core_rw + mod->core_size_rw,
66637 set_memory_x);
66638 - set_page_attributes(mod->module_core,
66639 - mod->module_core + mod->core_ro_size,
66640 + set_page_attributes(mod->module_core_rx,
66641 + mod->module_core_rx + mod->core_size_rx,
66642 set_memory_rw);
66643 }
66644
66645 static void unset_module_init_ro_nx(struct module *mod)
66646 {
66647 - set_page_attributes(mod->module_init + mod->init_text_size,
66648 - mod->module_init + mod->init_size,
66649 + set_page_attributes(mod->module_init_rw,
66650 + mod->module_init_rw + mod->init_size_rw,
66651 set_memory_x);
66652 - set_page_attributes(mod->module_init,
66653 - mod->module_init + mod->init_ro_size,
66654 + set_page_attributes(mod->module_init_rx,
66655 + mod->module_init_rx + mod->init_size_rx,
66656 set_memory_rw);
66657 }
66658
66659 @@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
66660
66661 mutex_lock(&module_mutex);
66662 list_for_each_entry_rcu(mod, &modules, list) {
66663 - if ((mod->module_core) && (mod->core_text_size)) {
66664 - set_page_attributes(mod->module_core,
66665 - mod->module_core + mod->core_text_size,
66666 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66667 + set_page_attributes(mod->module_core_rx,
66668 + mod->module_core_rx + mod->core_size_rx,
66669 set_memory_rw);
66670 }
66671 - if ((mod->module_init) && (mod->init_text_size)) {
66672 - set_page_attributes(mod->module_init,
66673 - mod->module_init + mod->init_text_size,
66674 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66675 + set_page_attributes(mod->module_init_rx,
66676 + mod->module_init_rx + mod->init_size_rx,
66677 set_memory_rw);
66678 }
66679 }
66680 @@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
66681
66682 mutex_lock(&module_mutex);
66683 list_for_each_entry_rcu(mod, &modules, list) {
66684 - if ((mod->module_core) && (mod->core_text_size)) {
66685 - set_page_attributes(mod->module_core,
66686 - mod->module_core + mod->core_text_size,
66687 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
66688 + set_page_attributes(mod->module_core_rx,
66689 + mod->module_core_rx + mod->core_size_rx,
66690 set_memory_ro);
66691 }
66692 - if ((mod->module_init) && (mod->init_text_size)) {
66693 - set_page_attributes(mod->module_init,
66694 - mod->module_init + mod->init_text_size,
66695 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
66696 + set_page_attributes(mod->module_init_rx,
66697 + mod->module_init_rx + mod->init_size_rx,
66698 set_memory_ro);
66699 }
66700 }
66701 @@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
66702
66703 /* This may be NULL, but that's OK */
66704 unset_module_init_ro_nx(mod);
66705 - module_free(mod, mod->module_init);
66706 + module_free(mod, mod->module_init_rw);
66707 + module_free_exec(mod, mod->module_init_rx);
66708 kfree(mod->args);
66709 percpu_modfree(mod);
66710
66711 /* Free lock-classes: */
66712 - lockdep_free_key_range(mod->module_core, mod->core_size);
66713 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
66714 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
66715
66716 /* Finally, free the core (containing the module structure) */
66717 unset_module_core_ro_nx(mod);
66718 - module_free(mod, mod->module_core);
66719 + module_free_exec(mod, mod->module_core_rx);
66720 + module_free(mod, mod->module_core_rw);
66721
66722 #ifdef CONFIG_MPU
66723 update_protections(current->mm);
66724 @@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66725 int ret = 0;
66726 const struct kernel_symbol *ksym;
66727
66728 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66729 + int is_fs_load = 0;
66730 + int register_filesystem_found = 0;
66731 + char *p;
66732 +
66733 + p = strstr(mod->args, "grsec_modharden_fs");
66734 + if (p) {
66735 + char *endptr = p + strlen("grsec_modharden_fs");
66736 + /* copy \0 as well */
66737 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
66738 + is_fs_load = 1;
66739 + }
66740 +#endif
66741 +
66742 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
66743 const char *name = info->strtab + sym[i].st_name;
66744
66745 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66746 + /* it's a real shame this will never get ripped and copied
66747 + upstream! ;(
66748 + */
66749 + if (is_fs_load && !strcmp(name, "register_filesystem"))
66750 + register_filesystem_found = 1;
66751 +#endif
66752 +
66753 switch (sym[i].st_shndx) {
66754 case SHN_COMMON:
66755 /* We compiled with -fno-common. These are not
66756 @@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66757 ksym = resolve_symbol_wait(mod, info, name);
66758 /* Ok if resolved. */
66759 if (ksym && !IS_ERR(ksym)) {
66760 + pax_open_kernel();
66761 sym[i].st_value = ksym->value;
66762 + pax_close_kernel();
66763 break;
66764 }
66765
66766 @@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
66767 secbase = (unsigned long)mod_percpu(mod);
66768 else
66769 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
66770 + pax_open_kernel();
66771 sym[i].st_value += secbase;
66772 + pax_close_kernel();
66773 break;
66774 }
66775 }
66776
66777 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
66778 + if (is_fs_load && !register_filesystem_found) {
66779 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
66780 + ret = -EPERM;
66781 + }
66782 +#endif
66783 +
66784 return ret;
66785 }
66786
66787 @@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
66788 || s->sh_entsize != ~0UL
66789 || strstarts(sname, ".init"))
66790 continue;
66791 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
66792 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66793 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
66794 + else
66795 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
66796 pr_debug("\t%s\n", sname);
66797 }
66798 - switch (m) {
66799 - case 0: /* executable */
66800 - mod->core_size = debug_align(mod->core_size);
66801 - mod->core_text_size = mod->core_size;
66802 - break;
66803 - case 1: /* RO: text and ro-data */
66804 - mod->core_size = debug_align(mod->core_size);
66805 - mod->core_ro_size = mod->core_size;
66806 - break;
66807 - case 3: /* whole core */
66808 - mod->core_size = debug_align(mod->core_size);
66809 - break;
66810 - }
66811 }
66812
66813 pr_debug("Init section allocation order:\n");
66814 @@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
66815 || s->sh_entsize != ~0UL
66816 || !strstarts(sname, ".init"))
66817 continue;
66818 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
66819 - | INIT_OFFSET_MASK);
66820 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
66821 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
66822 + else
66823 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
66824 + s->sh_entsize |= INIT_OFFSET_MASK;
66825 pr_debug("\t%s\n", sname);
66826 }
66827 - switch (m) {
66828 - case 0: /* executable */
66829 - mod->init_size = debug_align(mod->init_size);
66830 - mod->init_text_size = mod->init_size;
66831 - break;
66832 - case 1: /* RO: text and ro-data */
66833 - mod->init_size = debug_align(mod->init_size);
66834 - mod->init_ro_size = mod->init_size;
66835 - break;
66836 - case 3: /* whole init */
66837 - mod->init_size = debug_align(mod->init_size);
66838 - break;
66839 - }
66840 }
66841 }
66842
66843 @@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66844
66845 /* Put symbol section at end of init part of module. */
66846 symsect->sh_flags |= SHF_ALLOC;
66847 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
66848 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
66849 info->index.sym) | INIT_OFFSET_MASK;
66850 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
66851
66852 @@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
66853 }
66854
66855 /* Append room for core symbols at end of core part. */
66856 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
66857 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
66858 - mod->core_size += strtab_size;
66859 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
66860 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
66861 + mod->core_size_rx += strtab_size;
66862
66863 /* Put string table section at end of init part of module. */
66864 strsect->sh_flags |= SHF_ALLOC;
66865 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
66866 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
66867 info->index.str) | INIT_OFFSET_MASK;
66868 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
66869 }
66870 @@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66871 /* Make sure we get permanent strtab: don't use info->strtab. */
66872 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
66873
66874 + pax_open_kernel();
66875 +
66876 /* Set types up while we still have access to sections. */
66877 for (i = 0; i < mod->num_symtab; i++)
66878 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
66879
66880 - mod->core_symtab = dst = mod->module_core + info->symoffs;
66881 - mod->core_strtab = s = mod->module_core + info->stroffs;
66882 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
66883 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
66884 src = mod->symtab;
66885 *dst = *src;
66886 *s++ = 0;
66887 @@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
66888 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
66889 }
66890 mod->core_num_syms = ndst;
66891 +
66892 + pax_close_kernel();
66893 }
66894 #else
66895 static inline void layout_symtab(struct module *mod, struct load_info *info)
66896 @@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
66897 return size == 0 ? NULL : vmalloc_exec(size);
66898 }
66899
66900 -static void *module_alloc_update_bounds(unsigned long size)
66901 +static void *module_alloc_update_bounds_rw(unsigned long size)
66902 {
66903 void *ret = module_alloc(size);
66904
66905 if (ret) {
66906 mutex_lock(&module_mutex);
66907 /* Update module bounds. */
66908 - if ((unsigned long)ret < module_addr_min)
66909 - module_addr_min = (unsigned long)ret;
66910 - if ((unsigned long)ret + size > module_addr_max)
66911 - module_addr_max = (unsigned long)ret + size;
66912 + if ((unsigned long)ret < module_addr_min_rw)
66913 + module_addr_min_rw = (unsigned long)ret;
66914 + if ((unsigned long)ret + size > module_addr_max_rw)
66915 + module_addr_max_rw = (unsigned long)ret + size;
66916 + mutex_unlock(&module_mutex);
66917 + }
66918 + return ret;
66919 +}
66920 +
66921 +static void *module_alloc_update_bounds_rx(unsigned long size)
66922 +{
66923 + void *ret = module_alloc_exec(size);
66924 +
66925 + if (ret) {
66926 + mutex_lock(&module_mutex);
66927 + /* Update module bounds. */
66928 + if ((unsigned long)ret < module_addr_min_rx)
66929 + module_addr_min_rx = (unsigned long)ret;
66930 + if ((unsigned long)ret + size > module_addr_max_rx)
66931 + module_addr_max_rx = (unsigned long)ret + size;
66932 mutex_unlock(&module_mutex);
66933 }
66934 return ret;
66935 @@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
66936 static int check_modinfo(struct module *mod, struct load_info *info)
66937 {
66938 const char *modmagic = get_modinfo(info, "vermagic");
66939 + const char *license = get_modinfo(info, "license");
66940 int err;
66941
66942 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
66943 + if (!license || !license_is_gpl_compatible(license))
66944 + return -ENOEXEC;
66945 +#endif
66946 +
66947 /* This is allowed: modprobe --force will invalidate it. */
66948 if (!modmagic) {
66949 err = try_to_force_load(mod, "bad vermagic");
66950 @@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
66951 }
66952
66953 /* Set up license info based on the info section */
66954 - set_license(mod, get_modinfo(info, "license"));
66955 + set_license(mod, license);
66956
66957 return 0;
66958 }
66959 @@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
66960 void *ptr;
66961
66962 /* Do the allocs. */
66963 - ptr = module_alloc_update_bounds(mod->core_size);
66964 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
66965 /*
66966 * The pointer to this block is stored in the module structure
66967 * which is inside the block. Just mark it as not being a
66968 @@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
66969 if (!ptr)
66970 return -ENOMEM;
66971
66972 - memset(ptr, 0, mod->core_size);
66973 - mod->module_core = ptr;
66974 + memset(ptr, 0, mod->core_size_rw);
66975 + mod->module_core_rw = ptr;
66976
66977 - ptr = module_alloc_update_bounds(mod->init_size);
66978 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
66979 /*
66980 * The pointer to this block is stored in the module structure
66981 * which is inside the block. This block doesn't need to be
66982 * scanned as it contains data and code that will be freed
66983 * after the module is initialized.
66984 */
66985 - kmemleak_ignore(ptr);
66986 - if (!ptr && mod->init_size) {
66987 - module_free(mod, mod->module_core);
66988 + kmemleak_not_leak(ptr);
66989 + if (!ptr && mod->init_size_rw) {
66990 + module_free(mod, mod->module_core_rw);
66991 return -ENOMEM;
66992 }
66993 - memset(ptr, 0, mod->init_size);
66994 - mod->module_init = ptr;
66995 + memset(ptr, 0, mod->init_size_rw);
66996 + mod->module_init_rw = ptr;
66997 +
66998 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
66999 + kmemleak_not_leak(ptr);
67000 + if (!ptr) {
67001 + module_free(mod, mod->module_init_rw);
67002 + module_free(mod, mod->module_core_rw);
67003 + return -ENOMEM;
67004 + }
67005 +
67006 + pax_open_kernel();
67007 + memset(ptr, 0, mod->core_size_rx);
67008 + pax_close_kernel();
67009 + mod->module_core_rx = ptr;
67010 +
67011 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67012 + kmemleak_not_leak(ptr);
67013 + if (!ptr && mod->init_size_rx) {
67014 + module_free_exec(mod, mod->module_core_rx);
67015 + module_free(mod, mod->module_init_rw);
67016 + module_free(mod, mod->module_core_rw);
67017 + return -ENOMEM;
67018 + }
67019 +
67020 + pax_open_kernel();
67021 + memset(ptr, 0, mod->init_size_rx);
67022 + pax_close_kernel();
67023 + mod->module_init_rx = ptr;
67024
67025 /* Transfer each section which specifies SHF_ALLOC */
67026 pr_debug("final section addresses:\n");
67027 @@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
67028 if (!(shdr->sh_flags & SHF_ALLOC))
67029 continue;
67030
67031 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
67032 - dest = mod->module_init
67033 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67034 - else
67035 - dest = mod->module_core + shdr->sh_entsize;
67036 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67037 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67038 + dest = mod->module_init_rw
67039 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67040 + else
67041 + dest = mod->module_init_rx
67042 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
67043 + } else {
67044 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67045 + dest = mod->module_core_rw + shdr->sh_entsize;
67046 + else
67047 + dest = mod->module_core_rx + shdr->sh_entsize;
67048 + }
67049 +
67050 + if (shdr->sh_type != SHT_NOBITS) {
67051 +
67052 +#ifdef CONFIG_PAX_KERNEXEC
67053 +#ifdef CONFIG_X86_64
67054 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67055 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67056 +#endif
67057 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
67058 + pax_open_kernel();
67059 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67060 + pax_close_kernel();
67061 + } else
67062 +#endif
67063
67064 - if (shdr->sh_type != SHT_NOBITS)
67065 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
67066 + }
67067 /* Update sh_addr to point to copy in image. */
67068 - shdr->sh_addr = (unsigned long)dest;
67069 +
67070 +#ifdef CONFIG_PAX_KERNEXEC
67071 + if (shdr->sh_flags & SHF_EXECINSTR)
67072 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
67073 + else
67074 +#endif
67075 +
67076 + shdr->sh_addr = (unsigned long)dest;
67077 pr_debug("\t0x%lx %s\n",
67078 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
67079 }
67080 @@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
67081 * Do it before processing of module parameters, so the module
67082 * can provide parameter accessor functions of its own.
67083 */
67084 - if (mod->module_init)
67085 - flush_icache_range((unsigned long)mod->module_init,
67086 - (unsigned long)mod->module_init
67087 - + mod->init_size);
67088 - flush_icache_range((unsigned long)mod->module_core,
67089 - (unsigned long)mod->module_core + mod->core_size);
67090 + if (mod->module_init_rx)
67091 + flush_icache_range((unsigned long)mod->module_init_rx,
67092 + (unsigned long)mod->module_init_rx
67093 + + mod->init_size_rx);
67094 + flush_icache_range((unsigned long)mod->module_core_rx,
67095 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
67096
67097 set_fs(old_fs);
67098 }
67099 @@ -2833,8 +2933,10 @@ out:
67100 static void module_deallocate(struct module *mod, struct load_info *info)
67101 {
67102 percpu_modfree(mod);
67103 - module_free(mod, mod->module_init);
67104 - module_free(mod, mod->module_core);
67105 + module_free_exec(mod, mod->module_init_rx);
67106 + module_free_exec(mod, mod->module_core_rx);
67107 + module_free(mod, mod->module_init_rw);
67108 + module_free(mod, mod->module_core_rw);
67109 }
67110
67111 int __weak module_finalize(const Elf_Ehdr *hdr,
67112 @@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
67113 if (err)
67114 goto free_unload;
67115
67116 + /* Now copy in args */
67117 + mod->args = strndup_user(uargs, ~0UL >> 1);
67118 + if (IS_ERR(mod->args)) {
67119 + err = PTR_ERR(mod->args);
67120 + goto free_unload;
67121 + }
67122 +
67123 /* Set up MODINFO_ATTR fields */
67124 setup_modinfo(mod, &info);
67125
67126 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67127 + {
67128 + char *p, *p2;
67129 +
67130 + if (strstr(mod->args, "grsec_modharden_netdev")) {
67131 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67132 + err = -EPERM;
67133 + goto free_modinfo;
67134 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67135 + p += strlen("grsec_modharden_normal");
67136 + p2 = strstr(p, "_");
67137 + if (p2) {
67138 + *p2 = '\0';
67139 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67140 + *p2 = '_';
67141 + }
67142 + err = -EPERM;
67143 + goto free_modinfo;
67144 + }
67145 + }
67146 +#endif
67147 +
67148 /* Fix up syms, so that st_value is a pointer to location. */
67149 err = simplify_symbols(mod, &info);
67150 if (err < 0)
67151 @@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
67152
67153 flush_module_icache(mod);
67154
67155 - /* Now copy in args */
67156 - mod->args = strndup_user(uargs, ~0UL >> 1);
67157 - if (IS_ERR(mod->args)) {
67158 - err = PTR_ERR(mod->args);
67159 - goto free_arch_cleanup;
67160 - }
67161 -
67162 /* Mark state as coming so strong_try_module_get() ignores us. */
67163 mod->state = MODULE_STATE_COMING;
67164
67165 @@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
67166 unlock:
67167 mutex_unlock(&module_mutex);
67168 synchronize_sched();
67169 - kfree(mod->args);
67170 - free_arch_cleanup:
67171 module_arch_cleanup(mod);
67172 free_modinfo:
67173 free_modinfo(mod);
67174 + kfree(mod->args);
67175 free_unload:
67176 module_unload_free(mod);
67177 free_module:
67178 @@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67179 MODULE_STATE_COMING, mod);
67180
67181 /* Set RO and NX regions for core */
67182 - set_section_ro_nx(mod->module_core,
67183 - mod->core_text_size,
67184 - mod->core_ro_size,
67185 - mod->core_size);
67186 + set_section_ro_nx(mod->module_core_rx,
67187 + mod->core_size_rx,
67188 + mod->core_size_rx,
67189 + mod->core_size_rx);
67190
67191 /* Set RO and NX regions for init */
67192 - set_section_ro_nx(mod->module_init,
67193 - mod->init_text_size,
67194 - mod->init_ro_size,
67195 - mod->init_size);
67196 + set_section_ro_nx(mod->module_init_rx,
67197 + mod->init_size_rx,
67198 + mod->init_size_rx,
67199 + mod->init_size_rx);
67200
67201 do_mod_ctors(mod);
67202 /* Start the module */
67203 @@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
67204 mod->strtab = mod->core_strtab;
67205 #endif
67206 unset_module_init_ro_nx(mod);
67207 - module_free(mod, mod->module_init);
67208 - mod->module_init = NULL;
67209 - mod->init_size = 0;
67210 - mod->init_ro_size = 0;
67211 - mod->init_text_size = 0;
67212 + module_free(mod, mod->module_init_rw);
67213 + module_free_exec(mod, mod->module_init_rx);
67214 + mod->module_init_rw = NULL;
67215 + mod->module_init_rx = NULL;
67216 + mod->init_size_rw = 0;
67217 + mod->init_size_rx = 0;
67218 mutex_unlock(&module_mutex);
67219
67220 return 0;
67221 @@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
67222 unsigned long nextval;
67223
67224 /* At worse, next value is at end of module */
67225 - if (within_module_init(addr, mod))
67226 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
67227 + if (within_module_init_rx(addr, mod))
67228 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67229 + else if (within_module_init_rw(addr, mod))
67230 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67231 + else if (within_module_core_rx(addr, mod))
67232 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67233 + else if (within_module_core_rw(addr, mod))
67234 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67235 else
67236 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
67237 + return NULL;
67238
67239 /* Scan for closest preceding symbol, and next symbol. (ELF
67240 starts real symbols at 1). */
67241 @@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
67242 char buf[8];
67243
67244 seq_printf(m, "%s %u",
67245 - mod->name, mod->init_size + mod->core_size);
67246 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67247 print_unload_info(m, mod);
67248
67249 /* Informative for users. */
67250 @@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
67251 mod->state == MODULE_STATE_COMING ? "Loading":
67252 "Live");
67253 /* Used by oprofile and other similar tools. */
67254 - seq_printf(m, " 0x%pK", mod->module_core);
67255 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
67256
67257 /* Taints info */
67258 if (mod->taints)
67259 @@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
67260
67261 static int __init proc_modules_init(void)
67262 {
67263 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67264 +#ifdef CONFIG_GRKERNSEC_PROC_USER
67265 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67266 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67267 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67268 +#else
67269 proc_create("modules", 0, NULL, &proc_modules_operations);
67270 +#endif
67271 +#else
67272 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67273 +#endif
67274 return 0;
67275 }
67276 module_init(proc_modules_init);
67277 @@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
67278 {
67279 struct module *mod;
67280
67281 - if (addr < module_addr_min || addr > module_addr_max)
67282 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67283 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
67284 return NULL;
67285
67286 list_for_each_entry_rcu(mod, &modules, list)
67287 - if (within_module_core(addr, mod)
67288 - || within_module_init(addr, mod))
67289 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
67290 return mod;
67291 return NULL;
67292 }
67293 @@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
67294 */
67295 struct module *__module_text_address(unsigned long addr)
67296 {
67297 - struct module *mod = __module_address(addr);
67298 + struct module *mod;
67299 +
67300 +#ifdef CONFIG_X86_32
67301 + addr = ktla_ktva(addr);
67302 +#endif
67303 +
67304 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67305 + return NULL;
67306 +
67307 + mod = __module_address(addr);
67308 +
67309 if (mod) {
67310 /* Make sure it's within the text section. */
67311 - if (!within(addr, mod->module_init, mod->init_text_size)
67312 - && !within(addr, mod->module_core, mod->core_text_size))
67313 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67314 mod = NULL;
67315 }
67316 return mod;
67317 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
67318 index 7e3443f..b2a1e6b 100644
67319 --- a/kernel/mutex-debug.c
67320 +++ b/kernel/mutex-debug.c
67321 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
67322 }
67323
67324 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67325 - struct thread_info *ti)
67326 + struct task_struct *task)
67327 {
67328 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67329
67330 /* Mark the current thread as blocked on the lock: */
67331 - ti->task->blocked_on = waiter;
67332 + task->blocked_on = waiter;
67333 }
67334
67335 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67336 - struct thread_info *ti)
67337 + struct task_struct *task)
67338 {
67339 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67340 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67341 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67342 - ti->task->blocked_on = NULL;
67343 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
67344 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
67345 + task->blocked_on = NULL;
67346
67347 list_del_init(&waiter->list);
67348 waiter->task = NULL;
67349 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67350 index 0799fd3..d06ae3b 100644
67351 --- a/kernel/mutex-debug.h
67352 +++ b/kernel/mutex-debug.h
67353 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
67354 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67355 extern void debug_mutex_add_waiter(struct mutex *lock,
67356 struct mutex_waiter *waiter,
67357 - struct thread_info *ti);
67358 + struct task_struct *task);
67359 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67360 - struct thread_info *ti);
67361 + struct task_struct *task);
67362 extern void debug_mutex_unlock(struct mutex *lock);
67363 extern void debug_mutex_init(struct mutex *lock, const char *name,
67364 struct lock_class_key *key);
67365 diff --git a/kernel/mutex.c b/kernel/mutex.c
67366 index a307cc9..27fd2e9 100644
67367 --- a/kernel/mutex.c
67368 +++ b/kernel/mutex.c
67369 @@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67370 spin_lock_mutex(&lock->wait_lock, flags);
67371
67372 debug_mutex_lock_common(lock, &waiter);
67373 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67374 + debug_mutex_add_waiter(lock, &waiter, task);
67375
67376 /* add waiting tasks to the end of the waitqueue (FIFO): */
67377 list_add_tail(&waiter.list, &lock->wait_list);
67378 @@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67379 * TASK_UNINTERRUPTIBLE case.)
67380 */
67381 if (unlikely(signal_pending_state(state, task))) {
67382 - mutex_remove_waiter(lock, &waiter,
67383 - task_thread_info(task));
67384 + mutex_remove_waiter(lock, &waiter, task);
67385 mutex_release(&lock->dep_map, 1, ip);
67386 spin_unlock_mutex(&lock->wait_lock, flags);
67387
67388 @@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67389 done:
67390 lock_acquired(&lock->dep_map, ip);
67391 /* got the lock - rejoice! */
67392 - mutex_remove_waiter(lock, &waiter, current_thread_info());
67393 + mutex_remove_waiter(lock, &waiter, task);
67394 mutex_set_owner(lock);
67395
67396 /* set it to 0 if there are no waiters left: */
67397 diff --git a/kernel/panic.c b/kernel/panic.c
67398 index 9ed023b..e49543e 100644
67399 --- a/kernel/panic.c
67400 +++ b/kernel/panic.c
67401 @@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
67402 const char *board;
67403
67404 printk(KERN_WARNING "------------[ cut here ]------------\n");
67405 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67406 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67407 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67408 if (board)
67409 printk(KERN_WARNING "Hardware name: %s\n", board);
67410 @@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
67411 */
67412 void __stack_chk_fail(void)
67413 {
67414 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
67415 + dump_stack();
67416 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
67417 __builtin_return_address(0));
67418 }
67419 EXPORT_SYMBOL(__stack_chk_fail);
67420 diff --git a/kernel/pid.c b/kernel/pid.c
67421 index 9f08dfa..6765c40 100644
67422 --- a/kernel/pid.c
67423 +++ b/kernel/pid.c
67424 @@ -33,6 +33,7 @@
67425 #include <linux/rculist.h>
67426 #include <linux/bootmem.h>
67427 #include <linux/hash.h>
67428 +#include <linux/security.h>
67429 #include <linux/pid_namespace.h>
67430 #include <linux/init_task.h>
67431 #include <linux/syscalls.h>
67432 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
67433
67434 int pid_max = PID_MAX_DEFAULT;
67435
67436 -#define RESERVED_PIDS 300
67437 +#define RESERVED_PIDS 500
67438
67439 int pid_max_min = RESERVED_PIDS + 1;
67440 int pid_max_max = PID_MAX_LIMIT;
67441 @@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
67442 */
67443 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67444 {
67445 + struct task_struct *task;
67446 +
67447 rcu_lockdep_assert(rcu_read_lock_held(),
67448 "find_task_by_pid_ns() needs rcu_read_lock()"
67449 " protection");
67450 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67451 +
67452 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67453 +
67454 + if (gr_pid_is_chrooted(task))
67455 + return NULL;
67456 +
67457 + return task;
67458 }
67459
67460 struct task_struct *find_task_by_vpid(pid_t vnr)
67461 @@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
67462 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67463 }
67464
67465 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67466 +{
67467 + rcu_lockdep_assert(rcu_read_lock_held(),
67468 + "find_task_by_pid_ns() needs rcu_read_lock()"
67469 + " protection");
67470 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67471 +}
67472 +
67473 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67474 {
67475 struct pid *pid;
67476 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
67477 index 125cb67..a4d1c30 100644
67478 --- a/kernel/posix-cpu-timers.c
67479 +++ b/kernel/posix-cpu-timers.c
67480 @@ -6,6 +6,7 @@
67481 #include <linux/posix-timers.h>
67482 #include <linux/errno.h>
67483 #include <linux/math64.h>
67484 +#include <linux/security.h>
67485 #include <asm/uaccess.h>
67486 #include <linux/kernel_stat.h>
67487 #include <trace/events/timer.h>
67488 @@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
67489
67490 static __init int init_posix_cpu_timers(void)
67491 {
67492 - struct k_clock process = {
67493 + static struct k_clock process = {
67494 .clock_getres = process_cpu_clock_getres,
67495 .clock_get = process_cpu_clock_get,
67496 .timer_create = process_cpu_timer_create,
67497 .nsleep = process_cpu_nsleep,
67498 .nsleep_restart = process_cpu_nsleep_restart,
67499 };
67500 - struct k_clock thread = {
67501 + static struct k_clock thread = {
67502 .clock_getres = thread_cpu_clock_getres,
67503 .clock_get = thread_cpu_clock_get,
67504 .timer_create = thread_cpu_timer_create,
67505 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
67506 index 69185ae..cc2847a 100644
67507 --- a/kernel/posix-timers.c
67508 +++ b/kernel/posix-timers.c
67509 @@ -43,6 +43,7 @@
67510 #include <linux/idr.h>
67511 #include <linux/posix-clock.h>
67512 #include <linux/posix-timers.h>
67513 +#include <linux/grsecurity.h>
67514 #include <linux/syscalls.h>
67515 #include <linux/wait.h>
67516 #include <linux/workqueue.h>
67517 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67518 * which we beg off on and pass to do_sys_settimeofday().
67519 */
67520
67521 -static struct k_clock posix_clocks[MAX_CLOCKS];
67522 +static struct k_clock *posix_clocks[MAX_CLOCKS];
67523
67524 /*
67525 * These ones are defined below.
67526 @@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
67527 */
67528 static __init int init_posix_timers(void)
67529 {
67530 - struct k_clock clock_realtime = {
67531 + static struct k_clock clock_realtime = {
67532 .clock_getres = hrtimer_get_res,
67533 .clock_get = posix_clock_realtime_get,
67534 .clock_set = posix_clock_realtime_set,
67535 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
67536 .timer_get = common_timer_get,
67537 .timer_del = common_timer_del,
67538 };
67539 - struct k_clock clock_monotonic = {
67540 + static struct k_clock clock_monotonic = {
67541 .clock_getres = hrtimer_get_res,
67542 .clock_get = posix_ktime_get_ts,
67543 .nsleep = common_nsleep,
67544 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
67545 .timer_get = common_timer_get,
67546 .timer_del = common_timer_del,
67547 };
67548 - struct k_clock clock_monotonic_raw = {
67549 + static struct k_clock clock_monotonic_raw = {
67550 .clock_getres = hrtimer_get_res,
67551 .clock_get = posix_get_monotonic_raw,
67552 };
67553 - struct k_clock clock_realtime_coarse = {
67554 + static struct k_clock clock_realtime_coarse = {
67555 .clock_getres = posix_get_coarse_res,
67556 .clock_get = posix_get_realtime_coarse,
67557 };
67558 - struct k_clock clock_monotonic_coarse = {
67559 + static struct k_clock clock_monotonic_coarse = {
67560 .clock_getres = posix_get_coarse_res,
67561 .clock_get = posix_get_monotonic_coarse,
67562 };
67563 - struct k_clock clock_boottime = {
67564 + static struct k_clock clock_boottime = {
67565 .clock_getres = hrtimer_get_res,
67566 .clock_get = posix_get_boottime,
67567 .nsleep = common_nsleep,
67568 @@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
67569 return;
67570 }
67571
67572 - posix_clocks[clock_id] = *new_clock;
67573 + posix_clocks[clock_id] = new_clock;
67574 }
67575 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
67576
67577 @@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
67578 return (id & CLOCKFD_MASK) == CLOCKFD ?
67579 &clock_posix_dynamic : &clock_posix_cpu;
67580
67581 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67582 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67583 return NULL;
67584 - return &posix_clocks[id];
67585 + return posix_clocks[id];
67586 }
67587
67588 static int common_timer_create(struct k_itimer *new_timer)
67589 @@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
67590 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67591 return -EFAULT;
67592
67593 + /* only the CLOCK_REALTIME clock can be set, all other clocks
67594 + have their clock_set fptr set to a nosettime dummy function
67595 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67596 + call common_clock_set, which calls do_sys_settimeofday, which
67597 + we hook
67598 + */
67599 +
67600 return kc->clock_set(which_clock, &new_tp);
67601 }
67602
67603 diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67604 index d523593..68197a4 100644
67605 --- a/kernel/power/poweroff.c
67606 +++ b/kernel/power/poweroff.c
67607 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
67608 .enable_mask = SYSRQ_ENABLE_BOOT,
67609 };
67610
67611 -static int pm_sysrq_init(void)
67612 +static int __init pm_sysrq_init(void)
67613 {
67614 register_sysrq_key('o', &sysrq_poweroff_op);
67615 return 0;
67616 diff --git a/kernel/power/process.c b/kernel/power/process.c
67617 index 19db29f..33b52b6 100644
67618 --- a/kernel/power/process.c
67619 +++ b/kernel/power/process.c
67620 @@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
67621 u64 elapsed_csecs64;
67622 unsigned int elapsed_csecs;
67623 bool wakeup = false;
67624 + bool timedout = false;
67625
67626 do_gettimeofday(&start);
67627
67628 @@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
67629
67630 while (true) {
67631 todo = 0;
67632 + if (time_after(jiffies, end_time))
67633 + timedout = true;
67634 read_lock(&tasklist_lock);
67635 do_each_thread(g, p) {
67636 if (p == current || !freeze_task(p))
67637 @@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67638 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67639 * transition can't race with task state testing here.
67640 */
67641 - if (!task_is_stopped_or_traced(p) &&
67642 - !freezer_should_skip(p))
67643 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67644 todo++;
67645 + if (timedout) {
67646 + printk(KERN_ERR "Task refusing to freeze:\n");
67647 + sched_show_task(p);
67648 + }
67649 + }
67650 } while_each_thread(g, p);
67651 read_unlock(&tasklist_lock);
67652
67653 @@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
67654 todo += wq_busy;
67655 }
67656
67657 - if (!todo || time_after(jiffies, end_time))
67658 + if (!todo || timedout)
67659 break;
67660
67661 if (pm_wakeup_pending()) {
67662 diff --git a/kernel/printk.c b/kernel/printk.c
67663 index b663c2c..1d6ba7a 100644
67664 --- a/kernel/printk.c
67665 +++ b/kernel/printk.c
67666 @@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
67667 if (from_file && type != SYSLOG_ACTION_OPEN)
67668 return 0;
67669
67670 +#ifdef CONFIG_GRKERNSEC_DMESG
67671 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
67672 + return -EPERM;
67673 +#endif
67674 +
67675 if (syslog_action_restricted(type)) {
67676 if (capable(CAP_SYSLOG))
67677 return 0;
67678 diff --git a/kernel/profile.c b/kernel/profile.c
67679 index 76b8e77..a2930e8 100644
67680 --- a/kernel/profile.c
67681 +++ b/kernel/profile.c
67682 @@ -39,7 +39,7 @@ struct profile_hit {
67683 /* Oprofile timer tick hook */
67684 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67685
67686 -static atomic_t *prof_buffer;
67687 +static atomic_unchecked_t *prof_buffer;
67688 static unsigned long prof_len, prof_shift;
67689
67690 int prof_on __read_mostly;
67691 @@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
67692 hits[i].pc = 0;
67693 continue;
67694 }
67695 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67696 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67697 hits[i].hits = hits[i].pc = 0;
67698 }
67699 }
67700 @@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67701 * Add the current hit(s) and flush the write-queue out
67702 * to the global buffer:
67703 */
67704 - atomic_add(nr_hits, &prof_buffer[pc]);
67705 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67706 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67707 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67708 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67709 hits[i].pc = hits[i].hits = 0;
67710 }
67711 out:
67712 @@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
67713 {
67714 unsigned long pc;
67715 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
67716 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67717 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
67718 }
67719 #endif /* !CONFIG_SMP */
67720
67721 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
67722 return -EFAULT;
67723 buf++; p++; count--; read++;
67724 }
67725 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
67726 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
67727 if (copy_to_user(buf, (void *)pnt, count))
67728 return -EFAULT;
67729 read += count;
67730 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
67731 }
67732 #endif
67733 profile_discard_flip_buffers();
67734 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
67735 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
67736 return count;
67737 }
67738
67739 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
67740 index ee8d49b..bd3d790 100644
67741 --- a/kernel/ptrace.c
67742 +++ b/kernel/ptrace.c
67743 @@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
67744
67745 if (seize)
67746 flags |= PT_SEIZED;
67747 - if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
67748 + if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
67749 flags |= PT_PTRACE_CAP;
67750 task->ptrace = flags;
67751
67752 @@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
67753 break;
67754 return -EIO;
67755 }
67756 - if (copy_to_user(dst, buf, retval))
67757 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
67758 return -EFAULT;
67759 copied += retval;
67760 src += retval;
67761 @@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
67762 bool seized = child->ptrace & PT_SEIZED;
67763 int ret = -EIO;
67764 siginfo_t siginfo, *si;
67765 - void __user *datavp = (void __user *) data;
67766 + void __user *datavp = (__force void __user *) data;
67767 unsigned long __user *datalp = datavp;
67768 unsigned long flags;
67769
67770 @@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
67771 goto out;
67772 }
67773
67774 + if (gr_handle_ptrace(child, request)) {
67775 + ret = -EPERM;
67776 + goto out_put_task_struct;
67777 + }
67778 +
67779 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67780 ret = ptrace_attach(child, request, addr, data);
67781 /*
67782 * Some architectures need to do book-keeping after
67783 * a ptrace attach.
67784 */
67785 - if (!ret)
67786 + if (!ret) {
67787 arch_ptrace_attach(child);
67788 + gr_audit_ptrace(child);
67789 + }
67790 goto out_put_task_struct;
67791 }
67792
67793 @@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
67794 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
67795 if (copied != sizeof(tmp))
67796 return -EIO;
67797 - return put_user(tmp, (unsigned long __user *)data);
67798 + return put_user(tmp, (__force unsigned long __user *)data);
67799 }
67800
67801 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
67802 @@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
67803 goto out;
67804 }
67805
67806 + if (gr_handle_ptrace(child, request)) {
67807 + ret = -EPERM;
67808 + goto out_put_task_struct;
67809 + }
67810 +
67811 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
67812 ret = ptrace_attach(child, request, addr, data);
67813 /*
67814 * Some architectures need to do book-keeping after
67815 * a ptrace attach.
67816 */
67817 - if (!ret)
67818 + if (!ret) {
67819 arch_ptrace_attach(child);
67820 + gr_audit_ptrace(child);
67821 + }
67822 goto out_put_task_struct;
67823 }
67824
67825 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
67826 index 37a5444..eec170a 100644
67827 --- a/kernel/rcutiny.c
67828 +++ b/kernel/rcutiny.c
67829 @@ -46,7 +46,7 @@
67830 struct rcu_ctrlblk;
67831 static void invoke_rcu_callbacks(void);
67832 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
67833 -static void rcu_process_callbacks(struct softirq_action *unused);
67834 +static void rcu_process_callbacks(void);
67835 static void __call_rcu(struct rcu_head *head,
67836 void (*func)(struct rcu_head *rcu),
67837 struct rcu_ctrlblk *rcp);
67838 @@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
67839 rcu_is_callbacks_kthread()));
67840 }
67841
67842 -static void rcu_process_callbacks(struct softirq_action *unused)
67843 +static void rcu_process_callbacks(void)
67844 {
67845 __rcu_process_callbacks(&rcu_sched_ctrlblk);
67846 __rcu_process_callbacks(&rcu_bh_ctrlblk);
67847 diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
67848 index 22ecea0..3789898 100644
67849 --- a/kernel/rcutiny_plugin.h
67850 +++ b/kernel/rcutiny_plugin.h
67851 @@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
67852 have_rcu_kthread_work = morework;
67853 local_irq_restore(flags);
67854 if (work)
67855 - rcu_process_callbacks(NULL);
67856 + rcu_process_callbacks();
67857 schedule_timeout_interruptible(1); /* Leave CPU for others. */
67858 }
67859
67860 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
67861 index a89b381..efdcad8 100644
67862 --- a/kernel/rcutorture.c
67863 +++ b/kernel/rcutorture.c
67864 @@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
67865 { 0 };
67866 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
67867 { 0 };
67868 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67869 -static atomic_t n_rcu_torture_alloc;
67870 -static atomic_t n_rcu_torture_alloc_fail;
67871 -static atomic_t n_rcu_torture_free;
67872 -static atomic_t n_rcu_torture_mberror;
67873 -static atomic_t n_rcu_torture_error;
67874 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
67875 +static atomic_unchecked_t n_rcu_torture_alloc;
67876 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
67877 +static atomic_unchecked_t n_rcu_torture_free;
67878 +static atomic_unchecked_t n_rcu_torture_mberror;
67879 +static atomic_unchecked_t n_rcu_torture_error;
67880 static long n_rcu_torture_boost_ktrerror;
67881 static long n_rcu_torture_boost_rterror;
67882 static long n_rcu_torture_boost_failure;
67883 @@ -253,11 +253,11 @@ rcu_torture_alloc(void)
67884
67885 spin_lock_bh(&rcu_torture_lock);
67886 if (list_empty(&rcu_torture_freelist)) {
67887 - atomic_inc(&n_rcu_torture_alloc_fail);
67888 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
67889 spin_unlock_bh(&rcu_torture_lock);
67890 return NULL;
67891 }
67892 - atomic_inc(&n_rcu_torture_alloc);
67893 + atomic_inc_unchecked(&n_rcu_torture_alloc);
67894 p = rcu_torture_freelist.next;
67895 list_del_init(p);
67896 spin_unlock_bh(&rcu_torture_lock);
67897 @@ -270,7 +270,7 @@ rcu_torture_alloc(void)
67898 static void
67899 rcu_torture_free(struct rcu_torture *p)
67900 {
67901 - atomic_inc(&n_rcu_torture_free);
67902 + atomic_inc_unchecked(&n_rcu_torture_free);
67903 spin_lock_bh(&rcu_torture_lock);
67904 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
67905 spin_unlock_bh(&rcu_torture_lock);
67906 @@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
67907 i = rp->rtort_pipe_count;
67908 if (i > RCU_TORTURE_PIPE_LEN)
67909 i = RCU_TORTURE_PIPE_LEN;
67910 - atomic_inc(&rcu_torture_wcount[i]);
67911 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67912 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67913 rp->rtort_mbtest = 0;
67914 rcu_torture_free(rp);
67915 @@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
67916 i = rp->rtort_pipe_count;
67917 if (i > RCU_TORTURE_PIPE_LEN)
67918 i = RCU_TORTURE_PIPE_LEN;
67919 - atomic_inc(&rcu_torture_wcount[i]);
67920 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67921 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
67922 rp->rtort_mbtest = 0;
67923 list_del(&rp->rtort_free);
67924 @@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
67925 i = old_rp->rtort_pipe_count;
67926 if (i > RCU_TORTURE_PIPE_LEN)
67927 i = RCU_TORTURE_PIPE_LEN;
67928 - atomic_inc(&rcu_torture_wcount[i]);
67929 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
67930 old_rp->rtort_pipe_count++;
67931 cur_ops->deferred_free(old_rp);
67932 }
67933 @@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
67934 }
67935 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67936 if (p->rtort_mbtest == 0)
67937 - atomic_inc(&n_rcu_torture_mberror);
67938 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67939 spin_lock(&rand_lock);
67940 cur_ops->read_delay(&rand);
67941 n_rcu_torture_timers++;
67942 @@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
67943 }
67944 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
67945 if (p->rtort_mbtest == 0)
67946 - atomic_inc(&n_rcu_torture_mberror);
67947 + atomic_inc_unchecked(&n_rcu_torture_mberror);
67948 cur_ops->read_delay(&rand);
67949 preempt_disable();
67950 pipe_count = p->rtort_pipe_count;
67951 @@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
67952 rcu_torture_current,
67953 rcu_torture_current_version,
67954 list_empty(&rcu_torture_freelist),
67955 - atomic_read(&n_rcu_torture_alloc),
67956 - atomic_read(&n_rcu_torture_alloc_fail),
67957 - atomic_read(&n_rcu_torture_free),
67958 - atomic_read(&n_rcu_torture_mberror),
67959 + atomic_read_unchecked(&n_rcu_torture_alloc),
67960 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
67961 + atomic_read_unchecked(&n_rcu_torture_free),
67962 + atomic_read_unchecked(&n_rcu_torture_mberror),
67963 n_rcu_torture_boost_ktrerror,
67964 n_rcu_torture_boost_rterror,
67965 n_rcu_torture_boost_failure,
67966 @@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
67967 n_online_attempts,
67968 n_offline_successes,
67969 n_offline_attempts);
67970 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
67971 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
67972 n_rcu_torture_boost_ktrerror != 0 ||
67973 n_rcu_torture_boost_rterror != 0 ||
67974 n_rcu_torture_boost_failure != 0)
67975 @@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
67976 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
67977 if (i > 1) {
67978 cnt += sprintf(&page[cnt], "!!! ");
67979 - atomic_inc(&n_rcu_torture_error);
67980 + atomic_inc_unchecked(&n_rcu_torture_error);
67981 WARN_ON_ONCE(1);
67982 }
67983 cnt += sprintf(&page[cnt], "Reader Pipe: ");
67984 @@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
67985 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
67986 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
67987 cnt += sprintf(&page[cnt], " %d",
67988 - atomic_read(&rcu_torture_wcount[i]));
67989 + atomic_read_unchecked(&rcu_torture_wcount[i]));
67990 }
67991 cnt += sprintf(&page[cnt], "\n");
67992 if (cur_ops->stats)
67993 @@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
67994
67995 if (cur_ops->cleanup)
67996 cur_ops->cleanup();
67997 - if (atomic_read(&n_rcu_torture_error))
67998 + if (atomic_read_unchecked(&n_rcu_torture_error))
67999 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
68000 else if (n_online_successes != n_online_attempts ||
68001 n_offline_successes != n_offline_attempts)
68002 @@ -1744,17 +1744,17 @@ rcu_torture_init(void)
68003
68004 rcu_torture_current = NULL;
68005 rcu_torture_current_version = 0;
68006 - atomic_set(&n_rcu_torture_alloc, 0);
68007 - atomic_set(&n_rcu_torture_alloc_fail, 0);
68008 - atomic_set(&n_rcu_torture_free, 0);
68009 - atomic_set(&n_rcu_torture_mberror, 0);
68010 - atomic_set(&n_rcu_torture_error, 0);
68011 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68012 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68013 + atomic_set_unchecked(&n_rcu_torture_free, 0);
68014 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68015 + atomic_set_unchecked(&n_rcu_torture_error, 0);
68016 n_rcu_torture_boost_ktrerror = 0;
68017 n_rcu_torture_boost_rterror = 0;
68018 n_rcu_torture_boost_failure = 0;
68019 n_rcu_torture_boosts = 0;
68020 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68021 - atomic_set(&rcu_torture_wcount[i], 0);
68022 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68023 for_each_possible_cpu(cpu) {
68024 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68025 per_cpu(rcu_torture_count, cpu)[i] = 0;
68026 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
68027 index d0c5baf..109b2e7 100644
68028 --- a/kernel/rcutree.c
68029 +++ b/kernel/rcutree.c
68030 @@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
68031 rcu_prepare_for_idle(smp_processor_id());
68032 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68033 smp_mb__before_atomic_inc(); /* See above. */
68034 - atomic_inc(&rdtp->dynticks);
68035 + atomic_inc_unchecked(&rdtp->dynticks);
68036 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68037 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68038 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68039
68040 /*
68041 * The idle task is not permitted to enter the idle loop while
68042 @@ -448,10 +448,10 @@ void rcu_irq_exit(void)
68043 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68044 {
68045 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68046 - atomic_inc(&rdtp->dynticks);
68047 + atomic_inc_unchecked(&rdtp->dynticks);
68048 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68049 smp_mb__after_atomic_inc(); /* See above. */
68050 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68051 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68052 rcu_cleanup_after_idle(smp_processor_id());
68053 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68054 if (!is_idle_task(current)) {
68055 @@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
68056 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68057
68058 if (rdtp->dynticks_nmi_nesting == 0 &&
68059 - (atomic_read(&rdtp->dynticks) & 0x1))
68060 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68061 return;
68062 rdtp->dynticks_nmi_nesting++;
68063 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68064 - atomic_inc(&rdtp->dynticks);
68065 + atomic_inc_unchecked(&rdtp->dynticks);
68066 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68067 smp_mb__after_atomic_inc(); /* See above. */
68068 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68069 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68070 }
68071
68072 /**
68073 @@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
68074 return;
68075 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68076 smp_mb__before_atomic_inc(); /* See above. */
68077 - atomic_inc(&rdtp->dynticks);
68078 + atomic_inc_unchecked(&rdtp->dynticks);
68079 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68080 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68081 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68082 }
68083
68084 #ifdef CONFIG_PROVE_RCU
68085 @@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
68086 int ret;
68087
68088 preempt_disable();
68089 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68090 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68091 preempt_enable();
68092 return ret;
68093 }
68094 @@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
68095 */
68096 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68097 {
68098 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68099 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68100 return (rdp->dynticks_snap & 0x1) == 0;
68101 }
68102
68103 @@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
68104 unsigned int curr;
68105 unsigned int snap;
68106
68107 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68108 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68109 snap = (unsigned int)rdp->dynticks_snap;
68110
68111 /*
68112 @@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
68113 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
68114 */
68115 if (till_stall_check < 3) {
68116 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
68117 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
68118 till_stall_check = 3;
68119 } else if (till_stall_check > 300) {
68120 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
68121 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
68122 till_stall_check = 300;
68123 }
68124 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
68125 @@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
68126 /*
68127 * Do RCU core processing for the current CPU.
68128 */
68129 -static void rcu_process_callbacks(struct softirq_action *unused)
68130 +static void rcu_process_callbacks(void)
68131 {
68132 trace_rcu_utilization("Start RCU core");
68133 __rcu_process_callbacks(&rcu_sched_state,
68134 @@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
68135 }
68136 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
68137
68138 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68139 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68140 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68141 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68142
68143 static int synchronize_sched_expedited_cpu_stop(void *data)
68144 {
68145 @@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
68146 int firstsnap, s, snap, trycount = 0;
68147
68148 /* Note that atomic_inc_return() implies full memory barrier. */
68149 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68150 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68151 get_online_cpus();
68152 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
68153
68154 @@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
68155 }
68156
68157 /* Check to see if someone else did our work for us. */
68158 - s = atomic_read(&sync_sched_expedited_done);
68159 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68160 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68161 smp_mb(); /* ensure test happens before caller kfree */
68162 return;
68163 @@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
68164 * grace period works for us.
68165 */
68166 get_online_cpus();
68167 - snap = atomic_read(&sync_sched_expedited_started);
68168 + snap = atomic_read_unchecked(&sync_sched_expedited_started);
68169 smp_mb(); /* ensure read is before try_stop_cpus(). */
68170 }
68171
68172 @@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
68173 * than we did beat us to the punch.
68174 */
68175 do {
68176 - s = atomic_read(&sync_sched_expedited_done);
68177 + s = atomic_read_unchecked(&sync_sched_expedited_done);
68178 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68179 smp_mb(); /* ensure test happens before caller kfree */
68180 break;
68181 }
68182 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68183 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68184
68185 put_online_cpus();
68186 }
68187 @@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68188 rdp->qlen = 0;
68189 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68190 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
68191 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68192 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68193 rdp->cpu = cpu;
68194 rdp->rsp = rsp;
68195 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68196 @@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68197 rdp->n_force_qs_snap = rsp->n_force_qs;
68198 rdp->blimit = blimit;
68199 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
68200 - atomic_set(&rdp->dynticks->dynticks,
68201 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68202 + atomic_set_unchecked(&rdp->dynticks->dynticks,
68203 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68204 rcu_prepare_for_idle_init(cpu);
68205 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68206
68207 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68208 index cdd1be0..5b2efb4 100644
68209 --- a/kernel/rcutree.h
68210 +++ b/kernel/rcutree.h
68211 @@ -87,7 +87,7 @@ struct rcu_dynticks {
68212 long long dynticks_nesting; /* Track irq/process nesting level. */
68213 /* Process level is worth LLONG_MAX/2. */
68214 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68215 - atomic_t dynticks; /* Even value for idle, else odd. */
68216 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68217 };
68218
68219 /* RCU's kthread states for tracing. */
68220 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68221 index c023464..7f57225 100644
68222 --- a/kernel/rcutree_plugin.h
68223 +++ b/kernel/rcutree_plugin.h
68224 @@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
68225
68226 /* Clean up and exit. */
68227 smp_mb(); /* ensure expedited GP seen before counter increment. */
68228 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68229 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68230 unlock_mb_ret:
68231 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68232 mb_ret:
68233 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
68234 index ed459ed..a03c3fa 100644
68235 --- a/kernel/rcutree_trace.c
68236 +++ b/kernel/rcutree_trace.c
68237 @@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68238 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68239 rdp->qs_pending);
68240 seq_printf(m, " dt=%d/%llx/%d df=%lu",
68241 - atomic_read(&rdp->dynticks->dynticks),
68242 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68243 rdp->dynticks->dynticks_nesting,
68244 rdp->dynticks->dynticks_nmi_nesting,
68245 rdp->dynticks_fqs);
68246 @@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68247 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
68248 rdp->qs_pending);
68249 seq_printf(m, ",%d,%llx,%d,%lu",
68250 - atomic_read(&rdp->dynticks->dynticks),
68251 + atomic_read_unchecked(&rdp->dynticks->dynticks),
68252 rdp->dynticks->dynticks_nesting,
68253 rdp->dynticks->dynticks_nmi_nesting,
68254 rdp->dynticks_fqs);
68255 diff --git a/kernel/resource.c b/kernel/resource.c
68256 index 7e8ea66..1efd11f 100644
68257 --- a/kernel/resource.c
68258 +++ b/kernel/resource.c
68259 @@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
68260
68261 static int __init ioresources_init(void)
68262 {
68263 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
68264 +#ifdef CONFIG_GRKERNSEC_PROC_USER
68265 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68266 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68267 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68268 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68269 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68270 +#endif
68271 +#else
68272 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68273 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68274 +#endif
68275 return 0;
68276 }
68277 __initcall(ioresources_init);
68278 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
68279 index 98ec494..4241d6d 100644
68280 --- a/kernel/rtmutex-tester.c
68281 +++ b/kernel/rtmutex-tester.c
68282 @@ -20,7 +20,7 @@
68283 #define MAX_RT_TEST_MUTEXES 8
68284
68285 static spinlock_t rttest_lock;
68286 -static atomic_t rttest_event;
68287 +static atomic_unchecked_t rttest_event;
68288
68289 struct test_thread_data {
68290 int opcode;
68291 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68292
68293 case RTTEST_LOCKCONT:
68294 td->mutexes[td->opdata] = 1;
68295 - td->event = atomic_add_return(1, &rttest_event);
68296 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68297 return 0;
68298
68299 case RTTEST_RESET:
68300 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68301 return 0;
68302
68303 case RTTEST_RESETEVENT:
68304 - atomic_set(&rttest_event, 0);
68305 + atomic_set_unchecked(&rttest_event, 0);
68306 return 0;
68307
68308 default:
68309 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68310 return ret;
68311
68312 td->mutexes[id] = 1;
68313 - td->event = atomic_add_return(1, &rttest_event);
68314 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68315 rt_mutex_lock(&mutexes[id]);
68316 - td->event = atomic_add_return(1, &rttest_event);
68317 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68318 td->mutexes[id] = 4;
68319 return 0;
68320
68321 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68322 return ret;
68323
68324 td->mutexes[id] = 1;
68325 - td->event = atomic_add_return(1, &rttest_event);
68326 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68327 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68328 - td->event = atomic_add_return(1, &rttest_event);
68329 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68330 td->mutexes[id] = ret ? 0 : 4;
68331 return ret ? -EINTR : 0;
68332
68333 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
68334 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68335 return ret;
68336
68337 - td->event = atomic_add_return(1, &rttest_event);
68338 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68339 rt_mutex_unlock(&mutexes[id]);
68340 - td->event = atomic_add_return(1, &rttest_event);
68341 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68342 td->mutexes[id] = 0;
68343 return 0;
68344
68345 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68346 break;
68347
68348 td->mutexes[dat] = 2;
68349 - td->event = atomic_add_return(1, &rttest_event);
68350 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68351 break;
68352
68353 default:
68354 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68355 return;
68356
68357 td->mutexes[dat] = 3;
68358 - td->event = atomic_add_return(1, &rttest_event);
68359 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68360 break;
68361
68362 case RTTEST_LOCKNOWAIT:
68363 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
68364 return;
68365
68366 td->mutexes[dat] = 1;
68367 - td->event = atomic_add_return(1, &rttest_event);
68368 + td->event = atomic_add_return_unchecked(1, &rttest_event);
68369 return;
68370
68371 default:
68372 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
68373 index 0984a21..939f183 100644
68374 --- a/kernel/sched/auto_group.c
68375 +++ b/kernel/sched/auto_group.c
68376 @@ -11,7 +11,7 @@
68377
68378 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68379 static struct autogroup autogroup_default;
68380 -static atomic_t autogroup_seq_nr;
68381 +static atomic_unchecked_t autogroup_seq_nr;
68382
68383 void __init autogroup_init(struct task_struct *init_task)
68384 {
68385 @@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
68386
68387 kref_init(&ag->kref);
68388 init_rwsem(&ag->lock);
68389 - ag->id = atomic_inc_return(&autogroup_seq_nr);
68390 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68391 ag->tg = tg;
68392 #ifdef CONFIG_RT_GROUP_SCHED
68393 /*
68394 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
68395 index 817bf70..9099fb4 100644
68396 --- a/kernel/sched/core.c
68397 +++ b/kernel/sched/core.c
68398 @@ -4038,6 +4038,8 @@ int can_nice(const struct task_struct *p, const int nice)
68399 /* convert nice value [19,-20] to rlimit style value [1,40] */
68400 int nice_rlim = 20 - nice;
68401
68402 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68403 +
68404 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
68405 capable(CAP_SYS_NICE));
68406 }
68407 @@ -4071,7 +4073,8 @@ SYSCALL_DEFINE1(nice, int, increment)
68408 if (nice > 19)
68409 nice = 19;
68410
68411 - if (increment < 0 && !can_nice(current, nice))
68412 + if (increment < 0 && (!can_nice(current, nice) ||
68413 + gr_handle_chroot_nice()))
68414 return -EPERM;
68415
68416 retval = security_task_setnice(current, nice);
68417 @@ -4228,6 +4231,7 @@ recheck:
68418 unsigned long rlim_rtprio =
68419 task_rlimit(p, RLIMIT_RTPRIO);
68420
68421 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
68422 /* can't set/change the rt policy */
68423 if (policy != p->policy && !rlim_rtprio)
68424 return -EPERM;
68425 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
68426 index e955364..eacd2a4 100644
68427 --- a/kernel/sched/fair.c
68428 +++ b/kernel/sched/fair.c
68429 @@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
68430 * run_rebalance_domains is triggered when needed from the scheduler tick.
68431 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
68432 */
68433 -static void run_rebalance_domains(struct softirq_action *h)
68434 +static void run_rebalance_domains(void)
68435 {
68436 int this_cpu = smp_processor_id();
68437 struct rq *this_rq = cpu_rq(this_cpu);
68438 diff --git a/kernel/signal.c b/kernel/signal.c
68439 index 17afcaf..4500b05 100644
68440 --- a/kernel/signal.c
68441 +++ b/kernel/signal.c
68442 @@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
68443
68444 int print_fatal_signals __read_mostly;
68445
68446 -static void __user *sig_handler(struct task_struct *t, int sig)
68447 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
68448 {
68449 return t->sighand->action[sig - 1].sa.sa_handler;
68450 }
68451
68452 -static int sig_handler_ignored(void __user *handler, int sig)
68453 +static int sig_handler_ignored(__sighandler_t handler, int sig)
68454 {
68455 /* Is it explicitly or implicitly ignored? */
68456 return handler == SIG_IGN ||
68457 @@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
68458
68459 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
68460 {
68461 - void __user *handler;
68462 + __sighandler_t handler;
68463
68464 handler = sig_handler(t, sig);
68465
68466 @@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
68467 atomic_inc(&user->sigpending);
68468 rcu_read_unlock();
68469
68470 + if (!override_rlimit)
68471 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
68472 +
68473 if (override_rlimit ||
68474 atomic_read(&user->sigpending) <=
68475 task_rlimit(t, RLIMIT_SIGPENDING)) {
68476 @@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
68477
68478 int unhandled_signal(struct task_struct *tsk, int sig)
68479 {
68480 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68481 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68482 if (is_global_init(tsk))
68483 return 1;
68484 if (handler != SIG_IGN && handler != SIG_DFL)
68485 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
68486 }
68487 }
68488
68489 + /* allow glibc communication via tgkill to other threads in our
68490 + thread group */
68491 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68492 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68493 + && gr_handle_signal(t, sig))
68494 + return -EPERM;
68495 +
68496 return security_task_kill(t, info, sig, 0);
68497 }
68498
68499 @@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68500 return send_signal(sig, info, p, 1);
68501 }
68502
68503 -static int
68504 +int
68505 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68506 {
68507 return send_signal(sig, info, t, 0);
68508 @@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68509 unsigned long int flags;
68510 int ret, blocked, ignored;
68511 struct k_sigaction *action;
68512 + int is_unhandled = 0;
68513
68514 spin_lock_irqsave(&t->sighand->siglock, flags);
68515 action = &t->sighand->action[sig-1];
68516 @@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68517 }
68518 if (action->sa.sa_handler == SIG_DFL)
68519 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68520 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68521 + is_unhandled = 1;
68522 ret = specific_send_sig_info(sig, info, t);
68523 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68524
68525 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
68526 + normal operation */
68527 + if (is_unhandled) {
68528 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68529 + gr_handle_crash(t, sig);
68530 + }
68531 +
68532 return ret;
68533 }
68534
68535 @@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
68536 ret = check_kill_permission(sig, info, p);
68537 rcu_read_unlock();
68538
68539 - if (!ret && sig)
68540 + if (!ret && sig) {
68541 ret = do_send_sig_info(sig, info, p, true);
68542 + if (!ret)
68543 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
68544 + }
68545
68546 return ret;
68547 }
68548 @@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
68549 int error = -ESRCH;
68550
68551 rcu_read_lock();
68552 - p = find_task_by_vpid(pid);
68553 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68554 + /* allow glibc communication via tgkill to other threads in our
68555 + thread group */
68556 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68557 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
68558 + p = find_task_by_vpid_unrestricted(pid);
68559 + else
68560 +#endif
68561 + p = find_task_by_vpid(pid);
68562 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68563 error = check_kill_permission(sig, info, p);
68564 /*
68565 diff --git a/kernel/smp.c b/kernel/smp.c
68566 index 2f8b10e..a41bc14 100644
68567 --- a/kernel/smp.c
68568 +++ b/kernel/smp.c
68569 @@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
68570 }
68571 EXPORT_SYMBOL(smp_call_function);
68572
68573 -void ipi_call_lock(void)
68574 +void ipi_call_lock(void) __acquires(call_function.lock)
68575 {
68576 raw_spin_lock(&call_function.lock);
68577 }
68578
68579 -void ipi_call_unlock(void)
68580 +void ipi_call_unlock(void) __releases(call_function.lock)
68581 {
68582 raw_spin_unlock(&call_function.lock);
68583 }
68584
68585 -void ipi_call_lock_irq(void)
68586 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
68587 {
68588 raw_spin_lock_irq(&call_function.lock);
68589 }
68590
68591 -void ipi_call_unlock_irq(void)
68592 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
68593 {
68594 raw_spin_unlock_irq(&call_function.lock);
68595 }
68596 diff --git a/kernel/softirq.c b/kernel/softirq.c
68597 index 671f959..91c51cb 100644
68598 --- a/kernel/softirq.c
68599 +++ b/kernel/softirq.c
68600 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
68601
68602 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
68603
68604 -char *softirq_to_name[NR_SOFTIRQS] = {
68605 +const char * const softirq_to_name[NR_SOFTIRQS] = {
68606 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
68607 "TASKLET", "SCHED", "HRTIMER", "RCU"
68608 };
68609 @@ -235,7 +235,7 @@ restart:
68610 kstat_incr_softirqs_this_cpu(vec_nr);
68611
68612 trace_softirq_entry(vec_nr);
68613 - h->action(h);
68614 + h->action();
68615 trace_softirq_exit(vec_nr);
68616 if (unlikely(prev_count != preempt_count())) {
68617 printk(KERN_ERR "huh, entered softirq %u %s %p"
68618 @@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68619 or_softirq_pending(1UL << nr);
68620 }
68621
68622 -void open_softirq(int nr, void (*action)(struct softirq_action *))
68623 +void open_softirq(int nr, void (*action)(void))
68624 {
68625 - softirq_vec[nr].action = action;
68626 + pax_open_kernel();
68627 + *(void **)&softirq_vec[nr].action = action;
68628 + pax_close_kernel();
68629 }
68630
68631 /*
68632 @@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
68633
68634 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68635
68636 -static void tasklet_action(struct softirq_action *a)
68637 +static void tasklet_action(void)
68638 {
68639 struct tasklet_struct *list;
68640
68641 @@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
68642 }
68643 }
68644
68645 -static void tasklet_hi_action(struct softirq_action *a)
68646 +static void tasklet_hi_action(void)
68647 {
68648 struct tasklet_struct *list;
68649
68650 diff --git a/kernel/sys.c b/kernel/sys.c
68651 index e7006eb..8fb7c51 100644
68652 --- a/kernel/sys.c
68653 +++ b/kernel/sys.c
68654 @@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
68655 error = -EACCES;
68656 goto out;
68657 }
68658 +
68659 + if (gr_handle_chroot_setpriority(p, niceval)) {
68660 + error = -EACCES;
68661 + goto out;
68662 + }
68663 +
68664 no_nice = security_task_setnice(p, niceval);
68665 if (no_nice) {
68666 error = no_nice;
68667 @@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
68668 goto error;
68669 }
68670
68671 + if (gr_check_group_change(new->gid, new->egid, -1))
68672 + goto error;
68673 +
68674 if (rgid != (gid_t) -1 ||
68675 (egid != (gid_t) -1 && egid != old->gid))
68676 new->sgid = new->egid;
68677 @@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
68678 old = current_cred();
68679
68680 retval = -EPERM;
68681 +
68682 + if (gr_check_group_change(gid, gid, gid))
68683 + goto error;
68684 +
68685 if (nsown_capable(CAP_SETGID))
68686 new->gid = new->egid = new->sgid = new->fsgid = gid;
68687 else if (gid == old->gid || gid == old->sgid)
68688 @@ -627,7 +640,7 @@ error:
68689 /*
68690 * change the user struct in a credentials set to match the new UID
68691 */
68692 -static int set_user(struct cred *new)
68693 +int set_user(struct cred *new)
68694 {
68695 struct user_struct *new_user;
68696
68697 @@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
68698 goto error;
68699 }
68700
68701 + if (gr_check_user_change(new->uid, new->euid, -1))
68702 + goto error;
68703 +
68704 if (new->uid != old->uid) {
68705 retval = set_user(new);
68706 if (retval < 0)
68707 @@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
68708 old = current_cred();
68709
68710 retval = -EPERM;
68711 +
68712 + if (gr_check_crash_uid(uid))
68713 + goto error;
68714 + if (gr_check_user_change(uid, uid, uid))
68715 + goto error;
68716 +
68717 if (nsown_capable(CAP_SETUID)) {
68718 new->suid = new->uid = uid;
68719 if (uid != old->uid) {
68720 @@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
68721 goto error;
68722 }
68723
68724 + if (gr_check_user_change(ruid, euid, -1))
68725 + goto error;
68726 +
68727 if (ruid != (uid_t) -1) {
68728 new->uid = ruid;
68729 if (ruid != old->uid) {
68730 @@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
68731 goto error;
68732 }
68733
68734 + if (gr_check_group_change(rgid, egid, -1))
68735 + goto error;
68736 +
68737 if (rgid != (gid_t) -1)
68738 new->gid = rgid;
68739 if (egid != (gid_t) -1)
68740 @@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68741 old = current_cred();
68742 old_fsuid = old->fsuid;
68743
68744 + if (gr_check_user_change(-1, -1, uid))
68745 + goto error;
68746 +
68747 if (uid == old->uid || uid == old->euid ||
68748 uid == old->suid || uid == old->fsuid ||
68749 nsown_capable(CAP_SETUID)) {
68750 @@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
68751 }
68752 }
68753
68754 +error:
68755 abort_creds(new);
68756 return old_fsuid;
68757
68758 @@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
68759 if (gid == old->gid || gid == old->egid ||
68760 gid == old->sgid || gid == old->fsgid ||
68761 nsown_capable(CAP_SETGID)) {
68762 + if (gr_check_group_change(-1, -1, gid))
68763 + goto error;
68764 +
68765 if (gid != old_fsgid) {
68766 new->fsgid = gid;
68767 goto change_okay;
68768 }
68769 }
68770
68771 +error:
68772 abort_creds(new);
68773 return old_fsgid;
68774
68775 @@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
68776 }
68777 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
68778 snprintf(buf, len, "2.6.%u%s", v, rest);
68779 - ret = copy_to_user(release, buf, len);
68780 + if (len > sizeof(buf))
68781 + ret = -EFAULT;
68782 + else
68783 + ret = copy_to_user(release, buf, len);
68784 }
68785 return ret;
68786 }
68787 @@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
68788 return -EFAULT;
68789
68790 down_read(&uts_sem);
68791 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
68792 + error = __copy_to_user(name->sysname, &utsname()->sysname,
68793 __OLD_UTS_LEN);
68794 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
68795 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
68796 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
68797 __OLD_UTS_LEN);
68798 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
68799 - error |= __copy_to_user(&name->release, &utsname()->release,
68800 + error |= __copy_to_user(name->release, &utsname()->release,
68801 __OLD_UTS_LEN);
68802 error |= __put_user(0, name->release + __OLD_UTS_LEN);
68803 - error |= __copy_to_user(&name->version, &utsname()->version,
68804 + error |= __copy_to_user(name->version, &utsname()->version,
68805 __OLD_UTS_LEN);
68806 error |= __put_user(0, name->version + __OLD_UTS_LEN);
68807 - error |= __copy_to_user(&name->machine, &utsname()->machine,
68808 + error |= __copy_to_user(name->machine, &utsname()->machine,
68809 __OLD_UTS_LEN);
68810 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
68811 up_read(&uts_sem);
68812 @@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
68813 error = get_dumpable(me->mm);
68814 break;
68815 case PR_SET_DUMPABLE:
68816 - if (arg2 < 0 || arg2 > 1) {
68817 + if (arg2 > 1) {
68818 error = -EINVAL;
68819 break;
68820 }
68821 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
68822 index 4ab1187..0b75ced 100644
68823 --- a/kernel/sysctl.c
68824 +++ b/kernel/sysctl.c
68825 @@ -91,7 +91,6 @@
68826
68827
68828 #if defined(CONFIG_SYSCTL)
68829 -
68830 /* External variables not in a header file. */
68831 extern int sysctl_overcommit_memory;
68832 extern int sysctl_overcommit_ratio;
68833 @@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
68834 void __user *buffer, size_t *lenp, loff_t *ppos);
68835 #endif
68836
68837 -#ifdef CONFIG_PRINTK
68838 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68839 void __user *buffer, size_t *lenp, loff_t *ppos);
68840 -#endif
68841
68842 #ifdef CONFIG_MAGIC_SYSRQ
68843 /* Note: sysrq code uses it's own private copy */
68844 @@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
68845
68846 #endif
68847
68848 +extern struct ctl_table grsecurity_table[];
68849 +
68850 static struct ctl_table kern_table[];
68851 static struct ctl_table vm_table[];
68852 static struct ctl_table fs_table[];
68853 @@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
68854 int sysctl_legacy_va_layout;
68855 #endif
68856
68857 +#ifdef CONFIG_PAX_SOFTMODE
68858 +static ctl_table pax_table[] = {
68859 + {
68860 + .procname = "softmode",
68861 + .data = &pax_softmode,
68862 + .maxlen = sizeof(unsigned int),
68863 + .mode = 0600,
68864 + .proc_handler = &proc_dointvec,
68865 + },
68866 +
68867 + { }
68868 +};
68869 +#endif
68870 +
68871 /* The default sysctl tables: */
68872
68873 static struct ctl_table sysctl_base_table[] = {
68874 @@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
68875 #endif
68876
68877 static struct ctl_table kern_table[] = {
68878 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
68879 + {
68880 + .procname = "grsecurity",
68881 + .mode = 0500,
68882 + .child = grsecurity_table,
68883 + },
68884 +#endif
68885 +
68886 +#ifdef CONFIG_PAX_SOFTMODE
68887 + {
68888 + .procname = "pax",
68889 + .mode = 0500,
68890 + .child = pax_table,
68891 + },
68892 +#endif
68893 +
68894 {
68895 .procname = "sched_child_runs_first",
68896 .data = &sysctl_sched_child_runs_first,
68897 @@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
68898 .data = &modprobe_path,
68899 .maxlen = KMOD_PATH_LEN,
68900 .mode = 0644,
68901 - .proc_handler = proc_dostring,
68902 + .proc_handler = proc_dostring_modpriv,
68903 },
68904 {
68905 .procname = "modules_disabled",
68906 @@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
68907 .extra1 = &zero,
68908 .extra2 = &one,
68909 },
68910 +#endif
68911 {
68912 .procname = "kptr_restrict",
68913 .data = &kptr_restrict,
68914 .maxlen = sizeof(int),
68915 .mode = 0644,
68916 .proc_handler = proc_dointvec_minmax_sysadmin,
68917 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68918 + .extra1 = &two,
68919 +#else
68920 .extra1 = &zero,
68921 +#endif
68922 .extra2 = &two,
68923 },
68924 -#endif
68925 {
68926 .procname = "ngroups_max",
68927 .data = &ngroups_max,
68928 @@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
68929 .proc_handler = proc_dointvec_minmax,
68930 .extra1 = &zero,
68931 },
68932 + {
68933 + .procname = "heap_stack_gap",
68934 + .data = &sysctl_heap_stack_gap,
68935 + .maxlen = sizeof(sysctl_heap_stack_gap),
68936 + .mode = 0644,
68937 + .proc_handler = proc_doulongvec_minmax,
68938 + },
68939 #else
68940 {
68941 .procname = "nr_trim_pages",
68942 @@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
68943 buffer, lenp, ppos);
68944 }
68945
68946 +int proc_dostring_modpriv(struct ctl_table *table, int write,
68947 + void __user *buffer, size_t *lenp, loff_t *ppos)
68948 +{
68949 + if (write && !capable(CAP_SYS_MODULE))
68950 + return -EPERM;
68951 +
68952 + return _proc_do_string(table->data, table->maxlen, write,
68953 + buffer, lenp, ppos);
68954 +}
68955 +
68956 static size_t proc_skip_spaces(char **buf)
68957 {
68958 size_t ret;
68959 @@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
68960 len = strlen(tmp);
68961 if (len > *size)
68962 len = *size;
68963 + if (len > sizeof(tmp))
68964 + len = sizeof(tmp);
68965 if (copy_to_user(*buf, tmp, len))
68966 return -EFAULT;
68967 *size -= len;
68968 @@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
68969 return err;
68970 }
68971
68972 -#ifdef CONFIG_PRINTK
68973 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68974 void __user *buffer, size_t *lenp, loff_t *ppos)
68975 {
68976 @@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
68977
68978 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
68979 }
68980 -#endif
68981
68982 struct do_proc_dointvec_minmax_conv_param {
68983 int *min;
68984 @@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
68985 *i = val;
68986 } else {
68987 val = convdiv * (*i) / convmul;
68988 - if (!first)
68989 + if (!first) {
68990 err = proc_put_char(&buffer, &left, '\t');
68991 + if (err)
68992 + break;
68993 + }
68994 err = proc_put_long(&buffer, &left, val, false);
68995 if (err)
68996 break;
68997 @@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
68998 return -ENOSYS;
68999 }
69000
69001 +int proc_dostring_modpriv(struct ctl_table *table, int write,
69002 + void __user *buffer, size_t *lenp, loff_t *ppos)
69003 +{
69004 + return -ENOSYS;
69005 +}
69006 +
69007 int proc_dointvec(struct ctl_table *table, int write,
69008 void __user *buffer, size_t *lenp, loff_t *ppos)
69009 {
69010 @@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
69011 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69012 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69013 EXPORT_SYMBOL(proc_dostring);
69014 +EXPORT_SYMBOL(proc_dostring_modpriv);
69015 EXPORT_SYMBOL(proc_doulongvec_minmax);
69016 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
69017 diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
69018 index a650694..aaeeb20 100644
69019 --- a/kernel/sysctl_binary.c
69020 +++ b/kernel/sysctl_binary.c
69021 @@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69022 int i;
69023
69024 set_fs(KERNEL_DS);
69025 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69026 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69027 set_fs(old_fs);
69028 if (result < 0)
69029 goto out_kfree;
69030 @@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69031 }
69032
69033 set_fs(KERNEL_DS);
69034 - result = vfs_write(file, buffer, str - buffer, &pos);
69035 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69036 set_fs(old_fs);
69037 if (result < 0)
69038 goto out_kfree;
69039 @@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69040 int i;
69041
69042 set_fs(KERNEL_DS);
69043 - result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69044 + result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69045 set_fs(old_fs);
69046 if (result < 0)
69047 goto out_kfree;
69048 @@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69049 }
69050
69051 set_fs(KERNEL_DS);
69052 - result = vfs_write(file, buffer, str - buffer, &pos);
69053 + result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69054 set_fs(old_fs);
69055 if (result < 0)
69056 goto out_kfree;
69057 @@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69058 int i;
69059
69060 set_fs(KERNEL_DS);
69061 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69062 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69063 set_fs(old_fs);
69064 if (result < 0)
69065 goto out;
69066 @@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69067 __le16 dnaddr;
69068
69069 set_fs(KERNEL_DS);
69070 - result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69071 + result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69072 set_fs(old_fs);
69073 if (result < 0)
69074 goto out;
69075 @@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69076 le16_to_cpu(dnaddr) & 0x3ff);
69077
69078 set_fs(KERNEL_DS);
69079 - result = vfs_write(file, buf, len, &pos);
69080 + result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69081 set_fs(old_fs);
69082 if (result < 0)
69083 goto out;
69084 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69085 index e660464..c8b9e67 100644
69086 --- a/kernel/taskstats.c
69087 +++ b/kernel/taskstats.c
69088 @@ -27,9 +27,12 @@
69089 #include <linux/cgroup.h>
69090 #include <linux/fs.h>
69091 #include <linux/file.h>
69092 +#include <linux/grsecurity.h>
69093 #include <net/genetlink.h>
69094 #include <linux/atomic.h>
69095
69096 +extern int gr_is_taskstats_denied(int pid);
69097 +
69098 /*
69099 * Maximum length of a cpumask that can be specified in
69100 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
69101 @@ -556,6 +559,9 @@ err:
69102
69103 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69104 {
69105 + if (gr_is_taskstats_denied(current->pid))
69106 + return -EACCES;
69107 +
69108 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69109 return cmd_attr_register_cpumask(info);
69110 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
69111 diff --git a/kernel/time.c b/kernel/time.c
69112 index ba744cf..267b7c5 100644
69113 --- a/kernel/time.c
69114 +++ b/kernel/time.c
69115 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69116 return error;
69117
69118 if (tz) {
69119 + /* we log in do_settimeofday called below, so don't log twice
69120 + */
69121 + if (!tv)
69122 + gr_log_timechange();
69123 +
69124 sys_tz = *tz;
69125 update_vsyscall_tz();
69126 if (firsttime) {
69127 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
69128 index 8a538c5..def79d4 100644
69129 --- a/kernel/time/alarmtimer.c
69130 +++ b/kernel/time/alarmtimer.c
69131 @@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
69132 struct platform_device *pdev;
69133 int error = 0;
69134 int i;
69135 - struct k_clock alarm_clock = {
69136 + static struct k_clock alarm_clock = {
69137 .clock_getres = alarm_clock_getres,
69138 .clock_get = alarm_clock_get,
69139 .timer_create = alarm_timer_create,
69140 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
69141 index f113755..ec24223 100644
69142 --- a/kernel/time/tick-broadcast.c
69143 +++ b/kernel/time/tick-broadcast.c
69144 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
69145 * then clear the broadcast bit.
69146 */
69147 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69148 - int cpu = smp_processor_id();
69149 + cpu = smp_processor_id();
69150
69151 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69152 tick_broadcast_clear_oneshot(cpu);
69153 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
69154 index 7c50de8..e29a94d 100644
69155 --- a/kernel/time/timekeeping.c
69156 +++ b/kernel/time/timekeeping.c
69157 @@ -14,6 +14,7 @@
69158 #include <linux/init.h>
69159 #include <linux/mm.h>
69160 #include <linux/sched.h>
69161 +#include <linux/grsecurity.h>
69162 #include <linux/syscore_ops.h>
69163 #include <linux/clocksource.h>
69164 #include <linux/jiffies.h>
69165 @@ -388,6 +389,8 @@ int do_settimeofday(const struct timespec *tv)
69166 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69167 return -EINVAL;
69168
69169 + gr_log_timechange();
69170 +
69171 write_seqlock_irqsave(&timekeeper.lock, flags);
69172
69173 timekeeping_forward_now();
69174 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69175 index 3258455..f35227d 100644
69176 --- a/kernel/time/timer_list.c
69177 +++ b/kernel/time/timer_list.c
69178 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
69179
69180 static void print_name_offset(struct seq_file *m, void *sym)
69181 {
69182 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69183 + SEQ_printf(m, "<%p>", NULL);
69184 +#else
69185 char symname[KSYM_NAME_LEN];
69186
69187 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
69188 SEQ_printf(m, "<%pK>", sym);
69189 else
69190 SEQ_printf(m, "%s", symname);
69191 +#endif
69192 }
69193
69194 static void
69195 @@ -112,7 +116,11 @@ next_one:
69196 static void
69197 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69198 {
69199 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69200 + SEQ_printf(m, " .base: %p\n", NULL);
69201 +#else
69202 SEQ_printf(m, " .base: %pK\n", base);
69203 +#endif
69204 SEQ_printf(m, " .index: %d\n",
69205 base->index);
69206 SEQ_printf(m, " .resolution: %Lu nsecs\n",
69207 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
69208 {
69209 struct proc_dir_entry *pe;
69210
69211 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69212 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69213 +#else
69214 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69215 +#endif
69216 if (!pe)
69217 return -ENOMEM;
69218 return 0;
69219 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
69220 index 0b537f2..9e71eca 100644
69221 --- a/kernel/time/timer_stats.c
69222 +++ b/kernel/time/timer_stats.c
69223 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69224 static unsigned long nr_entries;
69225 static struct entry entries[MAX_ENTRIES];
69226
69227 -static atomic_t overflow_count;
69228 +static atomic_unchecked_t overflow_count;
69229
69230 /*
69231 * The entries are in a hash-table, for fast lookup:
69232 @@ -140,7 +140,7 @@ static void reset_entries(void)
69233 nr_entries = 0;
69234 memset(entries, 0, sizeof(entries));
69235 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69236 - atomic_set(&overflow_count, 0);
69237 + atomic_set_unchecked(&overflow_count, 0);
69238 }
69239
69240 static struct entry *alloc_entry(void)
69241 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69242 if (likely(entry))
69243 entry->count++;
69244 else
69245 - atomic_inc(&overflow_count);
69246 + atomic_inc_unchecked(&overflow_count);
69247
69248 out_unlock:
69249 raw_spin_unlock_irqrestore(lock, flags);
69250 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
69251
69252 static void print_name_offset(struct seq_file *m, unsigned long addr)
69253 {
69254 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69255 + seq_printf(m, "<%p>", NULL);
69256 +#else
69257 char symname[KSYM_NAME_LEN];
69258
69259 if (lookup_symbol_name(addr, symname) < 0)
69260 seq_printf(m, "<%p>", (void *)addr);
69261 else
69262 seq_printf(m, "%s", symname);
69263 +#endif
69264 }
69265
69266 static int tstats_show(struct seq_file *m, void *v)
69267 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
69268
69269 seq_puts(m, "Timer Stats Version: v0.2\n");
69270 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69271 - if (atomic_read(&overflow_count))
69272 + if (atomic_read_unchecked(&overflow_count))
69273 seq_printf(m, "Overflow: %d entries\n",
69274 - atomic_read(&overflow_count));
69275 + atomic_read_unchecked(&overflow_count));
69276
69277 for (i = 0; i < nr_entries; i++) {
69278 entry = entries + i;
69279 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
69280 {
69281 struct proc_dir_entry *pe;
69282
69283 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
69284 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69285 +#else
69286 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69287 +#endif
69288 if (!pe)
69289 return -ENOMEM;
69290 return 0;
69291 diff --git a/kernel/timer.c b/kernel/timer.c
69292 index a297ffc..5e16b0b 100644
69293 --- a/kernel/timer.c
69294 +++ b/kernel/timer.c
69295 @@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
69296 /*
69297 * This function runs timers and the timer-tq in bottom half context.
69298 */
69299 -static void run_timer_softirq(struct softirq_action *h)
69300 +static void run_timer_softirq(void)
69301 {
69302 struct tvec_base *base = __this_cpu_read(tvec_bases);
69303
69304 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
69305 index c0bd030..62a1927 100644
69306 --- a/kernel/trace/blktrace.c
69307 +++ b/kernel/trace/blktrace.c
69308 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
69309 struct blk_trace *bt = filp->private_data;
69310 char buf[16];
69311
69312 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69313 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69314
69315 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69316 }
69317 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
69318 return 1;
69319
69320 bt = buf->chan->private_data;
69321 - atomic_inc(&bt->dropped);
69322 + atomic_inc_unchecked(&bt->dropped);
69323 return 0;
69324 }
69325
69326 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
69327
69328 bt->dir = dir;
69329 bt->dev = dev;
69330 - atomic_set(&bt->dropped, 0);
69331 + atomic_set_unchecked(&bt->dropped, 0);
69332
69333 ret = -EIO;
69334 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
69335 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
69336 index 0fa92f6..89950b2 100644
69337 --- a/kernel/trace/ftrace.c
69338 +++ b/kernel/trace/ftrace.c
69339 @@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
69340 if (unlikely(ftrace_disabled))
69341 return 0;
69342
69343 + ret = ftrace_arch_code_modify_prepare();
69344 + FTRACE_WARN_ON(ret);
69345 + if (ret)
69346 + return 0;
69347 +
69348 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69349 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69350 if (ret) {
69351 ftrace_bug(ret, ip);
69352 - return 0;
69353 }
69354 - return 1;
69355 + return ret ? 0 : 1;
69356 }
69357
69358 /*
69359 @@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
69360
69361 int
69362 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69363 - void *data)
69364 + void *data)
69365 {
69366 struct ftrace_func_probe *entry;
69367 struct ftrace_page *pg;
69368 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
69369 index 55e4d4c..8c915ec 100644
69370 --- a/kernel/trace/trace.c
69371 +++ b/kernel/trace/trace.c
69372 @@ -4316,10 +4316,9 @@ static const struct file_operations tracing_dyn_info_fops = {
69373 };
69374 #endif
69375
69376 -static struct dentry *d_tracer;
69377 -
69378 struct dentry *tracing_init_dentry(void)
69379 {
69380 + static struct dentry *d_tracer;
69381 static int once;
69382
69383 if (d_tracer)
69384 @@ -4339,10 +4338,9 @@ struct dentry *tracing_init_dentry(void)
69385 return d_tracer;
69386 }
69387
69388 -static struct dentry *d_percpu;
69389 -
69390 struct dentry *tracing_dentry_percpu(void)
69391 {
69392 + static struct dentry *d_percpu;
69393 static int once;
69394 struct dentry *d_tracer;
69395
69396 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
69397 index 29111da..d190fe2 100644
69398 --- a/kernel/trace/trace_events.c
69399 +++ b/kernel/trace/trace_events.c
69400 @@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
69401 struct ftrace_module_file_ops {
69402 struct list_head list;
69403 struct module *mod;
69404 - struct file_operations id;
69405 - struct file_operations enable;
69406 - struct file_operations format;
69407 - struct file_operations filter;
69408 };
69409
69410 static struct ftrace_module_file_ops *
69411 @@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
69412
69413 file_ops->mod = mod;
69414
69415 - file_ops->id = ftrace_event_id_fops;
69416 - file_ops->id.owner = mod;
69417 -
69418 - file_ops->enable = ftrace_enable_fops;
69419 - file_ops->enable.owner = mod;
69420 -
69421 - file_ops->filter = ftrace_event_filter_fops;
69422 - file_ops->filter.owner = mod;
69423 -
69424 - file_ops->format = ftrace_event_format_fops;
69425 - file_ops->format.owner = mod;
69426 + pax_open_kernel();
69427 + *(void **)&mod->trace_id.owner = mod;
69428 + *(void **)&mod->trace_enable.owner = mod;
69429 + *(void **)&mod->trace_filter.owner = mod;
69430 + *(void **)&mod->trace_format.owner = mod;
69431 + pax_close_kernel();
69432
69433 list_add(&file_ops->list, &ftrace_module_file_list);
69434
69435 @@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
69436
69437 for_each_event(call, start, end) {
69438 __trace_add_event_call(*call, mod,
69439 - &file_ops->id, &file_ops->enable,
69440 - &file_ops->filter, &file_ops->format);
69441 + &mod->trace_id, &mod->trace_enable,
69442 + &mod->trace_filter, &mod->trace_format);
69443 }
69444 }
69445
69446 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
69447 index 580a05e..9b31acb 100644
69448 --- a/kernel/trace/trace_kprobe.c
69449 +++ b/kernel/trace/trace_kprobe.c
69450 @@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69451 long ret;
69452 int maxlen = get_rloc_len(*(u32 *)dest);
69453 u8 *dst = get_rloc_data(dest);
69454 - u8 *src = addr;
69455 + const u8 __user *src = (const u8 __force_user *)addr;
69456 mm_segment_t old_fs = get_fs();
69457 if (!maxlen)
69458 return;
69459 @@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69460 pagefault_disable();
69461 do
69462 ret = __copy_from_user_inatomic(dst++, src++, 1);
69463 - while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69464 + while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69465 dst[-1] = '\0';
69466 pagefault_enable();
69467 set_fs(old_fs);
69468 @@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
69469 ((u8 *)get_rloc_data(dest))[0] = '\0';
69470 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69471 } else
69472 - *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69473 + *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69474 get_rloc_offs(*(u32 *)dest));
69475 }
69476 /* Return the length of string -- including null terminal byte */
69477 @@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
69478 set_fs(KERNEL_DS);
69479 pagefault_disable();
69480 do {
69481 - ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69482 + ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69483 len++;
69484 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69485 pagefault_enable();
69486 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69487 index fd3c8aa..5f324a6 100644
69488 --- a/kernel/trace/trace_mmiotrace.c
69489 +++ b/kernel/trace/trace_mmiotrace.c
69490 @@ -24,7 +24,7 @@ struct header_iter {
69491 static struct trace_array *mmio_trace_array;
69492 static bool overrun_detected;
69493 static unsigned long prev_overruns;
69494 -static atomic_t dropped_count;
69495 +static atomic_unchecked_t dropped_count;
69496
69497 static void mmio_reset_data(struct trace_array *tr)
69498 {
69499 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
69500
69501 static unsigned long count_overruns(struct trace_iterator *iter)
69502 {
69503 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
69504 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69505 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69506
69507 if (over > prev_overruns)
69508 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
69509 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69510 sizeof(*entry), 0, pc);
69511 if (!event) {
69512 - atomic_inc(&dropped_count);
69513 + atomic_inc_unchecked(&dropped_count);
69514 return;
69515 }
69516 entry = ring_buffer_event_data(event);
69517 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
69518 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69519 sizeof(*entry), 0, pc);
69520 if (!event) {
69521 - atomic_inc(&dropped_count);
69522 + atomic_inc_unchecked(&dropped_count);
69523 return;
69524 }
69525 entry = ring_buffer_event_data(event);
69526 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
69527 index df611a0..10d8b32 100644
69528 --- a/kernel/trace/trace_output.c
69529 +++ b/kernel/trace/trace_output.c
69530 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
69531
69532 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69533 if (!IS_ERR(p)) {
69534 - p = mangle_path(s->buffer + s->len, p, "\n");
69535 + p = mangle_path(s->buffer + s->len, p, "\n\\");
69536 if (p) {
69537 s->len = p - s->buffer;
69538 return 1;
69539 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
69540 index d4545f4..a9010a1 100644
69541 --- a/kernel/trace/trace_stack.c
69542 +++ b/kernel/trace/trace_stack.c
69543 @@ -53,7 +53,7 @@ static inline void check_stack(void)
69544 return;
69545
69546 /* we do not handle interrupt stacks yet */
69547 - if (!object_is_on_stack(&this_size))
69548 + if (!object_starts_on_stack(&this_size))
69549 return;
69550
69551 local_irq_save(flags);
69552 diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69553 index 209b379..7f76423 100644
69554 --- a/kernel/trace/trace_workqueue.c
69555 +++ b/kernel/trace/trace_workqueue.c
69556 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69557 int cpu;
69558 pid_t pid;
69559 /* Can be inserted from interrupt or user context, need to be atomic */
69560 - atomic_t inserted;
69561 + atomic_unchecked_t inserted;
69562 /*
69563 * Don't need to be atomic, works are serialized in a single workqueue thread
69564 * on a single CPU.
69565 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69566 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69567 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69568 if (node->pid == wq_thread->pid) {
69569 - atomic_inc(&node->inserted);
69570 + atomic_inc_unchecked(&node->inserted);
69571 goto found;
69572 }
69573 }
69574 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
69575 tsk = get_pid_task(pid, PIDTYPE_PID);
69576 if (tsk) {
69577 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69578 - atomic_read(&cws->inserted), cws->executed,
69579 + atomic_read_unchecked(&cws->inserted), cws->executed,
69580 tsk->comm);
69581 put_task_struct(tsk);
69582 }
69583 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
69584 index 6777153..8519f60 100644
69585 --- a/lib/Kconfig.debug
69586 +++ b/lib/Kconfig.debug
69587 @@ -1132,6 +1132,7 @@ config LATENCYTOP
69588 depends on DEBUG_KERNEL
69589 depends on STACKTRACE_SUPPORT
69590 depends on PROC_FS
69591 + depends on !GRKERNSEC_HIDESYM
69592 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
69593 select KALLSYMS
69594 select KALLSYMS_ALL
69595 diff --git a/lib/bitmap.c b/lib/bitmap.c
69596 index b5a8b6a..a69623c 100644
69597 --- a/lib/bitmap.c
69598 +++ b/lib/bitmap.c
69599 @@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
69600 {
69601 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69602 u32 chunk;
69603 - const char __user __force *ubuf = (const char __user __force *)buf;
69604 + const char __user *ubuf = (const char __force_user *)buf;
69605
69606 bitmap_zero(maskp, nmaskbits);
69607
69608 @@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
69609 {
69610 if (!access_ok(VERIFY_READ, ubuf, ulen))
69611 return -EFAULT;
69612 - return __bitmap_parse((const char __force *)ubuf,
69613 + return __bitmap_parse((const char __force_kernel *)ubuf,
69614 ulen, 1, maskp, nmaskbits);
69615
69616 }
69617 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
69618 {
69619 unsigned a, b;
69620 int c, old_c, totaldigits;
69621 - const char __user __force *ubuf = (const char __user __force *)buf;
69622 + const char __user *ubuf = (const char __force_user *)buf;
69623 int exp_digit, in_range;
69624
69625 totaldigits = c = 0;
69626 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
69627 {
69628 if (!access_ok(VERIFY_READ, ubuf, ulen))
69629 return -EFAULT;
69630 - return __bitmap_parselist((const char __force *)ubuf,
69631 + return __bitmap_parselist((const char __force_kernel *)ubuf,
69632 ulen, 1, maskp, nmaskbits);
69633 }
69634 EXPORT_SYMBOL(bitmap_parselist_user);
69635 diff --git a/lib/bug.c b/lib/bug.c
69636 index a28c141..2bd3d95 100644
69637 --- a/lib/bug.c
69638 +++ b/lib/bug.c
69639 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
69640 return BUG_TRAP_TYPE_NONE;
69641
69642 bug = find_bug(bugaddr);
69643 + if (!bug)
69644 + return BUG_TRAP_TYPE_NONE;
69645
69646 file = NULL;
69647 line = 0;
69648 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
69649 index 0ab9ae8..f01ceca 100644
69650 --- a/lib/debugobjects.c
69651 +++ b/lib/debugobjects.c
69652 @@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
69653 if (limit > 4)
69654 return;
69655
69656 - is_on_stack = object_is_on_stack(addr);
69657 + is_on_stack = object_starts_on_stack(addr);
69658 if (is_on_stack == onstack)
69659 return;
69660
69661 diff --git a/lib/devres.c b/lib/devres.c
69662 index 80b9c76..9e32279 100644
69663 --- a/lib/devres.c
69664 +++ b/lib/devres.c
69665 @@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69666 void devm_iounmap(struct device *dev, void __iomem *addr)
69667 {
69668 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69669 - (void *)addr));
69670 + (void __force *)addr));
69671 iounmap(addr);
69672 }
69673 EXPORT_SYMBOL(devm_iounmap);
69674 @@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
69675 {
69676 ioport_unmap(addr);
69677 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69678 - devm_ioport_map_match, (void *)addr));
69679 + devm_ioport_map_match, (void __force *)addr));
69680 }
69681 EXPORT_SYMBOL(devm_ioport_unmap);
69682
69683 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
69684 index 13ef233..5241683 100644
69685 --- a/lib/dma-debug.c
69686 +++ b/lib/dma-debug.c
69687 @@ -924,7 +924,7 @@ out:
69688
69689 static void check_for_stack(struct device *dev, void *addr)
69690 {
69691 - if (object_is_on_stack(addr))
69692 + if (object_starts_on_stack(addr))
69693 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69694 "stack [addr=%p]\n", addr);
69695 }
69696 diff --git a/lib/extable.c b/lib/extable.c
69697 index 4cac81e..63e9b8f 100644
69698 --- a/lib/extable.c
69699 +++ b/lib/extable.c
69700 @@ -13,6 +13,7 @@
69701 #include <linux/init.h>
69702 #include <linux/sort.h>
69703 #include <asm/uaccess.h>
69704 +#include <asm/pgtable.h>
69705
69706 #ifndef ARCH_HAS_SORT_EXTABLE
69707 /*
69708 @@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
69709 void sort_extable(struct exception_table_entry *start,
69710 struct exception_table_entry *finish)
69711 {
69712 + pax_open_kernel();
69713 sort(start, finish - start, sizeof(struct exception_table_entry),
69714 cmp_ex, NULL);
69715 + pax_close_kernel();
69716 }
69717
69718 #ifdef CONFIG_MODULES
69719 diff --git a/lib/inflate.c b/lib/inflate.c
69720 index 013a761..c28f3fc 100644
69721 --- a/lib/inflate.c
69722 +++ b/lib/inflate.c
69723 @@ -269,7 +269,7 @@ static void free(void *where)
69724 malloc_ptr = free_mem_ptr;
69725 }
69726 #else
69727 -#define malloc(a) kmalloc(a, GFP_KERNEL)
69728 +#define malloc(a) kmalloc((a), GFP_KERNEL)
69729 #define free(a) kfree(a)
69730 #endif
69731
69732 diff --git a/lib/ioremap.c b/lib/ioremap.c
69733 index 0c9216c..863bd89 100644
69734 --- a/lib/ioremap.c
69735 +++ b/lib/ioremap.c
69736 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
69737 unsigned long next;
69738
69739 phys_addr -= addr;
69740 - pmd = pmd_alloc(&init_mm, pud, addr);
69741 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
69742 if (!pmd)
69743 return -ENOMEM;
69744 do {
69745 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
69746 unsigned long next;
69747
69748 phys_addr -= addr;
69749 - pud = pud_alloc(&init_mm, pgd, addr);
69750 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
69751 if (!pud)
69752 return -ENOMEM;
69753 do {
69754 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
69755 index bd2bea9..6b3c95e 100644
69756 --- a/lib/is_single_threaded.c
69757 +++ b/lib/is_single_threaded.c
69758 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
69759 struct task_struct *p, *t;
69760 bool ret;
69761
69762 + if (!mm)
69763 + return true;
69764 +
69765 if (atomic_read(&task->signal->live) != 1)
69766 return false;
69767
69768 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
69769 index 3ac50dc..240bb7e 100644
69770 --- a/lib/radix-tree.c
69771 +++ b/lib/radix-tree.c
69772 @@ -79,7 +79,7 @@ struct radix_tree_preload {
69773 int nr;
69774 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
69775 };
69776 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
69777 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
69778
69779 static inline void *ptr_to_indirect(void *ptr)
69780 {
69781 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
69782 index abbabec..b69d6dd 100644
69783 --- a/lib/vsprintf.c
69784 +++ b/lib/vsprintf.c
69785 @@ -16,6 +16,9 @@
69786 * - scnprintf and vscnprintf
69787 */
69788
69789 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69790 +#define __INCLUDED_BY_HIDESYM 1
69791 +#endif
69792 #include <stdarg.h>
69793 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
69794 #include <linux/types.h>
69795 @@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
69796 char sym[KSYM_SYMBOL_LEN];
69797 if (ext == 'B')
69798 sprint_backtrace(sym, value);
69799 - else if (ext != 'f' && ext != 's')
69800 + else if (ext != 'f' && ext != 's' && ext != 'a')
69801 sprint_symbol(sym, value);
69802 else
69803 kallsyms_lookup(value, NULL, NULL, NULL, sym);
69804 @@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
69805 return number(buf, end, *(const netdev_features_t *)addr, spec);
69806 }
69807
69808 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69809 +int kptr_restrict __read_mostly = 2;
69810 +#else
69811 int kptr_restrict __read_mostly;
69812 +#endif
69813
69814 /*
69815 * Show a '%p' thing. A kernel extension is that the '%p' is followed
69816 @@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
69817 * - 'S' For symbolic direct pointers with offset
69818 * - 's' For symbolic direct pointers without offset
69819 * - 'B' For backtraced symbolic direct pointers with offset
69820 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
69821 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
69822 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
69823 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
69824 * - 'M' For a 6-byte MAC address, it prints the address in the
69825 @@ -866,14 +875,25 @@ static noinline_for_stack
69826 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69827 struct printf_spec spec)
69828 {
69829 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69830 + /* 'P' = approved pointers to copy to userland,
69831 + as in the /proc/kallsyms case, as we make it display nothing
69832 + for non-root users, and the real contents for root users
69833 + */
69834 + if (ptr > TASK_SIZE && *fmt != 'P' && is_usercopy_alloc(buf)) {
69835 + ptr = NULL;
69836 + goto simple;
69837 + }
69838 +#endif
69839 +
69840 if (!ptr && *fmt != 'K') {
69841 /*
69842 - * Print (null) with the same width as a pointer so it makes
69843 + * Print (nil) with the same width as a pointer so it makes
69844 * tabular output look nice.
69845 */
69846 if (spec.field_width == -1)
69847 spec.field_width = 2 * sizeof(void *);
69848 - return string(buf, end, "(null)", spec);
69849 + return string(buf, end, "(nil)", spec);
69850 }
69851
69852 switch (*fmt) {
69853 @@ -883,6 +903,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69854 /* Fallthrough */
69855 case 'S':
69856 case 's':
69857 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69858 + break;
69859 +#else
69860 + return symbol_string(buf, end, ptr, spec, *fmt);
69861 +#endif
69862 + case 'A':
69863 + case 'a':
69864 case 'B':
69865 return symbol_string(buf, end, ptr, spec, *fmt);
69866 case 'R':
69867 @@ -920,6 +947,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69868 va_end(va);
69869 return buf;
69870 }
69871 + case 'P':
69872 + break;
69873 case 'K':
69874 /*
69875 * %pK cannot be used in IRQ context because its test
69876 @@ -942,6 +971,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
69877 }
69878 break;
69879 }
69880 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69881 +simple:
69882 +#endif
69883 spec.flags |= SMALL;
69884 if (spec.field_width == -1) {
69885 spec.field_width = 2 * sizeof(void *);
69886 @@ -1653,11 +1685,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69887 typeof(type) value; \
69888 if (sizeof(type) == 8) { \
69889 args = PTR_ALIGN(args, sizeof(u32)); \
69890 - *(u32 *)&value = *(u32 *)args; \
69891 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
69892 + *(u32 *)&value = *(const u32 *)args; \
69893 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
69894 } else { \
69895 args = PTR_ALIGN(args, sizeof(type)); \
69896 - value = *(typeof(type) *)args; \
69897 + value = *(const typeof(type) *)args; \
69898 } \
69899 args += sizeof(type); \
69900 value; \
69901 @@ -1720,7 +1752,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
69902 case FORMAT_TYPE_STR: {
69903 const char *str_arg = args;
69904 args += strlen(str_arg) + 1;
69905 - str = string(str, end, (char *)str_arg, spec);
69906 + str = string(str, end, str_arg, spec);
69907 break;
69908 }
69909
69910 diff --git a/localversion-grsec b/localversion-grsec
69911 new file mode 100644
69912 index 0000000..7cd6065
69913 --- /dev/null
69914 +++ b/localversion-grsec
69915 @@ -0,0 +1 @@
69916 +-grsec
69917 diff --git a/mm/Kconfig b/mm/Kconfig
69918 index e338407..4210331 100644
69919 --- a/mm/Kconfig
69920 +++ b/mm/Kconfig
69921 @@ -247,10 +247,10 @@ config KSM
69922 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
69923
69924 config DEFAULT_MMAP_MIN_ADDR
69925 - int "Low address space to protect from user allocation"
69926 + int "Low address space to protect from user allocation"
69927 depends on MMU
69928 - default 4096
69929 - help
69930 + default 65536
69931 + help
69932 This is the portion of low virtual memory which should be protected
69933 from userspace allocation. Keeping a user from writing to low pages
69934 can help reduce the impact of kernel NULL pointer bugs.
69935 @@ -280,7 +280,7 @@ config MEMORY_FAILURE
69936
69937 config HWPOISON_INJECT
69938 tristate "HWPoison pages injector"
69939 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
69940 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
69941 select PROC_PAGE_MONITOR
69942
69943 config NOMMU_INITIAL_TRIM_EXCESS
69944 diff --git a/mm/filemap.c b/mm/filemap.c
69945 index 79c4b2b..596b417 100644
69946 --- a/mm/filemap.c
69947 +++ b/mm/filemap.c
69948 @@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
69949 struct address_space *mapping = file->f_mapping;
69950
69951 if (!mapping->a_ops->readpage)
69952 - return -ENOEXEC;
69953 + return -ENODEV;
69954 file_accessed(file);
69955 vma->vm_ops = &generic_file_vm_ops;
69956 vma->vm_flags |= VM_CAN_NONLINEAR;
69957 @@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
69958 *pos = i_size_read(inode);
69959
69960 if (limit != RLIM_INFINITY) {
69961 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
69962 if (*pos >= limit) {
69963 send_sig(SIGXFSZ, current, 0);
69964 return -EFBIG;
69965 diff --git a/mm/fremap.c b/mm/fremap.c
69966 index 9ed4fd4..c42648d 100644
69967 --- a/mm/fremap.c
69968 +++ b/mm/fremap.c
69969 @@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
69970 retry:
69971 vma = find_vma(mm, start);
69972
69973 +#ifdef CONFIG_PAX_SEGMEXEC
69974 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
69975 + goto out;
69976 +#endif
69977 +
69978 /*
69979 * Make sure the vma is shared, that it supports prefaulting,
69980 * and that the remapped range is valid and fully within
69981 diff --git a/mm/highmem.c b/mm/highmem.c
69982 index 57d82c6..e9e0552 100644
69983 --- a/mm/highmem.c
69984 +++ b/mm/highmem.c
69985 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
69986 * So no dangers, even with speculative execution.
69987 */
69988 page = pte_page(pkmap_page_table[i]);
69989 + pax_open_kernel();
69990 pte_clear(&init_mm, (unsigned long)page_address(page),
69991 &pkmap_page_table[i]);
69992 -
69993 + pax_close_kernel();
69994 set_page_address(page, NULL);
69995 need_flush = 1;
69996 }
69997 @@ -186,9 +187,11 @@ start:
69998 }
69999 }
70000 vaddr = PKMAP_ADDR(last_pkmap_nr);
70001 +
70002 + pax_open_kernel();
70003 set_pte_at(&init_mm, vaddr,
70004 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
70005 -
70006 + pax_close_kernel();
70007 pkmap_count[last_pkmap_nr] = 1;
70008 set_page_address(page, (void *)vaddr);
70009
70010 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
70011 index f0e5306..cb9398e 100644
70012 --- a/mm/huge_memory.c
70013 +++ b/mm/huge_memory.c
70014 @@ -733,7 +733,7 @@ out:
70015 * run pte_offset_map on the pmd, if an huge pmd could
70016 * materialize from under us from a different thread.
70017 */
70018 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70019 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70020 return VM_FAULT_OOM;
70021 /* if an huge pmd materialized from under us just retry later */
70022 if (unlikely(pmd_trans_huge(*pmd)))
70023 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
70024 index 263e177..3f36aec 100644
70025 --- a/mm/hugetlb.c
70026 +++ b/mm/hugetlb.c
70027 @@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
70028 return 1;
70029 }
70030
70031 +#ifdef CONFIG_PAX_SEGMEXEC
70032 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70033 +{
70034 + struct mm_struct *mm = vma->vm_mm;
70035 + struct vm_area_struct *vma_m;
70036 + unsigned long address_m;
70037 + pte_t *ptep_m;
70038 +
70039 + vma_m = pax_find_mirror_vma(vma);
70040 + if (!vma_m)
70041 + return;
70042 +
70043 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70044 + address_m = address + SEGMEXEC_TASK_SIZE;
70045 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70046 + get_page(page_m);
70047 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
70048 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70049 +}
70050 +#endif
70051 +
70052 /*
70053 * Hugetlb_cow() should be called with page lock of the original hugepage held.
70054 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
70055 @@ -2558,6 +2579,11 @@ retry_avoidcopy:
70056 make_huge_pte(vma, new_page, 1));
70057 page_remove_rmap(old_page);
70058 hugepage_add_new_anon_rmap(new_page, vma, address);
70059 +
70060 +#ifdef CONFIG_PAX_SEGMEXEC
70061 + pax_mirror_huge_pte(vma, address, new_page);
70062 +#endif
70063 +
70064 /* Make the old page be freed below */
70065 new_page = old_page;
70066 mmu_notifier_invalidate_range_end(mm,
70067 @@ -2712,6 +2738,10 @@ retry:
70068 && (vma->vm_flags & VM_SHARED)));
70069 set_huge_pte_at(mm, address, ptep, new_pte);
70070
70071 +#ifdef CONFIG_PAX_SEGMEXEC
70072 + pax_mirror_huge_pte(vma, address, page);
70073 +#endif
70074 +
70075 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70076 /* Optimization, do the COW without a second fault */
70077 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
70078 @@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70079 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70080 struct hstate *h = hstate_vma(vma);
70081
70082 +#ifdef CONFIG_PAX_SEGMEXEC
70083 + struct vm_area_struct *vma_m;
70084 +#endif
70085 +
70086 address &= huge_page_mask(h);
70087
70088 ptep = huge_pte_offset(mm, address);
70089 @@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70090 VM_FAULT_SET_HINDEX(h - hstates);
70091 }
70092
70093 +#ifdef CONFIG_PAX_SEGMEXEC
70094 + vma_m = pax_find_mirror_vma(vma);
70095 + if (vma_m) {
70096 + unsigned long address_m;
70097 +
70098 + if (vma->vm_start > vma_m->vm_start) {
70099 + address_m = address;
70100 + address -= SEGMEXEC_TASK_SIZE;
70101 + vma = vma_m;
70102 + h = hstate_vma(vma);
70103 + } else
70104 + address_m = address + SEGMEXEC_TASK_SIZE;
70105 +
70106 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70107 + return VM_FAULT_OOM;
70108 + address_m &= HPAGE_MASK;
70109 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70110 + }
70111 +#endif
70112 +
70113 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70114 if (!ptep)
70115 return VM_FAULT_OOM;
70116 diff --git a/mm/internal.h b/mm/internal.h
70117 index 2189af4..f2ca332 100644
70118 --- a/mm/internal.h
70119 +++ b/mm/internal.h
70120 @@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
70121 * in mm/page_alloc.c
70122 */
70123 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70124 +extern void free_compound_page(struct page *page);
70125 extern void prep_compound_page(struct page *page, unsigned long order);
70126 #ifdef CONFIG_MEMORY_FAILURE
70127 extern bool is_free_buddy_page(struct page *page);
70128 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
70129 index 45eb621..6ccd8ea 100644
70130 --- a/mm/kmemleak.c
70131 +++ b/mm/kmemleak.c
70132 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
70133
70134 for (i = 0; i < object->trace_len; i++) {
70135 void *ptr = (void *)object->trace[i];
70136 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70137 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70138 }
70139 }
70140
70141 diff --git a/mm/maccess.c b/mm/maccess.c
70142 index d53adf9..03a24bf 100644
70143 --- a/mm/maccess.c
70144 +++ b/mm/maccess.c
70145 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
70146 set_fs(KERNEL_DS);
70147 pagefault_disable();
70148 ret = __copy_from_user_inatomic(dst,
70149 - (__force const void __user *)src, size);
70150 + (const void __force_user *)src, size);
70151 pagefault_enable();
70152 set_fs(old_fs);
70153
70154 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
70155
70156 set_fs(KERNEL_DS);
70157 pagefault_disable();
70158 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70159 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70160 pagefault_enable();
70161 set_fs(old_fs);
70162
70163 diff --git a/mm/madvise.c b/mm/madvise.c
70164 index 55f645c..cde5320 100644
70165 --- a/mm/madvise.c
70166 +++ b/mm/madvise.c
70167 @@ -46,6 +46,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
70168 pgoff_t pgoff;
70169 unsigned long new_flags = vma->vm_flags;
70170
70171 +#ifdef CONFIG_PAX_SEGMEXEC
70172 + struct vm_area_struct *vma_m;
70173 +#endif
70174 +
70175 switch (behavior) {
70176 case MADV_NORMAL:
70177 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
70178 @@ -117,6 +121,13 @@ success:
70179 /*
70180 * vm_flags is protected by the mmap_sem held in write mode.
70181 */
70182 +
70183 +#ifdef CONFIG_PAX_SEGMEXEC
70184 + vma_m = pax_find_mirror_vma(vma);
70185 + if (vma_m)
70186 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70187 +#endif
70188 +
70189 vma->vm_flags = new_flags;
70190
70191 out:
70192 @@ -175,6 +186,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70193 struct vm_area_struct ** prev,
70194 unsigned long start, unsigned long end)
70195 {
70196 +
70197 +#ifdef CONFIG_PAX_SEGMEXEC
70198 + struct vm_area_struct *vma_m;
70199 +#endif
70200 +
70201 *prev = vma;
70202 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70203 return -EINVAL;
70204 @@ -187,6 +203,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
70205 zap_page_range(vma, start, end - start, &details);
70206 } else
70207 zap_page_range(vma, start, end - start, NULL);
70208 +
70209 +#ifdef CONFIG_PAX_SEGMEXEC
70210 + vma_m = pax_find_mirror_vma(vma);
70211 + if (vma_m) {
70212 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70213 + struct zap_details details = {
70214 + .nonlinear_vma = vma_m,
70215 + .last_index = ULONG_MAX,
70216 + };
70217 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70218 + } else
70219 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70220 + }
70221 +#endif
70222 +
70223 return 0;
70224 }
70225
70226 @@ -394,6 +425,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
70227 if (end < start)
70228 goto out;
70229
70230 +#ifdef CONFIG_PAX_SEGMEXEC
70231 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70232 + if (end > SEGMEXEC_TASK_SIZE)
70233 + goto out;
70234 + } else
70235 +#endif
70236 +
70237 + if (end > TASK_SIZE)
70238 + goto out;
70239 +
70240 error = 0;
70241 if (end == start)
70242 goto out;
70243 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
70244 index 97cc273..6ed703f 100644
70245 --- a/mm/memory-failure.c
70246 +++ b/mm/memory-failure.c
70247 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
70248
70249 int sysctl_memory_failure_recovery __read_mostly = 1;
70250
70251 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70252 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70253
70254 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70255
70256 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
70257 pfn, t->comm, t->pid);
70258 si.si_signo = SIGBUS;
70259 si.si_errno = 0;
70260 - si.si_addr = (void *)addr;
70261 + si.si_addr = (void __user *)addr;
70262 #ifdef __ARCH_SI_TRAPNO
70263 si.si_trapno = trapno;
70264 #endif
70265 @@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70266 }
70267
70268 nr_pages = 1 << compound_trans_order(hpage);
70269 - atomic_long_add(nr_pages, &mce_bad_pages);
70270 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70271
70272 /*
70273 * We need/can do nothing about count=0 pages.
70274 @@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70275 if (!PageHWPoison(hpage)
70276 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70277 || (p != hpage && TestSetPageHWPoison(hpage))) {
70278 - atomic_long_sub(nr_pages, &mce_bad_pages);
70279 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70280 return 0;
70281 }
70282 set_page_hwpoison_huge_page(hpage);
70283 @@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
70284 }
70285 if (hwpoison_filter(p)) {
70286 if (TestClearPageHWPoison(p))
70287 - atomic_long_sub(nr_pages, &mce_bad_pages);
70288 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70289 unlock_page(hpage);
70290 put_page(hpage);
70291 return 0;
70292 @@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
70293 return 0;
70294 }
70295 if (TestClearPageHWPoison(p))
70296 - atomic_long_sub(nr_pages, &mce_bad_pages);
70297 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70298 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70299 return 0;
70300 }
70301 @@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
70302 */
70303 if (TestClearPageHWPoison(page)) {
70304 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70305 - atomic_long_sub(nr_pages, &mce_bad_pages);
70306 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70307 freeit = 1;
70308 if (PageHuge(page))
70309 clear_page_hwpoison_huge_page(page);
70310 @@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
70311 }
70312 done:
70313 if (!PageHWPoison(hpage))
70314 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70315 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70316 set_page_hwpoison_huge_page(hpage);
70317 dequeue_hwpoisoned_huge_page(hpage);
70318 /* keep elevated page count for bad page */
70319 @@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
70320 return ret;
70321
70322 done:
70323 - atomic_long_add(1, &mce_bad_pages);
70324 + atomic_long_add_unchecked(1, &mce_bad_pages);
70325 SetPageHWPoison(page);
70326 /* keep elevated page count for bad page */
70327 return ret;
70328 diff --git a/mm/memory.c b/mm/memory.c
70329 index 6105f47..3363489 100644
70330 --- a/mm/memory.c
70331 +++ b/mm/memory.c
70332 @@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
70333 return;
70334
70335 pmd = pmd_offset(pud, start);
70336 +
70337 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70338 pud_clear(pud);
70339 pmd_free_tlb(tlb, pmd, start);
70340 +#endif
70341 +
70342 }
70343
70344 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70345 @@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
70346 if (end - 1 > ceiling - 1)
70347 return;
70348
70349 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
70350 pud = pud_offset(pgd, start);
70351 pgd_clear(pgd);
70352 pud_free_tlb(tlb, pud, start);
70353 +#endif
70354 +
70355 }
70356
70357 /*
70358 @@ -1597,12 +1604,6 @@ no_page_table:
70359 return page;
70360 }
70361
70362 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70363 -{
70364 - return stack_guard_page_start(vma, addr) ||
70365 - stack_guard_page_end(vma, addr+PAGE_SIZE);
70366 -}
70367 -
70368 /**
70369 * __get_user_pages() - pin user pages in memory
70370 * @tsk: task_struct of target task
70371 @@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70372 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
70373 i = 0;
70374
70375 - do {
70376 + while (nr_pages) {
70377 struct vm_area_struct *vma;
70378
70379 - vma = find_extend_vma(mm, start);
70380 + vma = find_vma(mm, start);
70381 if (!vma && in_gate_area(mm, start)) {
70382 unsigned long pg = start & PAGE_MASK;
70383 pgd_t *pgd;
70384 @@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70385 goto next_page;
70386 }
70387
70388 - if (!vma ||
70389 + if (!vma || start < vma->vm_start ||
70390 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
70391 !(vm_flags & vma->vm_flags))
70392 return i ? : -EFAULT;
70393 @@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70394 int ret;
70395 unsigned int fault_flags = 0;
70396
70397 - /* For mlock, just skip the stack guard page. */
70398 - if (foll_flags & FOLL_MLOCK) {
70399 - if (stack_guard_page(vma, start))
70400 - goto next_page;
70401 - }
70402 if (foll_flags & FOLL_WRITE)
70403 fault_flags |= FAULT_FLAG_WRITE;
70404 if (nonblocking)
70405 @@ -1831,7 +1827,7 @@ next_page:
70406 start += PAGE_SIZE;
70407 nr_pages--;
70408 } while (nr_pages && start < vma->vm_end);
70409 - } while (nr_pages);
70410 + }
70411 return i;
70412 }
70413 EXPORT_SYMBOL(__get_user_pages);
70414 @@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
70415 page_add_file_rmap(page);
70416 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70417
70418 +#ifdef CONFIG_PAX_SEGMEXEC
70419 + pax_mirror_file_pte(vma, addr, page, ptl);
70420 +#endif
70421 +
70422 retval = 0;
70423 pte_unmap_unlock(pte, ptl);
70424 return retval;
70425 @@ -2072,10 +2072,22 @@ out:
70426 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70427 struct page *page)
70428 {
70429 +
70430 +#ifdef CONFIG_PAX_SEGMEXEC
70431 + struct vm_area_struct *vma_m;
70432 +#endif
70433 +
70434 if (addr < vma->vm_start || addr >= vma->vm_end)
70435 return -EFAULT;
70436 if (!page_count(page))
70437 return -EINVAL;
70438 +
70439 +#ifdef CONFIG_PAX_SEGMEXEC
70440 + vma_m = pax_find_mirror_vma(vma);
70441 + if (vma_m)
70442 + vma_m->vm_flags |= VM_INSERTPAGE;
70443 +#endif
70444 +
70445 vma->vm_flags |= VM_INSERTPAGE;
70446 return insert_page(vma, addr, page, vma->vm_page_prot);
70447 }
70448 @@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
70449 unsigned long pfn)
70450 {
70451 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70452 + BUG_ON(vma->vm_mirror);
70453
70454 if (addr < vma->vm_start || addr >= vma->vm_end)
70455 return -EFAULT;
70456 @@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
70457
70458 BUG_ON(pud_huge(*pud));
70459
70460 - pmd = pmd_alloc(mm, pud, addr);
70461 + pmd = (mm == &init_mm) ?
70462 + pmd_alloc_kernel(mm, pud, addr) :
70463 + pmd_alloc(mm, pud, addr);
70464 if (!pmd)
70465 return -ENOMEM;
70466 do {
70467 @@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
70468 unsigned long next;
70469 int err;
70470
70471 - pud = pud_alloc(mm, pgd, addr);
70472 + pud = (mm == &init_mm) ?
70473 + pud_alloc_kernel(mm, pgd, addr) :
70474 + pud_alloc(mm, pgd, addr);
70475 if (!pud)
70476 return -ENOMEM;
70477 do {
70478 @@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70479 copy_user_highpage(dst, src, va, vma);
70480 }
70481
70482 +#ifdef CONFIG_PAX_SEGMEXEC
70483 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70484 +{
70485 + struct mm_struct *mm = vma->vm_mm;
70486 + spinlock_t *ptl;
70487 + pte_t *pte, entry;
70488 +
70489 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70490 + entry = *pte;
70491 + if (!pte_present(entry)) {
70492 + if (!pte_none(entry)) {
70493 + BUG_ON(pte_file(entry));
70494 + free_swap_and_cache(pte_to_swp_entry(entry));
70495 + pte_clear_not_present_full(mm, address, pte, 0);
70496 + }
70497 + } else {
70498 + struct page *page;
70499 +
70500 + flush_cache_page(vma, address, pte_pfn(entry));
70501 + entry = ptep_clear_flush(vma, address, pte);
70502 + BUG_ON(pte_dirty(entry));
70503 + page = vm_normal_page(vma, address, entry);
70504 + if (page) {
70505 + update_hiwater_rss(mm);
70506 + if (PageAnon(page))
70507 + dec_mm_counter_fast(mm, MM_ANONPAGES);
70508 + else
70509 + dec_mm_counter_fast(mm, MM_FILEPAGES);
70510 + page_remove_rmap(page);
70511 + page_cache_release(page);
70512 + }
70513 + }
70514 + pte_unmap_unlock(pte, ptl);
70515 +}
70516 +
70517 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
70518 + *
70519 + * the ptl of the lower mapped page is held on entry and is not released on exit
70520 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70521 + */
70522 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70523 +{
70524 + struct mm_struct *mm = vma->vm_mm;
70525 + unsigned long address_m;
70526 + spinlock_t *ptl_m;
70527 + struct vm_area_struct *vma_m;
70528 + pmd_t *pmd_m;
70529 + pte_t *pte_m, entry_m;
70530 +
70531 + BUG_ON(!page_m || !PageAnon(page_m));
70532 +
70533 + vma_m = pax_find_mirror_vma(vma);
70534 + if (!vma_m)
70535 + return;
70536 +
70537 + BUG_ON(!PageLocked(page_m));
70538 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70539 + address_m = address + SEGMEXEC_TASK_SIZE;
70540 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70541 + pte_m = pte_offset_map(pmd_m, address_m);
70542 + ptl_m = pte_lockptr(mm, pmd_m);
70543 + if (ptl != ptl_m) {
70544 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70545 + if (!pte_none(*pte_m))
70546 + goto out;
70547 + }
70548 +
70549 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70550 + page_cache_get(page_m);
70551 + page_add_anon_rmap(page_m, vma_m, address_m);
70552 + inc_mm_counter_fast(mm, MM_ANONPAGES);
70553 + set_pte_at(mm, address_m, pte_m, entry_m);
70554 + update_mmu_cache(vma_m, address_m, entry_m);
70555 +out:
70556 + if (ptl != ptl_m)
70557 + spin_unlock(ptl_m);
70558 + pte_unmap(pte_m);
70559 + unlock_page(page_m);
70560 +}
70561 +
70562 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70563 +{
70564 + struct mm_struct *mm = vma->vm_mm;
70565 + unsigned long address_m;
70566 + spinlock_t *ptl_m;
70567 + struct vm_area_struct *vma_m;
70568 + pmd_t *pmd_m;
70569 + pte_t *pte_m, entry_m;
70570 +
70571 + BUG_ON(!page_m || PageAnon(page_m));
70572 +
70573 + vma_m = pax_find_mirror_vma(vma);
70574 + if (!vma_m)
70575 + return;
70576 +
70577 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70578 + address_m = address + SEGMEXEC_TASK_SIZE;
70579 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70580 + pte_m = pte_offset_map(pmd_m, address_m);
70581 + ptl_m = pte_lockptr(mm, pmd_m);
70582 + if (ptl != ptl_m) {
70583 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70584 + if (!pte_none(*pte_m))
70585 + goto out;
70586 + }
70587 +
70588 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70589 + page_cache_get(page_m);
70590 + page_add_file_rmap(page_m);
70591 + inc_mm_counter_fast(mm, MM_FILEPAGES);
70592 + set_pte_at(mm, address_m, pte_m, entry_m);
70593 + update_mmu_cache(vma_m, address_m, entry_m);
70594 +out:
70595 + if (ptl != ptl_m)
70596 + spin_unlock(ptl_m);
70597 + pte_unmap(pte_m);
70598 +}
70599 +
70600 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70601 +{
70602 + struct mm_struct *mm = vma->vm_mm;
70603 + unsigned long address_m;
70604 + spinlock_t *ptl_m;
70605 + struct vm_area_struct *vma_m;
70606 + pmd_t *pmd_m;
70607 + pte_t *pte_m, entry_m;
70608 +
70609 + vma_m = pax_find_mirror_vma(vma);
70610 + if (!vma_m)
70611 + return;
70612 +
70613 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70614 + address_m = address + SEGMEXEC_TASK_SIZE;
70615 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
70616 + pte_m = pte_offset_map(pmd_m, address_m);
70617 + ptl_m = pte_lockptr(mm, pmd_m);
70618 + if (ptl != ptl_m) {
70619 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70620 + if (!pte_none(*pte_m))
70621 + goto out;
70622 + }
70623 +
70624 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70625 + set_pte_at(mm, address_m, pte_m, entry_m);
70626 +out:
70627 + if (ptl != ptl_m)
70628 + spin_unlock(ptl_m);
70629 + pte_unmap(pte_m);
70630 +}
70631 +
70632 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70633 +{
70634 + struct page *page_m;
70635 + pte_t entry;
70636 +
70637 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70638 + goto out;
70639 +
70640 + entry = *pte;
70641 + page_m = vm_normal_page(vma, address, entry);
70642 + if (!page_m)
70643 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70644 + else if (PageAnon(page_m)) {
70645 + if (pax_find_mirror_vma(vma)) {
70646 + pte_unmap_unlock(pte, ptl);
70647 + lock_page(page_m);
70648 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70649 + if (pte_same(entry, *pte))
70650 + pax_mirror_anon_pte(vma, address, page_m, ptl);
70651 + else
70652 + unlock_page(page_m);
70653 + }
70654 + } else
70655 + pax_mirror_file_pte(vma, address, page_m, ptl);
70656 +
70657 +out:
70658 + pte_unmap_unlock(pte, ptl);
70659 +}
70660 +#endif
70661 +
70662 /*
70663 * This routine handles present pages, when users try to write
70664 * to a shared page. It is done by copying the page to a new address
70665 @@ -2687,6 +2884,12 @@ gotten:
70666 */
70667 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70668 if (likely(pte_same(*page_table, orig_pte))) {
70669 +
70670 +#ifdef CONFIG_PAX_SEGMEXEC
70671 + if (pax_find_mirror_vma(vma))
70672 + BUG_ON(!trylock_page(new_page));
70673 +#endif
70674 +
70675 if (old_page) {
70676 if (!PageAnon(old_page)) {
70677 dec_mm_counter_fast(mm, MM_FILEPAGES);
70678 @@ -2738,6 +2941,10 @@ gotten:
70679 page_remove_rmap(old_page);
70680 }
70681
70682 +#ifdef CONFIG_PAX_SEGMEXEC
70683 + pax_mirror_anon_pte(vma, address, new_page, ptl);
70684 +#endif
70685 +
70686 /* Free the old page.. */
70687 new_page = old_page;
70688 ret |= VM_FAULT_WRITE;
70689 @@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70690 swap_free(entry);
70691 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70692 try_to_free_swap(page);
70693 +
70694 +#ifdef CONFIG_PAX_SEGMEXEC
70695 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70696 +#endif
70697 +
70698 unlock_page(page);
70699 if (swapcache) {
70700 /*
70701 @@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70702
70703 /* No need to invalidate - it was non-present before */
70704 update_mmu_cache(vma, address, page_table);
70705 +
70706 +#ifdef CONFIG_PAX_SEGMEXEC
70707 + pax_mirror_anon_pte(vma, address, page, ptl);
70708 +#endif
70709 +
70710 unlock:
70711 pte_unmap_unlock(page_table, ptl);
70712 out:
70713 @@ -3059,40 +3276,6 @@ out_release:
70714 }
70715
70716 /*
70717 - * This is like a special single-page "expand_{down|up}wards()",
70718 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
70719 - * doesn't hit another vma.
70720 - */
70721 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
70722 -{
70723 - address &= PAGE_MASK;
70724 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
70725 - struct vm_area_struct *prev = vma->vm_prev;
70726 -
70727 - /*
70728 - * Is there a mapping abutting this one below?
70729 - *
70730 - * That's only ok if it's the same stack mapping
70731 - * that has gotten split..
70732 - */
70733 - if (prev && prev->vm_end == address)
70734 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
70735 -
70736 - expand_downwards(vma, address - PAGE_SIZE);
70737 - }
70738 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
70739 - struct vm_area_struct *next = vma->vm_next;
70740 -
70741 - /* As VM_GROWSDOWN but s/below/above/ */
70742 - if (next && next->vm_start == address + PAGE_SIZE)
70743 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
70744 -
70745 - expand_upwards(vma, address + PAGE_SIZE);
70746 - }
70747 - return 0;
70748 -}
70749 -
70750 -/*
70751 * We enter with non-exclusive mmap_sem (to exclude vma changes,
70752 * but allow concurrent faults), and pte mapped but not yet locked.
70753 * We return with mmap_sem still held, but pte unmapped and unlocked.
70754 @@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70755 unsigned long address, pte_t *page_table, pmd_t *pmd,
70756 unsigned int flags)
70757 {
70758 - struct page *page;
70759 + struct page *page = NULL;
70760 spinlock_t *ptl;
70761 pte_t entry;
70762
70763 - pte_unmap(page_table);
70764 -
70765 - /* Check if we need to add a guard page to the stack */
70766 - if (check_stack_guard_page(vma, address) < 0)
70767 - return VM_FAULT_SIGBUS;
70768 -
70769 - /* Use the zero-page for reads */
70770 if (!(flags & FAULT_FLAG_WRITE)) {
70771 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
70772 vma->vm_page_prot));
70773 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70774 + ptl = pte_lockptr(mm, pmd);
70775 + spin_lock(ptl);
70776 if (!pte_none(*page_table))
70777 goto unlock;
70778 goto setpte;
70779 }
70780
70781 /* Allocate our own private page. */
70782 + pte_unmap(page_table);
70783 +
70784 if (unlikely(anon_vma_prepare(vma)))
70785 goto oom;
70786 page = alloc_zeroed_user_highpage_movable(vma, address);
70787 @@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
70788 if (!pte_none(*page_table))
70789 goto release;
70790
70791 +#ifdef CONFIG_PAX_SEGMEXEC
70792 + if (pax_find_mirror_vma(vma))
70793 + BUG_ON(!trylock_page(page));
70794 +#endif
70795 +
70796 inc_mm_counter_fast(mm, MM_ANONPAGES);
70797 page_add_new_anon_rmap(page, vma, address);
70798 setpte:
70799 @@ -3147,6 +3331,12 @@ setpte:
70800
70801 /* No need to invalidate - it was non-present before */
70802 update_mmu_cache(vma, address, page_table);
70803 +
70804 +#ifdef CONFIG_PAX_SEGMEXEC
70805 + if (page)
70806 + pax_mirror_anon_pte(vma, address, page, ptl);
70807 +#endif
70808 +
70809 unlock:
70810 pte_unmap_unlock(page_table, ptl);
70811 return 0;
70812 @@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70813 */
70814 /* Only go through if we didn't race with anybody else... */
70815 if (likely(pte_same(*page_table, orig_pte))) {
70816 +
70817 +#ifdef CONFIG_PAX_SEGMEXEC
70818 + if (anon && pax_find_mirror_vma(vma))
70819 + BUG_ON(!trylock_page(page));
70820 +#endif
70821 +
70822 flush_icache_page(vma, page);
70823 entry = mk_pte(page, vma->vm_page_prot);
70824 if (flags & FAULT_FLAG_WRITE)
70825 @@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70826
70827 /* no need to invalidate: a not-present page won't be cached */
70828 update_mmu_cache(vma, address, page_table);
70829 +
70830 +#ifdef CONFIG_PAX_SEGMEXEC
70831 + if (anon)
70832 + pax_mirror_anon_pte(vma, address, page, ptl);
70833 + else
70834 + pax_mirror_file_pte(vma, address, page, ptl);
70835 +#endif
70836 +
70837 } else {
70838 if (cow_page)
70839 mem_cgroup_uncharge_page(cow_page);
70840 @@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
70841 if (flags & FAULT_FLAG_WRITE)
70842 flush_tlb_fix_spurious_fault(vma, address);
70843 }
70844 +
70845 +#ifdef CONFIG_PAX_SEGMEXEC
70846 + pax_mirror_pte(vma, address, pte, pmd, ptl);
70847 + return 0;
70848 +#endif
70849 +
70850 unlock:
70851 pte_unmap_unlock(pte, ptl);
70852 return 0;
70853 @@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70854 pmd_t *pmd;
70855 pte_t *pte;
70856
70857 +#ifdef CONFIG_PAX_SEGMEXEC
70858 + struct vm_area_struct *vma_m;
70859 +#endif
70860 +
70861 __set_current_state(TASK_RUNNING);
70862
70863 count_vm_event(PGFAULT);
70864 @@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70865 if (unlikely(is_vm_hugetlb_page(vma)))
70866 return hugetlb_fault(mm, vma, address, flags);
70867
70868 +#ifdef CONFIG_PAX_SEGMEXEC
70869 + vma_m = pax_find_mirror_vma(vma);
70870 + if (vma_m) {
70871 + unsigned long address_m;
70872 + pgd_t *pgd_m;
70873 + pud_t *pud_m;
70874 + pmd_t *pmd_m;
70875 +
70876 + if (vma->vm_start > vma_m->vm_start) {
70877 + address_m = address;
70878 + address -= SEGMEXEC_TASK_SIZE;
70879 + vma = vma_m;
70880 + } else
70881 + address_m = address + SEGMEXEC_TASK_SIZE;
70882 +
70883 + pgd_m = pgd_offset(mm, address_m);
70884 + pud_m = pud_alloc(mm, pgd_m, address_m);
70885 + if (!pud_m)
70886 + return VM_FAULT_OOM;
70887 + pmd_m = pmd_alloc(mm, pud_m, address_m);
70888 + if (!pmd_m)
70889 + return VM_FAULT_OOM;
70890 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
70891 + return VM_FAULT_OOM;
70892 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
70893 + }
70894 +#endif
70895 +
70896 pgd = pgd_offset(mm, address);
70897 pud = pud_alloc(mm, pgd, address);
70898 if (!pud)
70899 @@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
70900 * run pte_offset_map on the pmd, if an huge pmd could
70901 * materialize from under us from a different thread.
70902 */
70903 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
70904 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70905 return VM_FAULT_OOM;
70906 /* if an huge pmd materialized from under us just retry later */
70907 if (unlikely(pmd_trans_huge(*pmd)))
70908 @@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70909 spin_unlock(&mm->page_table_lock);
70910 return 0;
70911 }
70912 +
70913 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70914 +{
70915 + pud_t *new = pud_alloc_one(mm, address);
70916 + if (!new)
70917 + return -ENOMEM;
70918 +
70919 + smp_wmb(); /* See comment in __pte_alloc */
70920 +
70921 + spin_lock(&mm->page_table_lock);
70922 + if (pgd_present(*pgd)) /* Another has populated it */
70923 + pud_free(mm, new);
70924 + else
70925 + pgd_populate_kernel(mm, pgd, new);
70926 + spin_unlock(&mm->page_table_lock);
70927 + return 0;
70928 +}
70929 #endif /* __PAGETABLE_PUD_FOLDED */
70930
70931 #ifndef __PAGETABLE_PMD_FOLDED
70932 @@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70933 spin_unlock(&mm->page_table_lock);
70934 return 0;
70935 }
70936 +
70937 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70938 +{
70939 + pmd_t *new = pmd_alloc_one(mm, address);
70940 + if (!new)
70941 + return -ENOMEM;
70942 +
70943 + smp_wmb(); /* See comment in __pte_alloc */
70944 +
70945 + spin_lock(&mm->page_table_lock);
70946 +#ifndef __ARCH_HAS_4LEVEL_HACK
70947 + if (pud_present(*pud)) /* Another has populated it */
70948 + pmd_free(mm, new);
70949 + else
70950 + pud_populate_kernel(mm, pud, new);
70951 +#else
70952 + if (pgd_present(*pud)) /* Another has populated it */
70953 + pmd_free(mm, new);
70954 + else
70955 + pgd_populate_kernel(mm, pud, new);
70956 +#endif /* __ARCH_HAS_4LEVEL_HACK */
70957 + spin_unlock(&mm->page_table_lock);
70958 + return 0;
70959 +}
70960 #endif /* __PAGETABLE_PMD_FOLDED */
70961
70962 int make_pages_present(unsigned long addr, unsigned long end)
70963 @@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
70964 gate_vma.vm_start = FIXADDR_USER_START;
70965 gate_vma.vm_end = FIXADDR_USER_END;
70966 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
70967 - gate_vma.vm_page_prot = __P101;
70968 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
70969
70970 return 0;
70971 }
70972 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
70973 index bf5b485..e44c2cb 100644
70974 --- a/mm/mempolicy.c
70975 +++ b/mm/mempolicy.c
70976 @@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70977 unsigned long vmstart;
70978 unsigned long vmend;
70979
70980 +#ifdef CONFIG_PAX_SEGMEXEC
70981 + struct vm_area_struct *vma_m;
70982 +#endif
70983 +
70984 vma = find_vma(mm, start);
70985 if (!vma || vma->vm_start > start)
70986 return -EFAULT;
70987 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
70988 if (err)
70989 goto out;
70990 }
70991 +
70992 +#ifdef CONFIG_PAX_SEGMEXEC
70993 + vma_m = pax_find_mirror_vma(vma);
70994 + if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
70995 + err = vma_m->vm_ops->set_policy(vma_m, new_pol);
70996 + if (err)
70997 + goto out;
70998 + }
70999 +#endif
71000 +
71001 }
71002
71003 out:
71004 @@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71005
71006 if (end < start)
71007 return -EINVAL;
71008 +
71009 +#ifdef CONFIG_PAX_SEGMEXEC
71010 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71011 + if (end > SEGMEXEC_TASK_SIZE)
71012 + return -EINVAL;
71013 + } else
71014 +#endif
71015 +
71016 + if (end > TASK_SIZE)
71017 + return -EINVAL;
71018 +
71019 if (end == start)
71020 return 0;
71021
71022 @@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71023 */
71024 tcred = __task_cred(task);
71025 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71026 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
71027 - !capable(CAP_SYS_NICE)) {
71028 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71029 rcu_read_unlock();
71030 err = -EPERM;
71031 goto out_put;
71032 @@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71033 goto out;
71034 }
71035
71036 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71037 + if (mm != current->mm &&
71038 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
71039 + mmput(mm);
71040 + err = -EPERM;
71041 + goto out;
71042 + }
71043 +#endif
71044 +
71045 err = do_migrate_pages(mm, old, new,
71046 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
71047
71048 diff --git a/mm/mlock.c b/mm/mlock.c
71049 index ef726e8..13e0901 100644
71050 --- a/mm/mlock.c
71051 +++ b/mm/mlock.c
71052 @@ -13,6 +13,7 @@
71053 #include <linux/pagemap.h>
71054 #include <linux/mempolicy.h>
71055 #include <linux/syscalls.h>
71056 +#include <linux/security.h>
71057 #include <linux/sched.h>
71058 #include <linux/export.h>
71059 #include <linux/rmap.h>
71060 @@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71061 return -EINVAL;
71062 if (end == start)
71063 return 0;
71064 + if (end > TASK_SIZE)
71065 + return -EINVAL;
71066 +
71067 vma = find_vma(current->mm, start);
71068 if (!vma || vma->vm_start > start)
71069 return -ENOMEM;
71070 @@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
71071 for (nstart = start ; ; ) {
71072 vm_flags_t newflags;
71073
71074 +#ifdef CONFIG_PAX_SEGMEXEC
71075 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71076 + break;
71077 +#endif
71078 +
71079 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71080
71081 newflags = vma->vm_flags | VM_LOCKED;
71082 @@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71083 lock_limit >>= PAGE_SHIFT;
71084
71085 /* check against resource limits */
71086 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71087 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71088 error = do_mlock(start, len, 1);
71089 up_write(&current->mm->mmap_sem);
71090 @@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71091 static int do_mlockall(int flags)
71092 {
71093 struct vm_area_struct * vma, * prev = NULL;
71094 - unsigned int def_flags = 0;
71095
71096 if (flags & MCL_FUTURE)
71097 - def_flags = VM_LOCKED;
71098 - current->mm->def_flags = def_flags;
71099 + current->mm->def_flags |= VM_LOCKED;
71100 + else
71101 + current->mm->def_flags &= ~VM_LOCKED;
71102 if (flags == MCL_FUTURE)
71103 goto out;
71104
71105 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71106 vm_flags_t newflags;
71107
71108 +#ifdef CONFIG_PAX_SEGMEXEC
71109 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71110 + break;
71111 +#endif
71112 +
71113 + BUG_ON(vma->vm_end > TASK_SIZE);
71114 newflags = vma->vm_flags | VM_LOCKED;
71115 if (!(flags & MCL_CURRENT))
71116 newflags &= ~VM_LOCKED;
71117 @@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71118 lock_limit >>= PAGE_SHIFT;
71119
71120 ret = -ENOMEM;
71121 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71122 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71123 capable(CAP_IPC_LOCK))
71124 ret = do_mlockall(flags);
71125 diff --git a/mm/mmap.c b/mm/mmap.c
71126 index 848ef52..d2b586c 100644
71127 --- a/mm/mmap.c
71128 +++ b/mm/mmap.c
71129 @@ -46,6 +46,16 @@
71130 #define arch_rebalance_pgtables(addr, len) (addr)
71131 #endif
71132
71133 +static inline void verify_mm_writelocked(struct mm_struct *mm)
71134 +{
71135 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71136 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71137 + up_read(&mm->mmap_sem);
71138 + BUG();
71139 + }
71140 +#endif
71141 +}
71142 +
71143 static void unmap_region(struct mm_struct *mm,
71144 struct vm_area_struct *vma, struct vm_area_struct *prev,
71145 unsigned long start, unsigned long end);
71146 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71147 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71148 *
71149 */
71150 -pgprot_t protection_map[16] = {
71151 +pgprot_t protection_map[16] __read_only = {
71152 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71153 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71154 };
71155
71156 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
71157 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71158 {
71159 - return __pgprot(pgprot_val(protection_map[vm_flags &
71160 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71161 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71162 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71163 +
71164 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71165 + if (!(__supported_pte_mask & _PAGE_NX) &&
71166 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71167 + (vm_flags & (VM_READ | VM_WRITE)))
71168 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71169 +#endif
71170 +
71171 + return prot;
71172 }
71173 EXPORT_SYMBOL(vm_get_page_prot);
71174
71175 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71176 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71177 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71178 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
71179 /*
71180 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71181 * other variables. It can be updated by several CPUs frequently.
71182 @@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71183 struct vm_area_struct *next = vma->vm_next;
71184
71185 might_sleep();
71186 + BUG_ON(vma->vm_mirror);
71187 if (vma->vm_ops && vma->vm_ops->close)
71188 vma->vm_ops->close(vma);
71189 if (vma->vm_file) {
71190 @@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71191 * not page aligned -Ram Gupta
71192 */
71193 rlim = rlimit(RLIMIT_DATA);
71194 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71195 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71196 (mm->end_data - mm->start_data) > rlim)
71197 goto out;
71198 @@ -690,6 +712,12 @@ static int
71199 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71200 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71201 {
71202 +
71203 +#ifdef CONFIG_PAX_SEGMEXEC
71204 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71205 + return 0;
71206 +#endif
71207 +
71208 if (is_mergeable_vma(vma, file, vm_flags) &&
71209 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71210 if (vma->vm_pgoff == vm_pgoff)
71211 @@ -709,6 +737,12 @@ static int
71212 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71213 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71214 {
71215 +
71216 +#ifdef CONFIG_PAX_SEGMEXEC
71217 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71218 + return 0;
71219 +#endif
71220 +
71221 if (is_mergeable_vma(vma, file, vm_flags) &&
71222 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
71223 pgoff_t vm_pglen;
71224 @@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71225 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71226 struct vm_area_struct *prev, unsigned long addr,
71227 unsigned long end, unsigned long vm_flags,
71228 - struct anon_vma *anon_vma, struct file *file,
71229 + struct anon_vma *anon_vma, struct file *file,
71230 pgoff_t pgoff, struct mempolicy *policy)
71231 {
71232 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71233 struct vm_area_struct *area, *next;
71234 int err;
71235
71236 +#ifdef CONFIG_PAX_SEGMEXEC
71237 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71238 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71239 +
71240 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71241 +#endif
71242 +
71243 /*
71244 * We later require that vma->vm_flags == vm_flags,
71245 * so this tests vma->vm_flags & VM_SPECIAL, too.
71246 @@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71247 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71248 next = next->vm_next;
71249
71250 +#ifdef CONFIG_PAX_SEGMEXEC
71251 + if (prev)
71252 + prev_m = pax_find_mirror_vma(prev);
71253 + if (area)
71254 + area_m = pax_find_mirror_vma(area);
71255 + if (next)
71256 + next_m = pax_find_mirror_vma(next);
71257 +#endif
71258 +
71259 /*
71260 * Can it merge with the predecessor?
71261 */
71262 @@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71263 /* cases 1, 6 */
71264 err = vma_adjust(prev, prev->vm_start,
71265 next->vm_end, prev->vm_pgoff, NULL);
71266 - } else /* cases 2, 5, 7 */
71267 +
71268 +#ifdef CONFIG_PAX_SEGMEXEC
71269 + if (!err && prev_m)
71270 + err = vma_adjust(prev_m, prev_m->vm_start,
71271 + next_m->vm_end, prev_m->vm_pgoff, NULL);
71272 +#endif
71273 +
71274 + } else { /* cases 2, 5, 7 */
71275 err = vma_adjust(prev, prev->vm_start,
71276 end, prev->vm_pgoff, NULL);
71277 +
71278 +#ifdef CONFIG_PAX_SEGMEXEC
71279 + if (!err && prev_m)
71280 + err = vma_adjust(prev_m, prev_m->vm_start,
71281 + end_m, prev_m->vm_pgoff, NULL);
71282 +#endif
71283 +
71284 + }
71285 if (err)
71286 return NULL;
71287 khugepaged_enter_vma_merge(prev);
71288 @@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71289 mpol_equal(policy, vma_policy(next)) &&
71290 can_vma_merge_before(next, vm_flags,
71291 anon_vma, file, pgoff+pglen)) {
71292 - if (prev && addr < prev->vm_end) /* case 4 */
71293 + if (prev && addr < prev->vm_end) { /* case 4 */
71294 err = vma_adjust(prev, prev->vm_start,
71295 addr, prev->vm_pgoff, NULL);
71296 - else /* cases 3, 8 */
71297 +
71298 +#ifdef CONFIG_PAX_SEGMEXEC
71299 + if (!err && prev_m)
71300 + err = vma_adjust(prev_m, prev_m->vm_start,
71301 + addr_m, prev_m->vm_pgoff, NULL);
71302 +#endif
71303 +
71304 + } else { /* cases 3, 8 */
71305 err = vma_adjust(area, addr, next->vm_end,
71306 next->vm_pgoff - pglen, NULL);
71307 +
71308 +#ifdef CONFIG_PAX_SEGMEXEC
71309 + if (!err && area_m)
71310 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
71311 + next_m->vm_pgoff - pglen, NULL);
71312 +#endif
71313 +
71314 + }
71315 if (err)
71316 return NULL;
71317 khugepaged_enter_vma_merge(area);
71318 @@ -922,14 +1002,11 @@ none:
71319 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71320 struct file *file, long pages)
71321 {
71322 - const unsigned long stack_flags
71323 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71324 -
71325 if (file) {
71326 mm->shared_vm += pages;
71327 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71328 mm->exec_vm += pages;
71329 - } else if (flags & stack_flags)
71330 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71331 mm->stack_vm += pages;
71332 if (flags & (VM_RESERVED|VM_IO))
71333 mm->reserved_vm += pages;
71334 @@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71335 * (the exception is when the underlying filesystem is noexec
71336 * mounted, in which case we dont add PROT_EXEC.)
71337 */
71338 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71339 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71340 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71341 prot |= PROT_EXEC;
71342
71343 @@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71344 /* Obtain the address to map to. we verify (or select) it and ensure
71345 * that it represents a valid section of the address space.
71346 */
71347 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
71348 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71349 if (addr & ~PAGE_MASK)
71350 return addr;
71351
71352 @@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71353 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71354 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71355
71356 +#ifdef CONFIG_PAX_MPROTECT
71357 + if (mm->pax_flags & MF_PAX_MPROTECT) {
71358 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
71359 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71360 + gr_log_rwxmmap(file);
71361 +
71362 +#ifdef CONFIG_PAX_EMUPLT
71363 + vm_flags &= ~VM_EXEC;
71364 +#else
71365 + return -EPERM;
71366 +#endif
71367 +
71368 + }
71369 +
71370 + if (!(vm_flags & VM_EXEC))
71371 + vm_flags &= ~VM_MAYEXEC;
71372 +#else
71373 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71374 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71375 +#endif
71376 + else
71377 + vm_flags &= ~VM_MAYWRITE;
71378 + }
71379 +#endif
71380 +
71381 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71382 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71383 + vm_flags &= ~VM_PAGEEXEC;
71384 +#endif
71385 +
71386 if (flags & MAP_LOCKED)
71387 if (!can_do_mlock())
71388 return -EPERM;
71389 @@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71390 locked += mm->locked_vm;
71391 lock_limit = rlimit(RLIMIT_MEMLOCK);
71392 lock_limit >>= PAGE_SHIFT;
71393 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71394 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71395 return -EAGAIN;
71396 }
71397 @@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71398 if (error)
71399 return error;
71400
71401 + if (!gr_acl_handle_mmap(file, prot))
71402 + return -EACCES;
71403 +
71404 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71405 }
71406
71407 @@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
71408 vm_flags_t vm_flags = vma->vm_flags;
71409
71410 /* If it was private or non-writable, the write bit is already clear */
71411 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71412 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71413 return 0;
71414
71415 /* The backer wishes to know when pages are first written to? */
71416 @@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71417 unsigned long charged = 0;
71418 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71419
71420 +#ifdef CONFIG_PAX_SEGMEXEC
71421 + struct vm_area_struct *vma_m = NULL;
71422 +#endif
71423 +
71424 + /*
71425 + * mm->mmap_sem is required to protect against another thread
71426 + * changing the mappings in case we sleep.
71427 + */
71428 + verify_mm_writelocked(mm);
71429 +
71430 /* Clear old maps */
71431 error = -ENOMEM;
71432 -munmap_back:
71433 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71434 if (vma && vma->vm_start < addr + len) {
71435 if (do_munmap(mm, addr, len))
71436 return -ENOMEM;
71437 - goto munmap_back;
71438 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71439 + BUG_ON(vma && vma->vm_start < addr + len);
71440 }
71441
71442 /* Check against address space limit. */
71443 @@ -1297,6 +1418,16 @@ munmap_back:
71444 goto unacct_error;
71445 }
71446
71447 +#ifdef CONFIG_PAX_SEGMEXEC
71448 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71449 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71450 + if (!vma_m) {
71451 + error = -ENOMEM;
71452 + goto free_vma;
71453 + }
71454 + }
71455 +#endif
71456 +
71457 vma->vm_mm = mm;
71458 vma->vm_start = addr;
71459 vma->vm_end = addr + len;
71460 @@ -1321,6 +1452,19 @@ munmap_back:
71461 error = file->f_op->mmap(file, vma);
71462 if (error)
71463 goto unmap_and_free_vma;
71464 +
71465 +#ifdef CONFIG_PAX_SEGMEXEC
71466 + if (vma_m && (vm_flags & VM_EXECUTABLE))
71467 + added_exe_file_vma(mm);
71468 +#endif
71469 +
71470 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71471 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71472 + vma->vm_flags |= VM_PAGEEXEC;
71473 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71474 + }
71475 +#endif
71476 +
71477 if (vm_flags & VM_EXECUTABLE)
71478 added_exe_file_vma(mm);
71479
71480 @@ -1358,6 +1502,11 @@ munmap_back:
71481 vma_link(mm, vma, prev, rb_link, rb_parent);
71482 file = vma->vm_file;
71483
71484 +#ifdef CONFIG_PAX_SEGMEXEC
71485 + if (vma_m)
71486 + BUG_ON(pax_mirror_vma(vma_m, vma));
71487 +#endif
71488 +
71489 /* Once vma denies write, undo our temporary denial count */
71490 if (correct_wcount)
71491 atomic_inc(&inode->i_writecount);
71492 @@ -1366,6 +1515,7 @@ out:
71493
71494 mm->total_vm += len >> PAGE_SHIFT;
71495 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71496 + track_exec_limit(mm, addr, addr + len, vm_flags);
71497 if (vm_flags & VM_LOCKED) {
71498 if (!mlock_vma_pages_range(vma, addr, addr + len))
71499 mm->locked_vm += (len >> PAGE_SHIFT);
71500 @@ -1383,6 +1533,12 @@ unmap_and_free_vma:
71501 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71502 charged = 0;
71503 free_vma:
71504 +
71505 +#ifdef CONFIG_PAX_SEGMEXEC
71506 + if (vma_m)
71507 + kmem_cache_free(vm_area_cachep, vma_m);
71508 +#endif
71509 +
71510 kmem_cache_free(vm_area_cachep, vma);
71511 unacct_error:
71512 if (charged)
71513 @@ -1390,6 +1546,44 @@ unacct_error:
71514 return error;
71515 }
71516
71517 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71518 +{
71519 + if (!vma) {
71520 +#ifdef CONFIG_STACK_GROWSUP
71521 + if (addr > sysctl_heap_stack_gap)
71522 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71523 + else
71524 + vma = find_vma(current->mm, 0);
71525 + if (vma && (vma->vm_flags & VM_GROWSUP))
71526 + return false;
71527 +#endif
71528 + return true;
71529 + }
71530 +
71531 + if (addr + len > vma->vm_start)
71532 + return false;
71533 +
71534 + if (vma->vm_flags & VM_GROWSDOWN)
71535 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71536 +#ifdef CONFIG_STACK_GROWSUP
71537 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71538 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71539 +#endif
71540 +
71541 + return true;
71542 +}
71543 +
71544 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71545 +{
71546 + if (vma->vm_start < len)
71547 + return -ENOMEM;
71548 + if (!(vma->vm_flags & VM_GROWSDOWN))
71549 + return vma->vm_start - len;
71550 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
71551 + return vma->vm_start - len - sysctl_heap_stack_gap;
71552 + return -ENOMEM;
71553 +}
71554 +
71555 /* Get an address range which is currently unmapped.
71556 * For shmat() with addr=0.
71557 *
71558 @@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71559 if (flags & MAP_FIXED)
71560 return addr;
71561
71562 +#ifdef CONFIG_PAX_RANDMMAP
71563 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71564 +#endif
71565 +
71566 if (addr) {
71567 addr = PAGE_ALIGN(addr);
71568 - vma = find_vma(mm, addr);
71569 - if (TASK_SIZE - len >= addr &&
71570 - (!vma || addr + len <= vma->vm_start))
71571 - return addr;
71572 + if (TASK_SIZE - len >= addr) {
71573 + vma = find_vma(mm, addr);
71574 + if (check_heap_stack_gap(vma, addr, len))
71575 + return addr;
71576 + }
71577 }
71578 if (len > mm->cached_hole_size) {
71579 - start_addr = addr = mm->free_area_cache;
71580 + start_addr = addr = mm->free_area_cache;
71581 } else {
71582 - start_addr = addr = TASK_UNMAPPED_BASE;
71583 - mm->cached_hole_size = 0;
71584 + start_addr = addr = mm->mmap_base;
71585 + mm->cached_hole_size = 0;
71586 }
71587
71588 full_search:
71589 @@ -1438,34 +1637,40 @@ full_search:
71590 * Start a new search - just in case we missed
71591 * some holes.
71592 */
71593 - if (start_addr != TASK_UNMAPPED_BASE) {
71594 - addr = TASK_UNMAPPED_BASE;
71595 - start_addr = addr;
71596 + if (start_addr != mm->mmap_base) {
71597 + start_addr = addr = mm->mmap_base;
71598 mm->cached_hole_size = 0;
71599 goto full_search;
71600 }
71601 return -ENOMEM;
71602 }
71603 - if (!vma || addr + len <= vma->vm_start) {
71604 - /*
71605 - * Remember the place where we stopped the search:
71606 - */
71607 - mm->free_area_cache = addr + len;
71608 - return addr;
71609 - }
71610 + if (check_heap_stack_gap(vma, addr, len))
71611 + break;
71612 if (addr + mm->cached_hole_size < vma->vm_start)
71613 mm->cached_hole_size = vma->vm_start - addr;
71614 addr = vma->vm_end;
71615 }
71616 +
71617 + /*
71618 + * Remember the place where we stopped the search:
71619 + */
71620 + mm->free_area_cache = addr + len;
71621 + return addr;
71622 }
71623 #endif
71624
71625 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71626 {
71627 +
71628 +#ifdef CONFIG_PAX_SEGMEXEC
71629 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71630 + return;
71631 +#endif
71632 +
71633 /*
71634 * Is this a new hole at the lowest possible address?
71635 */
71636 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71637 + if (addr >= mm->mmap_base && addr < mm->free_area_cache)
71638 mm->free_area_cache = addr;
71639 }
71640
71641 @@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71642 {
71643 struct vm_area_struct *vma;
71644 struct mm_struct *mm = current->mm;
71645 - unsigned long addr = addr0, start_addr;
71646 + unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71647
71648 /* requested length too big for entire address space */
71649 if (len > TASK_SIZE)
71650 @@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71651 if (flags & MAP_FIXED)
71652 return addr;
71653
71654 +#ifdef CONFIG_PAX_RANDMMAP
71655 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71656 +#endif
71657 +
71658 /* requesting a specific address */
71659 if (addr) {
71660 addr = PAGE_ALIGN(addr);
71661 - vma = find_vma(mm, addr);
71662 - if (TASK_SIZE - len >= addr &&
71663 - (!vma || addr + len <= vma->vm_start))
71664 - return addr;
71665 + if (TASK_SIZE - len >= addr) {
71666 + vma = find_vma(mm, addr);
71667 + if (check_heap_stack_gap(vma, addr, len))
71668 + return addr;
71669 + }
71670 }
71671
71672 /* check if free_area_cache is useful for us */
71673 @@ -1520,7 +1730,7 @@ try_again:
71674 * return with success:
71675 */
71676 vma = find_vma(mm, addr);
71677 - if (!vma || addr+len <= vma->vm_start)
71678 + if (check_heap_stack_gap(vma, addr, len))
71679 /* remember the address as a hint for next time */
71680 return (mm->free_area_cache = addr);
71681
71682 @@ -1529,8 +1739,8 @@ try_again:
71683 mm->cached_hole_size = vma->vm_start - addr;
71684
71685 /* try just below the current vma->vm_start */
71686 - addr = vma->vm_start-len;
71687 - } while (len < vma->vm_start);
71688 + addr = skip_heap_stack_gap(vma, len);
71689 + } while (!IS_ERR_VALUE(addr));
71690
71691 fail:
71692 /*
71693 @@ -1553,13 +1763,21 @@ fail:
71694 * can happen with large stack limits and large mmap()
71695 * allocations.
71696 */
71697 + mm->mmap_base = TASK_UNMAPPED_BASE;
71698 +
71699 +#ifdef CONFIG_PAX_RANDMMAP
71700 + if (mm->pax_flags & MF_PAX_RANDMMAP)
71701 + mm->mmap_base += mm->delta_mmap;
71702 +#endif
71703 +
71704 + mm->free_area_cache = mm->mmap_base;
71705 mm->cached_hole_size = ~0UL;
71706 - mm->free_area_cache = TASK_UNMAPPED_BASE;
71707 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
71708 /*
71709 * Restore the topdown base:
71710 */
71711 - mm->free_area_cache = mm->mmap_base;
71712 + mm->mmap_base = base;
71713 + mm->free_area_cache = base;
71714 mm->cached_hole_size = ~0UL;
71715
71716 return addr;
71717 @@ -1568,6 +1786,12 @@ fail:
71718
71719 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71720 {
71721 +
71722 +#ifdef CONFIG_PAX_SEGMEXEC
71723 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71724 + return;
71725 +#endif
71726 +
71727 /*
71728 * Is this a new hole at the highest possible address?
71729 */
71730 @@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
71731 mm->free_area_cache = addr;
71732
71733 /* dont allow allocations above current base */
71734 - if (mm->free_area_cache > mm->mmap_base)
71735 + if (mm->free_area_cache > mm->mmap_base) {
71736 mm->free_area_cache = mm->mmap_base;
71737 + mm->cached_hole_size = ~0UL;
71738 + }
71739 }
71740
71741 unsigned long
71742 @@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
71743 return vma;
71744 }
71745
71746 +#ifdef CONFIG_PAX_SEGMEXEC
71747 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
71748 +{
71749 + struct vm_area_struct *vma_m;
71750 +
71751 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
71752 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
71753 + BUG_ON(vma->vm_mirror);
71754 + return NULL;
71755 + }
71756 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
71757 + vma_m = vma->vm_mirror;
71758 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
71759 + BUG_ON(vma->vm_file != vma_m->vm_file);
71760 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
71761 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
71762 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
71763 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
71764 + return vma_m;
71765 +}
71766 +#endif
71767 +
71768 /*
71769 * Verify that the stack growth is acceptable and
71770 * update accounting. This is shared with both the
71771 @@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71772 return -ENOMEM;
71773
71774 /* Stack limit test */
71775 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
71776 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
71777 return -ENOMEM;
71778
71779 @@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71780 locked = mm->locked_vm + grow;
71781 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
71782 limit >>= PAGE_SHIFT;
71783 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71784 if (locked > limit && !capable(CAP_IPC_LOCK))
71785 return -ENOMEM;
71786 }
71787 @@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
71788 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
71789 * vma is the last one with address > vma->vm_end. Have to extend vma.
71790 */
71791 +#ifndef CONFIG_IA64
71792 +static
71793 +#endif
71794 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71795 {
71796 int error;
71797 + bool locknext;
71798
71799 if (!(vma->vm_flags & VM_GROWSUP))
71800 return -EFAULT;
71801
71802 + /* Also guard against wrapping around to address 0. */
71803 + if (address < PAGE_ALIGN(address+1))
71804 + address = PAGE_ALIGN(address+1);
71805 + else
71806 + return -ENOMEM;
71807 +
71808 /*
71809 * We must make sure the anon_vma is allocated
71810 * so that the anon_vma locking is not a noop.
71811 */
71812 if (unlikely(anon_vma_prepare(vma)))
71813 return -ENOMEM;
71814 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
71815 + if (locknext && anon_vma_prepare(vma->vm_next))
71816 + return -ENOMEM;
71817 vma_lock_anon_vma(vma);
71818 + if (locknext)
71819 + vma_lock_anon_vma(vma->vm_next);
71820
71821 /*
71822 * vma->vm_start/vm_end cannot change under us because the caller
71823 * is required to hold the mmap_sem in read mode. We need the
71824 - * anon_vma lock to serialize against concurrent expand_stacks.
71825 - * Also guard against wrapping around to address 0.
71826 + * anon_vma locks to serialize against concurrent expand_stacks
71827 + * and expand_upwards.
71828 */
71829 - if (address < PAGE_ALIGN(address+4))
71830 - address = PAGE_ALIGN(address+4);
71831 - else {
71832 - vma_unlock_anon_vma(vma);
71833 - return -ENOMEM;
71834 - }
71835 error = 0;
71836
71837 /* Somebody else might have raced and expanded it already */
71838 - if (address > vma->vm_end) {
71839 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
71840 + error = -ENOMEM;
71841 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
71842 unsigned long size, grow;
71843
71844 size = address - vma->vm_start;
71845 @@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
71846 }
71847 }
71848 }
71849 + if (locknext)
71850 + vma_unlock_anon_vma(vma->vm_next);
71851 vma_unlock_anon_vma(vma);
71852 khugepaged_enter_vma_merge(vma);
71853 return error;
71854 @@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
71855 unsigned long address)
71856 {
71857 int error;
71858 + bool lockprev = false;
71859 + struct vm_area_struct *prev;
71860
71861 /*
71862 * We must make sure the anon_vma is allocated
71863 @@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
71864 if (error)
71865 return error;
71866
71867 + prev = vma->vm_prev;
71868 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
71869 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
71870 +#endif
71871 + if (lockprev && anon_vma_prepare(prev))
71872 + return -ENOMEM;
71873 + if (lockprev)
71874 + vma_lock_anon_vma(prev);
71875 +
71876 vma_lock_anon_vma(vma);
71877
71878 /*
71879 @@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
71880 */
71881
71882 /* Somebody else might have raced and expanded it already */
71883 - if (address < vma->vm_start) {
71884 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
71885 + error = -ENOMEM;
71886 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
71887 unsigned long size, grow;
71888
71889 +#ifdef CONFIG_PAX_SEGMEXEC
71890 + struct vm_area_struct *vma_m;
71891 +
71892 + vma_m = pax_find_mirror_vma(vma);
71893 +#endif
71894 +
71895 size = vma->vm_end - address;
71896 grow = (vma->vm_start - address) >> PAGE_SHIFT;
71897
71898 @@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
71899 if (!error) {
71900 vma->vm_start = address;
71901 vma->vm_pgoff -= grow;
71902 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
71903 +
71904 +#ifdef CONFIG_PAX_SEGMEXEC
71905 + if (vma_m) {
71906 + vma_m->vm_start -= grow << PAGE_SHIFT;
71907 + vma_m->vm_pgoff -= grow;
71908 + }
71909 +#endif
71910 +
71911 perf_event_mmap(vma);
71912 }
71913 }
71914 }
71915 vma_unlock_anon_vma(vma);
71916 + if (lockprev)
71917 + vma_unlock_anon_vma(prev);
71918 khugepaged_enter_vma_merge(vma);
71919 return error;
71920 }
71921 @@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
71922 do {
71923 long nrpages = vma_pages(vma);
71924
71925 +#ifdef CONFIG_PAX_SEGMEXEC
71926 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
71927 + vma = remove_vma(vma);
71928 + continue;
71929 + }
71930 +#endif
71931 +
71932 mm->total_vm -= nrpages;
71933 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
71934 vma = remove_vma(vma);
71935 @@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
71936 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
71937 vma->vm_prev = NULL;
71938 do {
71939 +
71940 +#ifdef CONFIG_PAX_SEGMEXEC
71941 + if (vma->vm_mirror) {
71942 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
71943 + vma->vm_mirror->vm_mirror = NULL;
71944 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
71945 + vma->vm_mirror = NULL;
71946 + }
71947 +#endif
71948 +
71949 rb_erase(&vma->vm_rb, &mm->mm_rb);
71950 mm->map_count--;
71951 tail_vma = vma;
71952 @@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71953 struct vm_area_struct *new;
71954 int err = -ENOMEM;
71955
71956 +#ifdef CONFIG_PAX_SEGMEXEC
71957 + struct vm_area_struct *vma_m, *new_m = NULL;
71958 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
71959 +#endif
71960 +
71961 if (is_vm_hugetlb_page(vma) && (addr &
71962 ~(huge_page_mask(hstate_vma(vma)))))
71963 return -EINVAL;
71964
71965 +#ifdef CONFIG_PAX_SEGMEXEC
71966 + vma_m = pax_find_mirror_vma(vma);
71967 +#endif
71968 +
71969 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71970 if (!new)
71971 goto out_err;
71972
71973 +#ifdef CONFIG_PAX_SEGMEXEC
71974 + if (vma_m) {
71975 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
71976 + if (!new_m) {
71977 + kmem_cache_free(vm_area_cachep, new);
71978 + goto out_err;
71979 + }
71980 + }
71981 +#endif
71982 +
71983 /* most fields are the same, copy all, and then fixup */
71984 *new = *vma;
71985
71986 @@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
71987 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
71988 }
71989
71990 +#ifdef CONFIG_PAX_SEGMEXEC
71991 + if (vma_m) {
71992 + *new_m = *vma_m;
71993 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
71994 + new_m->vm_mirror = new;
71995 + new->vm_mirror = new_m;
71996 +
71997 + if (new_below)
71998 + new_m->vm_end = addr_m;
71999 + else {
72000 + new_m->vm_start = addr_m;
72001 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72002 + }
72003 + }
72004 +#endif
72005 +
72006 pol = mpol_dup(vma_policy(vma));
72007 if (IS_ERR(pol)) {
72008 err = PTR_ERR(pol);
72009 @@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72010 else
72011 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72012
72013 +#ifdef CONFIG_PAX_SEGMEXEC
72014 + if (!err && vma_m) {
72015 + if (anon_vma_clone(new_m, vma_m))
72016 + goto out_free_mpol;
72017 +
72018 + mpol_get(pol);
72019 + vma_set_policy(new_m, pol);
72020 +
72021 + if (new_m->vm_file) {
72022 + get_file(new_m->vm_file);
72023 + if (vma_m->vm_flags & VM_EXECUTABLE)
72024 + added_exe_file_vma(mm);
72025 + }
72026 +
72027 + if (new_m->vm_ops && new_m->vm_ops->open)
72028 + new_m->vm_ops->open(new_m);
72029 +
72030 + if (new_below)
72031 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72032 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72033 + else
72034 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72035 +
72036 + if (err) {
72037 + if (new_m->vm_ops && new_m->vm_ops->close)
72038 + new_m->vm_ops->close(new_m);
72039 + if (new_m->vm_file) {
72040 + if (vma_m->vm_flags & VM_EXECUTABLE)
72041 + removed_exe_file_vma(mm);
72042 + fput(new_m->vm_file);
72043 + }
72044 + mpol_put(pol);
72045 + }
72046 + }
72047 +#endif
72048 +
72049 /* Success. */
72050 if (!err)
72051 return 0;
72052 @@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72053 removed_exe_file_vma(mm);
72054 fput(new->vm_file);
72055 }
72056 - unlink_anon_vmas(new);
72057 out_free_mpol:
72058 mpol_put(pol);
72059 out_free_vma:
72060 +
72061 +#ifdef CONFIG_PAX_SEGMEXEC
72062 + if (new_m) {
72063 + unlink_anon_vmas(new_m);
72064 + kmem_cache_free(vm_area_cachep, new_m);
72065 + }
72066 +#endif
72067 +
72068 + unlink_anon_vmas(new);
72069 kmem_cache_free(vm_area_cachep, new);
72070 out_err:
72071 return err;
72072 @@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72073 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72074 unsigned long addr, int new_below)
72075 {
72076 +
72077 +#ifdef CONFIG_PAX_SEGMEXEC
72078 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72079 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72080 + if (mm->map_count >= sysctl_max_map_count-1)
72081 + return -ENOMEM;
72082 + } else
72083 +#endif
72084 +
72085 if (mm->map_count >= sysctl_max_map_count)
72086 return -ENOMEM;
72087
72088 @@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72089 * work. This now handles partial unmappings.
72090 * Jeremy Fitzhardinge <jeremy@goop.org>
72091 */
72092 +#ifdef CONFIG_PAX_SEGMEXEC
72093 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72094 {
72095 + int ret = __do_munmap(mm, start, len);
72096 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72097 + return ret;
72098 +
72099 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72100 +}
72101 +
72102 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72103 +#else
72104 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72105 +#endif
72106 +{
72107 unsigned long end;
72108 struct vm_area_struct *vma, *prev, *last;
72109
72110 + /*
72111 + * mm->mmap_sem is required to protect against another thread
72112 + * changing the mappings in case we sleep.
72113 + */
72114 + verify_mm_writelocked(mm);
72115 +
72116 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72117 return -EINVAL;
72118
72119 @@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72120 /* Fix up all other VM information */
72121 remove_vma_list(mm, vma);
72122
72123 + track_exec_limit(mm, start, end, 0UL);
72124 +
72125 return 0;
72126 }
72127 EXPORT_SYMBOL(do_munmap);
72128 @@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
72129 int ret;
72130 struct mm_struct *mm = current->mm;
72131
72132 +
72133 +#ifdef CONFIG_PAX_SEGMEXEC
72134 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
72135 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
72136 + return -EINVAL;
72137 +#endif
72138 +
72139 down_write(&mm->mmap_sem);
72140 ret = do_munmap(mm, start, len);
72141 up_write(&mm->mmap_sem);
72142 @@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72143 return vm_munmap(addr, len);
72144 }
72145
72146 -static inline void verify_mm_writelocked(struct mm_struct *mm)
72147 -{
72148 -#ifdef CONFIG_DEBUG_VM
72149 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72150 - WARN_ON(1);
72151 - up_read(&mm->mmap_sem);
72152 - }
72153 -#endif
72154 -}
72155 -
72156 /*
72157 * this is really a simplified "do_mmap". it only handles
72158 * anonymous maps. eventually we may be able to do some
72159 @@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72160 struct rb_node ** rb_link, * rb_parent;
72161 pgoff_t pgoff = addr >> PAGE_SHIFT;
72162 int error;
72163 + unsigned long charged;
72164
72165 len = PAGE_ALIGN(len);
72166 if (!len)
72167 @@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72168
72169 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72170
72171 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72172 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72173 + flags &= ~VM_EXEC;
72174 +
72175 +#ifdef CONFIG_PAX_MPROTECT
72176 + if (mm->pax_flags & MF_PAX_MPROTECT)
72177 + flags &= ~VM_MAYEXEC;
72178 +#endif
72179 +
72180 + }
72181 +#endif
72182 +
72183 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72184 if (error & ~PAGE_MASK)
72185 return error;
72186
72187 + charged = len >> PAGE_SHIFT;
72188 +
72189 /*
72190 * mlock MCL_FUTURE?
72191 */
72192 if (mm->def_flags & VM_LOCKED) {
72193 unsigned long locked, lock_limit;
72194 - locked = len >> PAGE_SHIFT;
72195 + locked = charged;
72196 locked += mm->locked_vm;
72197 lock_limit = rlimit(RLIMIT_MEMLOCK);
72198 lock_limit >>= PAGE_SHIFT;
72199 @@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72200 /*
72201 * Clear old maps. this also does some error checking for us
72202 */
72203 - munmap_back:
72204 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72205 if (vma && vma->vm_start < addr + len) {
72206 if (do_munmap(mm, addr, len))
72207 return -ENOMEM;
72208 - goto munmap_back;
72209 - }
72210 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72211 + BUG_ON(vma && vma->vm_start < addr + len);
72212 + }
72213
72214 /* Check against address space limits *after* clearing old maps... */
72215 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72216 + if (!may_expand_vm(mm, charged))
72217 return -ENOMEM;
72218
72219 if (mm->map_count > sysctl_max_map_count)
72220 return -ENOMEM;
72221
72222 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
72223 + if (security_vm_enough_memory_mm(mm, charged))
72224 return -ENOMEM;
72225
72226 /* Can we just expand an old private anonymous mapping? */
72227 @@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72228 */
72229 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72230 if (!vma) {
72231 - vm_unacct_memory(len >> PAGE_SHIFT);
72232 + vm_unacct_memory(charged);
72233 return -ENOMEM;
72234 }
72235
72236 @@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
72237 vma_link(mm, vma, prev, rb_link, rb_parent);
72238 out:
72239 perf_event_mmap(vma);
72240 - mm->total_vm += len >> PAGE_SHIFT;
72241 + mm->total_vm += charged;
72242 if (flags & VM_LOCKED) {
72243 if (!mlock_vma_pages_range(vma, addr, addr + len))
72244 - mm->locked_vm += (len >> PAGE_SHIFT);
72245 + mm->locked_vm += charged;
72246 }
72247 + track_exec_limit(mm, addr, addr + len, flags);
72248 return addr;
72249 }
72250
72251 @@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
72252 * Walk the list again, actually closing and freeing it,
72253 * with preemption enabled, without holding any MM locks.
72254 */
72255 - while (vma)
72256 + while (vma) {
72257 + vma->vm_mirror = NULL;
72258 vma = remove_vma(vma);
72259 + }
72260
72261 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72262 }
72263 @@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72264 struct vm_area_struct * __vma, * prev;
72265 struct rb_node ** rb_link, * rb_parent;
72266
72267 +#ifdef CONFIG_PAX_SEGMEXEC
72268 + struct vm_area_struct *vma_m = NULL;
72269 +#endif
72270 +
72271 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72272 + return -EPERM;
72273 +
72274 /*
72275 * The vm_pgoff of a purely anonymous vma should be irrelevant
72276 * until its first write fault, when page's anon_vma and index
72277 @@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
72278 if ((vma->vm_flags & VM_ACCOUNT) &&
72279 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72280 return -ENOMEM;
72281 +
72282 +#ifdef CONFIG_PAX_SEGMEXEC
72283 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72284 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72285 + if (!vma_m)
72286 + return -ENOMEM;
72287 + }
72288 +#endif
72289 +
72290 vma_link(mm, vma, prev, rb_link, rb_parent);
72291 +
72292 +#ifdef CONFIG_PAX_SEGMEXEC
72293 + if (vma_m)
72294 + BUG_ON(pax_mirror_vma(vma_m, vma));
72295 +#endif
72296 +
72297 return 0;
72298 }
72299
72300 @@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72301 struct mempolicy *pol;
72302 bool faulted_in_anon_vma = true;
72303
72304 + BUG_ON(vma->vm_mirror);
72305 +
72306 /*
72307 * If anonymous vma has not yet been faulted, update new pgoff
72308 * to match new location, to increase its chance of merging.
72309 @@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
72310 return NULL;
72311 }
72312
72313 +#ifdef CONFIG_PAX_SEGMEXEC
72314 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
72315 +{
72316 + struct vm_area_struct *prev_m;
72317 + struct rb_node **rb_link_m, *rb_parent_m;
72318 + struct mempolicy *pol_m;
72319 +
72320 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72321 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72322 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72323 + *vma_m = *vma;
72324 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72325 + if (anon_vma_clone(vma_m, vma))
72326 + return -ENOMEM;
72327 + pol_m = vma_policy(vma_m);
72328 + mpol_get(pol_m);
72329 + vma_set_policy(vma_m, pol_m);
72330 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72331 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72332 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72333 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72334 + if (vma_m->vm_file)
72335 + get_file(vma_m->vm_file);
72336 + if (vma_m->vm_ops && vma_m->vm_ops->open)
72337 + vma_m->vm_ops->open(vma_m);
72338 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72339 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72340 + vma_m->vm_mirror = vma;
72341 + vma->vm_mirror = vma_m;
72342 + return 0;
72343 +}
72344 +#endif
72345 +
72346 /*
72347 * Return true if the calling process may expand its vm space by the passed
72348 * number of pages
72349 @@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
72350
72351 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
72352
72353 +#ifdef CONFIG_PAX_RANDMMAP
72354 + if (mm->pax_flags & MF_PAX_RANDMMAP)
72355 + cur -= mm->brk_gap;
72356 +#endif
72357 +
72358 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72359 if (cur + npages > lim)
72360 return 0;
72361 return 1;
72362 @@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
72363 vma->vm_start = addr;
72364 vma->vm_end = addr + len;
72365
72366 +#ifdef CONFIG_PAX_MPROTECT
72367 + if (mm->pax_flags & MF_PAX_MPROTECT) {
72368 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
72369 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72370 + return -EPERM;
72371 + if (!(vm_flags & VM_EXEC))
72372 + vm_flags &= ~VM_MAYEXEC;
72373 +#else
72374 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72375 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72376 +#endif
72377 + else
72378 + vm_flags &= ~VM_MAYWRITE;
72379 + }
72380 +#endif
72381 +
72382 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72383 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72384
72385 diff --git a/mm/mprotect.c b/mm/mprotect.c
72386 index a409926..8b32e6d 100644
72387 --- a/mm/mprotect.c
72388 +++ b/mm/mprotect.c
72389 @@ -23,10 +23,17 @@
72390 #include <linux/mmu_notifier.h>
72391 #include <linux/migrate.h>
72392 #include <linux/perf_event.h>
72393 +
72394 +#ifdef CONFIG_PAX_MPROTECT
72395 +#include <linux/elf.h>
72396 +#include <linux/binfmts.h>
72397 +#endif
72398 +
72399 #include <asm/uaccess.h>
72400 #include <asm/pgtable.h>
72401 #include <asm/cacheflush.h>
72402 #include <asm/tlbflush.h>
72403 +#include <asm/mmu_context.h>
72404
72405 #ifndef pgprot_modify
72406 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
72407 @@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
72408 flush_tlb_range(vma, start, end);
72409 }
72410
72411 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72412 +/* called while holding the mmap semaphor for writing except stack expansion */
72413 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72414 +{
72415 + unsigned long oldlimit, newlimit = 0UL;
72416 +
72417 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
72418 + return;
72419 +
72420 + spin_lock(&mm->page_table_lock);
72421 + oldlimit = mm->context.user_cs_limit;
72422 + if ((prot & VM_EXEC) && oldlimit < end)
72423 + /* USER_CS limit moved up */
72424 + newlimit = end;
72425 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72426 + /* USER_CS limit moved down */
72427 + newlimit = start;
72428 +
72429 + if (newlimit) {
72430 + mm->context.user_cs_limit = newlimit;
72431 +
72432 +#ifdef CONFIG_SMP
72433 + wmb();
72434 + cpus_clear(mm->context.cpu_user_cs_mask);
72435 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72436 +#endif
72437 +
72438 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72439 + }
72440 + spin_unlock(&mm->page_table_lock);
72441 + if (newlimit == end) {
72442 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
72443 +
72444 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
72445 + if (is_vm_hugetlb_page(vma))
72446 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72447 + else
72448 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72449 + }
72450 +}
72451 +#endif
72452 +
72453 int
72454 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72455 unsigned long start, unsigned long end, unsigned long newflags)
72456 @@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72457 int error;
72458 int dirty_accountable = 0;
72459
72460 +#ifdef CONFIG_PAX_SEGMEXEC
72461 + struct vm_area_struct *vma_m = NULL;
72462 + unsigned long start_m, end_m;
72463 +
72464 + start_m = start + SEGMEXEC_TASK_SIZE;
72465 + end_m = end + SEGMEXEC_TASK_SIZE;
72466 +#endif
72467 +
72468 if (newflags == oldflags) {
72469 *pprev = vma;
72470 return 0;
72471 }
72472
72473 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72474 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72475 +
72476 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72477 + return -ENOMEM;
72478 +
72479 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72480 + return -ENOMEM;
72481 + }
72482 +
72483 /*
72484 * If we make a private mapping writable we increase our commit;
72485 * but (without finer accounting) cannot reduce our commit if we
72486 @@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72487 }
72488 }
72489
72490 +#ifdef CONFIG_PAX_SEGMEXEC
72491 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72492 + if (start != vma->vm_start) {
72493 + error = split_vma(mm, vma, start, 1);
72494 + if (error)
72495 + goto fail;
72496 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72497 + *pprev = (*pprev)->vm_next;
72498 + }
72499 +
72500 + if (end != vma->vm_end) {
72501 + error = split_vma(mm, vma, end, 0);
72502 + if (error)
72503 + goto fail;
72504 + }
72505 +
72506 + if (pax_find_mirror_vma(vma)) {
72507 + error = __do_munmap(mm, start_m, end_m - start_m);
72508 + if (error)
72509 + goto fail;
72510 + } else {
72511 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72512 + if (!vma_m) {
72513 + error = -ENOMEM;
72514 + goto fail;
72515 + }
72516 + vma->vm_flags = newflags;
72517 + error = pax_mirror_vma(vma_m, vma);
72518 + if (error) {
72519 + vma->vm_flags = oldflags;
72520 + goto fail;
72521 + }
72522 + }
72523 + }
72524 +#endif
72525 +
72526 /*
72527 * First try to merge with previous and/or next vma.
72528 */
72529 @@ -204,9 +307,21 @@ success:
72530 * vm_flags and vm_page_prot are protected by the mmap_sem
72531 * held in write mode.
72532 */
72533 +
72534 +#ifdef CONFIG_PAX_SEGMEXEC
72535 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72536 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72537 +#endif
72538 +
72539 vma->vm_flags = newflags;
72540 +
72541 +#ifdef CONFIG_PAX_MPROTECT
72542 + if (mm->binfmt && mm->binfmt->handle_mprotect)
72543 + mm->binfmt->handle_mprotect(vma, newflags);
72544 +#endif
72545 +
72546 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72547 - vm_get_page_prot(newflags));
72548 + vm_get_page_prot(vma->vm_flags));
72549
72550 if (vma_wants_writenotify(vma)) {
72551 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
72552 @@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72553 end = start + len;
72554 if (end <= start)
72555 return -ENOMEM;
72556 +
72557 +#ifdef CONFIG_PAX_SEGMEXEC
72558 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72559 + if (end > SEGMEXEC_TASK_SIZE)
72560 + return -EINVAL;
72561 + } else
72562 +#endif
72563 +
72564 + if (end > TASK_SIZE)
72565 + return -EINVAL;
72566 +
72567 if (!arch_validate_prot(prot))
72568 return -EINVAL;
72569
72570 @@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72571 /*
72572 * Does the application expect PROT_READ to imply PROT_EXEC:
72573 */
72574 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72575 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72576 prot |= PROT_EXEC;
72577
72578 vm_flags = calc_vm_prot_bits(prot);
72579 @@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72580 if (start > vma->vm_start)
72581 prev = vma;
72582
72583 +#ifdef CONFIG_PAX_MPROTECT
72584 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72585 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
72586 +#endif
72587 +
72588 for (nstart = start ; ; ) {
72589 unsigned long newflags;
72590
72591 @@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72592
72593 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72594 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72595 + if (prot & (PROT_WRITE | PROT_EXEC))
72596 + gr_log_rwxmprotect(vma->vm_file);
72597 +
72598 + error = -EACCES;
72599 + goto out;
72600 + }
72601 +
72602 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72603 error = -EACCES;
72604 goto out;
72605 }
72606 @@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
72607 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
72608 if (error)
72609 goto out;
72610 +
72611 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
72612 +
72613 nstart = tmp;
72614
72615 if (nstart < prev->vm_end)
72616 diff --git a/mm/mremap.c b/mm/mremap.c
72617 index db8d983..76506cb 100644
72618 --- a/mm/mremap.c
72619 +++ b/mm/mremap.c
72620 @@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
72621 continue;
72622 pte = ptep_get_and_clear(mm, old_addr, old_pte);
72623 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72624 +
72625 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72626 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
72627 + pte = pte_exprotect(pte);
72628 +#endif
72629 +
72630 set_pte_at(mm, new_addr, new_pte, pte);
72631 }
72632
72633 @@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
72634 if (is_vm_hugetlb_page(vma))
72635 goto Einval;
72636
72637 +#ifdef CONFIG_PAX_SEGMEXEC
72638 + if (pax_find_mirror_vma(vma))
72639 + goto Einval;
72640 +#endif
72641 +
72642 /* We can't remap across vm area boundaries */
72643 if (old_len > vma->vm_end - addr)
72644 goto Efault;
72645 @@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
72646 unsigned long ret = -EINVAL;
72647 unsigned long charged = 0;
72648 unsigned long map_flags;
72649 + unsigned long pax_task_size = TASK_SIZE;
72650
72651 if (new_addr & ~PAGE_MASK)
72652 goto out;
72653
72654 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72655 +#ifdef CONFIG_PAX_SEGMEXEC
72656 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72657 + pax_task_size = SEGMEXEC_TASK_SIZE;
72658 +#endif
72659 +
72660 + pax_task_size -= PAGE_SIZE;
72661 +
72662 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72663 goto out;
72664
72665 /* Check if the location we're moving into overlaps the
72666 * old location at all, and fail if it does.
72667 */
72668 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
72669 - goto out;
72670 -
72671 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
72672 + if (addr + old_len > new_addr && new_addr + new_len > addr)
72673 goto out;
72674
72675 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72676 @@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
72677 struct vm_area_struct *vma;
72678 unsigned long ret = -EINVAL;
72679 unsigned long charged = 0;
72680 + unsigned long pax_task_size = TASK_SIZE;
72681
72682 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72683 goto out;
72684 @@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
72685 if (!new_len)
72686 goto out;
72687
72688 +#ifdef CONFIG_PAX_SEGMEXEC
72689 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
72690 + pax_task_size = SEGMEXEC_TASK_SIZE;
72691 +#endif
72692 +
72693 + pax_task_size -= PAGE_SIZE;
72694 +
72695 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72696 + old_len > pax_task_size || addr > pax_task_size-old_len)
72697 + goto out;
72698 +
72699 if (flags & MREMAP_FIXED) {
72700 if (flags & MREMAP_MAYMOVE)
72701 ret = mremap_to(addr, old_len, new_addr, new_len);
72702 @@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
72703 addr + new_len);
72704 }
72705 ret = addr;
72706 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
72707 goto out;
72708 }
72709 }
72710 @@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
72711 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
72712 if (ret)
72713 goto out;
72714 +
72715 + map_flags = vma->vm_flags;
72716 ret = move_vma(vma, addr, old_len, new_len, new_addr);
72717 + if (!(ret & ~PAGE_MASK)) {
72718 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
72719 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
72720 + }
72721 }
72722 out:
72723 if (ret & ~PAGE_MASK)
72724 diff --git a/mm/nommu.c b/mm/nommu.c
72725 index bb8f4f0..40d3e02 100644
72726 --- a/mm/nommu.c
72727 +++ b/mm/nommu.c
72728 @@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
72729 int sysctl_overcommit_ratio = 50; /* default is 50% */
72730 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
72731 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
72732 -int heap_stack_gap = 0;
72733
72734 atomic_long_t mmap_pages_allocated;
72735
72736 @@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
72737 EXPORT_SYMBOL(find_vma);
72738
72739 /*
72740 - * find a VMA
72741 - * - we don't extend stack VMAs under NOMMU conditions
72742 - */
72743 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
72744 -{
72745 - return find_vma(mm, addr);
72746 -}
72747 -
72748 -/*
72749 * expand a stack to a given address
72750 * - not supported under NOMMU conditions
72751 */
72752 @@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72753
72754 /* most fields are the same, copy all, and then fixup */
72755 *new = *vma;
72756 + INIT_LIST_HEAD(&new->anon_vma_chain);
72757 *region = *vma->vm_region;
72758 new->vm_region = region;
72759
72760 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
72761 index 918330f..ae99ae1 100644
72762 --- a/mm/page_alloc.c
72763 +++ b/mm/page_alloc.c
72764 @@ -335,7 +335,7 @@ out:
72765 * This usage means that zero-order pages may not be compound.
72766 */
72767
72768 -static void free_compound_page(struct page *page)
72769 +void free_compound_page(struct page *page)
72770 {
72771 __free_pages_ok(page, compound_order(page));
72772 }
72773 @@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72774 int i;
72775 int bad = 0;
72776
72777 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72778 + unsigned long index = 1UL << order;
72779 +#endif
72780 +
72781 trace_mm_page_free(page, order);
72782 kmemcheck_free_shadow(page, order);
72783
72784 @@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
72785 debug_check_no_obj_freed(page_address(page),
72786 PAGE_SIZE << order);
72787 }
72788 +
72789 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
72790 + for (; index; --index)
72791 + sanitize_highpage(page + index - 1);
72792 +#endif
72793 +
72794 arch_free_page(page, order);
72795 kernel_map_pages(page, 1 << order, 0);
72796
72797 @@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
72798 arch_alloc_page(page, order);
72799 kernel_map_pages(page, 1 << order, 1);
72800
72801 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
72802 if (gfp_flags & __GFP_ZERO)
72803 prep_zero_page(page, order, gfp_flags);
72804 +#endif
72805
72806 if (order && (gfp_flags & __GFP_COMP))
72807 prep_compound_page(page, order);
72808 @@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
72809 unsigned long pfn;
72810
72811 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
72812 +#ifdef CONFIG_X86_32
72813 + /* boot failures in VMware 8 on 32bit vanilla since
72814 + this change */
72815 + if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
72816 +#else
72817 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
72818 +#endif
72819 return 1;
72820 }
72821 return 0;
72822 diff --git a/mm/percpu.c b/mm/percpu.c
72823 index bb4be74..a43ea85 100644
72824 --- a/mm/percpu.c
72825 +++ b/mm/percpu.c
72826 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
72827 static unsigned int pcpu_high_unit_cpu __read_mostly;
72828
72829 /* the address of the first chunk which starts with the kernel static area */
72830 -void *pcpu_base_addr __read_mostly;
72831 +void *pcpu_base_addr __read_only;
72832 EXPORT_SYMBOL_GPL(pcpu_base_addr);
72833
72834 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
72835 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
72836 index c20ff48..137702a 100644
72837 --- a/mm/process_vm_access.c
72838 +++ b/mm/process_vm_access.c
72839 @@ -13,6 +13,7 @@
72840 #include <linux/uio.h>
72841 #include <linux/sched.h>
72842 #include <linux/highmem.h>
72843 +#include <linux/security.h>
72844 #include <linux/ptrace.h>
72845 #include <linux/slab.h>
72846 #include <linux/syscalls.h>
72847 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72848 size_t iov_l_curr_offset = 0;
72849 ssize_t iov_len;
72850
72851 + return -ENOSYS; // PaX: until properly audited
72852 +
72853 /*
72854 * Work out how many pages of struct pages we're going to need
72855 * when eventually calling get_user_pages
72856 */
72857 for (i = 0; i < riovcnt; i++) {
72858 iov_len = rvec[i].iov_len;
72859 - if (iov_len > 0) {
72860 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
72861 - + iov_len)
72862 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
72863 - / PAGE_SIZE + 1;
72864 - nr_pages = max(nr_pages, nr_pages_iov);
72865 - }
72866 + if (iov_len <= 0)
72867 + continue;
72868 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
72869 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
72870 + nr_pages = max(nr_pages, nr_pages_iov);
72871 }
72872
72873 if (nr_pages == 0)
72874 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
72875 goto free_proc_pages;
72876 }
72877
72878 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
72879 + rc = -EPERM;
72880 + goto put_task_struct;
72881 + }
72882 +
72883 mm = mm_access(task, PTRACE_MODE_ATTACH);
72884 if (!mm || IS_ERR(mm)) {
72885 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
72886 diff --git a/mm/rmap.c b/mm/rmap.c
72887 index 5b5ad58..0f77903 100644
72888 --- a/mm/rmap.c
72889 +++ b/mm/rmap.c
72890 @@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72891 struct anon_vma *anon_vma = vma->anon_vma;
72892 struct anon_vma_chain *avc;
72893
72894 +#ifdef CONFIG_PAX_SEGMEXEC
72895 + struct anon_vma_chain *avc_m = NULL;
72896 +#endif
72897 +
72898 might_sleep();
72899 if (unlikely(!anon_vma)) {
72900 struct mm_struct *mm = vma->vm_mm;
72901 @@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72902 if (!avc)
72903 goto out_enomem;
72904
72905 +#ifdef CONFIG_PAX_SEGMEXEC
72906 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
72907 + if (!avc_m)
72908 + goto out_enomem_free_avc;
72909 +#endif
72910 +
72911 anon_vma = find_mergeable_anon_vma(vma);
72912 allocated = NULL;
72913 if (!anon_vma) {
72914 @@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72915 /* page_table_lock to protect against threads */
72916 spin_lock(&mm->page_table_lock);
72917 if (likely(!vma->anon_vma)) {
72918 +
72919 +#ifdef CONFIG_PAX_SEGMEXEC
72920 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
72921 +
72922 + if (vma_m) {
72923 + BUG_ON(vma_m->anon_vma);
72924 + vma_m->anon_vma = anon_vma;
72925 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
72926 + avc_m = NULL;
72927 + }
72928 +#endif
72929 +
72930 vma->anon_vma = anon_vma;
72931 anon_vma_chain_link(vma, avc, anon_vma);
72932 allocated = NULL;
72933 @@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
72934
72935 if (unlikely(allocated))
72936 put_anon_vma(allocated);
72937 +
72938 +#ifdef CONFIG_PAX_SEGMEXEC
72939 + if (unlikely(avc_m))
72940 + anon_vma_chain_free(avc_m);
72941 +#endif
72942 +
72943 if (unlikely(avc))
72944 anon_vma_chain_free(avc);
72945 }
72946 return 0;
72947
72948 out_enomem_free_avc:
72949 +
72950 +#ifdef CONFIG_PAX_SEGMEXEC
72951 + if (avc_m)
72952 + anon_vma_chain_free(avc_m);
72953 +#endif
72954 +
72955 anon_vma_chain_free(avc);
72956 out_enomem:
72957 return -ENOMEM;
72958 @@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
72959 * Attach the anon_vmas from src to dst.
72960 * Returns 0 on success, -ENOMEM on failure.
72961 */
72962 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
72963 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
72964 {
72965 struct anon_vma_chain *avc, *pavc;
72966 struct anon_vma *root = NULL;
72967 @@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
72968 * the corresponding VMA in the parent process is attached to.
72969 * Returns 0 on success, non-zero on failure.
72970 */
72971 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
72972 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
72973 {
72974 struct anon_vma_chain *avc;
72975 struct anon_vma *anon_vma;
72976 diff --git a/mm/shmem.c b/mm/shmem.c
72977 index 9d65a02..7c877e7 100644
72978 --- a/mm/shmem.c
72979 +++ b/mm/shmem.c
72980 @@ -31,7 +31,7 @@
72981 #include <linux/export.h>
72982 #include <linux/swap.h>
72983
72984 -static struct vfsmount *shm_mnt;
72985 +struct vfsmount *shm_mnt;
72986
72987 #ifdef CONFIG_SHMEM
72988 /*
72989 @@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
72990 #define BOGO_DIRENT_SIZE 20
72991
72992 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
72993 -#define SHORT_SYMLINK_LEN 128
72994 +#define SHORT_SYMLINK_LEN 64
72995
72996 struct shmem_xattr {
72997 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
72998 @@ -2236,8 +2236,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
72999 int err = -ENOMEM;
73000
73001 /* Round up to L1_CACHE_BYTES to resist false sharing */
73002 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73003 - L1_CACHE_BYTES), GFP_KERNEL);
73004 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73005 if (!sbinfo)
73006 return -ENOMEM;
73007
73008 diff --git a/mm/slab.c b/mm/slab.c
73009 index e901a36..4923e4d 100644
73010 --- a/mm/slab.c
73011 +++ b/mm/slab.c
73012 @@ -153,7 +153,7 @@
73013
73014 /* Legal flag mask for kmem_cache_create(). */
73015 #if DEBUG
73016 -# define CREATE_MASK (SLAB_RED_ZONE | \
73017 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73018 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73019 SLAB_CACHE_DMA | \
73020 SLAB_STORE_USER | \
73021 @@ -161,7 +161,7 @@
73022 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73023 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73024 #else
73025 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73026 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73027 SLAB_CACHE_DMA | \
73028 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73029 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73030 @@ -290,7 +290,7 @@ struct kmem_list3 {
73031 * Need this for bootstrapping a per node allocator.
73032 */
73033 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
73034 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73035 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
73036 #define CACHE_CACHE 0
73037 #define SIZE_AC MAX_NUMNODES
73038 #define SIZE_L3 (2 * MAX_NUMNODES)
73039 @@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
73040 if ((x)->max_freeable < i) \
73041 (x)->max_freeable = i; \
73042 } while (0)
73043 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73044 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73045 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73046 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73047 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73048 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73049 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73050 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73051 #else
73052 #define STATS_INC_ACTIVE(x) do { } while (0)
73053 #define STATS_DEC_ACTIVE(x) do { } while (0)
73054 @@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
73055 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73056 */
73057 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73058 - const struct slab *slab, void *obj)
73059 + const struct slab *slab, const void *obj)
73060 {
73061 u32 offset = (obj - slab->s_mem);
73062 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
73063 @@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
73064 struct cache_names {
73065 char *name;
73066 char *name_dma;
73067 + char *name_usercopy;
73068 };
73069
73070 static struct cache_names __initdata cache_names[] = {
73071 -#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73072 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
73073 #include <linux/kmalloc_sizes.h>
73074 - {NULL,}
73075 + {NULL}
73076 #undef CACHE
73077 };
73078
73079 @@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
73080 if (unlikely(gfpflags & GFP_DMA))
73081 return csizep->cs_dmacachep;
73082 #endif
73083 +
73084 +#ifdef CONFIG_PAX_USERCOPY
73085 + if (unlikely(gfpflags & GFP_USERCOPY))
73086 + return csizep->cs_usercopycachep;
73087 +#endif
73088 +
73089 return csizep->cs_cachep;
73090 }
73091
73092 @@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
73093 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73094 sizes[INDEX_AC].cs_size,
73095 ARCH_KMALLOC_MINALIGN,
73096 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73097 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73098 NULL);
73099
73100 if (INDEX_AC != INDEX_L3) {
73101 @@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
73102 kmem_cache_create(names[INDEX_L3].name,
73103 sizes[INDEX_L3].cs_size,
73104 ARCH_KMALLOC_MINALIGN,
73105 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73106 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73107 NULL);
73108 }
73109
73110 @@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
73111 sizes->cs_cachep = kmem_cache_create(names->name,
73112 sizes->cs_size,
73113 ARCH_KMALLOC_MINALIGN,
73114 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73115 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73116 NULL);
73117 }
73118 #ifdef CONFIG_ZONE_DMA
73119 @@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
73120 SLAB_PANIC,
73121 NULL);
73122 #endif
73123 +
73124 +#ifdef CONFIG_PAX_USERCOPY
73125 + sizes->cs_usercopycachep = kmem_cache_create(
73126 + names->name_usercopy,
73127 + sizes->cs_size,
73128 + ARCH_KMALLOC_MINALIGN,
73129 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73130 + NULL);
73131 +#endif
73132 +
73133 sizes++;
73134 names++;
73135 }
73136 @@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
73137 }
73138 /* cpu stats */
73139 {
73140 - unsigned long allochit = atomic_read(&cachep->allochit);
73141 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73142 - unsigned long freehit = atomic_read(&cachep->freehit);
73143 - unsigned long freemiss = atomic_read(&cachep->freemiss);
73144 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73145 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73146 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73147 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73148
73149 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73150 allochit, allocmiss, freehit, freemiss);
73151 @@ -4652,13 +4669,90 @@ static int __init slab_proc_init(void)
73152 {
73153 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
73154 #ifdef CONFIG_DEBUG_SLAB_LEAK
73155 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
73156 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
73157 #endif
73158 return 0;
73159 }
73160 module_init(slab_proc_init);
73161 #endif
73162
73163 +bool is_usercopy_alloc(const void *ptr)
73164 +{
73165 +#ifdef CONFIG_PAX_USERCOPY
73166 + struct page *page;
73167 + struct kmem_cache *cachep;
73168 + struct slab *slabp;
73169 +
73170 + if (ZERO_OR_NULL_PTR(ptr))
73171 + return false;
73172 +
73173 + if (!virt_addr_valid(ptr))
73174 + return false;
73175 +
73176 + page = virt_to_head_page(ptr);
73177 +
73178 + if (!PageSlab(page))
73179 + return false;
73180 +
73181 + cachep = page_get_cache(page);
73182 + if (!(cachep->flags & SLAB_USERCOPY))
73183 + return false;
73184 +
73185 + return true;
73186 +#endif
73187 +
73188 + return false;
73189 +}
73190 +
73191 +void check_object_size(const void *ptr, unsigned long n, bool to)
73192 +{
73193 +
73194 +#ifdef CONFIG_PAX_USERCOPY
73195 + struct page *page;
73196 + struct kmem_cache *cachep = NULL;
73197 + struct slab *slabp;
73198 + unsigned int objnr;
73199 + unsigned long offset;
73200 + const char *type;
73201 +
73202 + if (!n)
73203 + return;
73204 +
73205 + type = "<null>";
73206 + if (ZERO_OR_NULL_PTR(ptr))
73207 + goto report;
73208 +
73209 + if (!virt_addr_valid(ptr))
73210 + return;
73211 +
73212 + page = virt_to_head_page(ptr);
73213 +
73214 + type = "<process stack>";
73215 + if (!PageSlab(page)) {
73216 + if (object_is_on_stack(ptr, n) == -1)
73217 + goto report;
73218 + return;
73219 + }
73220 +
73221 + cachep = page_get_cache(page);
73222 + type = cachep->name;
73223 + if (!(cachep->flags & SLAB_USERCOPY))
73224 + goto report;
73225 +
73226 + slabp = page_get_slab(page);
73227 + objnr = obj_to_index(cachep, slabp, ptr);
73228 + BUG_ON(objnr >= cachep->num);
73229 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73230 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
73231 + return;
73232 +
73233 +report:
73234 + pax_report_usercopy(ptr, n, to, type);
73235 +#endif
73236 +
73237 +}
73238 +EXPORT_SYMBOL(check_object_size);
73239 +
73240 /**
73241 * ksize - get the actual amount of memory allocated for a given object
73242 * @objp: Pointer to the object
73243 diff --git a/mm/slob.c b/mm/slob.c
73244 index 8105be4..76a8cac 100644
73245 --- a/mm/slob.c
73246 +++ b/mm/slob.c
73247 @@ -29,7 +29,7 @@
73248 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73249 * alloc_pages() directly, allocating compound pages so the page order
73250 * does not have to be separately tracked, and also stores the exact
73251 - * allocation size in page->private so that it can be used to accurately
73252 + * allocation size in slob_page->size so that it can be used to accurately
73253 * provide ksize(). These objects are detected in kfree() because slob_page()
73254 * is false for them.
73255 *
73256 @@ -58,6 +58,7 @@
73257 */
73258
73259 #include <linux/kernel.h>
73260 +#include <linux/sched.h>
73261 #include <linux/slab.h>
73262 #include <linux/mm.h>
73263 #include <linux/swap.h> /* struct reclaim_state */
73264 @@ -102,7 +103,8 @@ struct slob_page {
73265 unsigned long flags; /* mandatory */
73266 atomic_t _count; /* mandatory */
73267 slobidx_t units; /* free units left in page */
73268 - unsigned long pad[2];
73269 + unsigned long pad[1];
73270 + unsigned long size; /* size when >=PAGE_SIZE */
73271 slob_t *free; /* first free slob_t in page */
73272 struct list_head list; /* linked list of free pages */
73273 };
73274 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
73275 */
73276 static inline int is_slob_page(struct slob_page *sp)
73277 {
73278 - return PageSlab((struct page *)sp);
73279 + return PageSlab((struct page *)sp) && !sp->size;
73280 }
73281
73282 static inline void set_slob_page(struct slob_page *sp)
73283 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
73284
73285 static inline struct slob_page *slob_page(const void *addr)
73286 {
73287 - return (struct slob_page *)virt_to_page(addr);
73288 + return (struct slob_page *)virt_to_head_page(addr);
73289 }
73290
73291 /*
73292 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
73293 /*
73294 * Return the size of a slob block.
73295 */
73296 -static slobidx_t slob_units(slob_t *s)
73297 +static slobidx_t slob_units(const slob_t *s)
73298 {
73299 if (s->units > 0)
73300 return s->units;
73301 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
73302 /*
73303 * Return the next free slob block pointer after this one.
73304 */
73305 -static slob_t *slob_next(slob_t *s)
73306 +static slob_t *slob_next(const slob_t *s)
73307 {
73308 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73309 slobidx_t next;
73310 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
73311 /*
73312 * Returns true if s is the last free block in its page.
73313 */
73314 -static int slob_last(slob_t *s)
73315 +static int slob_last(const slob_t *s)
73316 {
73317 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73318 }
73319 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
73320 if (!page)
73321 return NULL;
73322
73323 + set_slob_page(page);
73324 return page_address(page);
73325 }
73326
73327 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
73328 if (!b)
73329 return NULL;
73330 sp = slob_page(b);
73331 - set_slob_page(sp);
73332
73333 spin_lock_irqsave(&slob_lock, flags);
73334 sp->units = SLOB_UNITS(PAGE_SIZE);
73335 sp->free = b;
73336 + sp->size = 0;
73337 INIT_LIST_HEAD(&sp->list);
73338 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73339 set_slob_page_free(sp, slob_list);
73340 @@ -476,10 +479,9 @@ out:
73341 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73342 */
73343
73344 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73345 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73346 {
73347 - unsigned int *m;
73348 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73349 + slob_t *m;
73350 void *ret;
73351
73352 gfp &= gfp_allowed_mask;
73353 @@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73354
73355 if (!m)
73356 return NULL;
73357 - *m = size;
73358 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73359 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73360 + m[0].units = size;
73361 + m[1].units = align;
73362 ret = (void *)m + align;
73363
73364 trace_kmalloc_node(_RET_IP_, ret,
73365 @@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73366 gfp |= __GFP_COMP;
73367 ret = slob_new_pages(gfp, order, node);
73368 if (ret) {
73369 - struct page *page;
73370 - page = virt_to_page(ret);
73371 - page->private = size;
73372 + struct slob_page *sp;
73373 + sp = slob_page(ret);
73374 + sp->size = size;
73375 }
73376
73377 trace_kmalloc_node(_RET_IP_, ret,
73378 size, PAGE_SIZE << order, gfp, node);
73379 }
73380
73381 - kmemleak_alloc(ret, size, 1, gfp);
73382 + return ret;
73383 +}
73384 +
73385 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73386 +{
73387 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73388 + void *ret = __kmalloc_node_align(size, gfp, node, align);
73389 +
73390 + if (!ZERO_OR_NULL_PTR(ret))
73391 + kmemleak_alloc(ret, size, 1, gfp);
73392 return ret;
73393 }
73394 EXPORT_SYMBOL(__kmalloc_node);
73395 @@ -533,13 +547,97 @@ void kfree(const void *block)
73396 sp = slob_page(block);
73397 if (is_slob_page(sp)) {
73398 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73399 - unsigned int *m = (unsigned int *)(block - align);
73400 - slob_free(m, *m + align);
73401 - } else
73402 + slob_t *m = (slob_t *)(block - align);
73403 + slob_free(m, m[0].units + align);
73404 + } else {
73405 + clear_slob_page(sp);
73406 + free_slob_page(sp);
73407 + sp->size = 0;
73408 put_page(&sp->page);
73409 + }
73410 }
73411 EXPORT_SYMBOL(kfree);
73412
73413 +bool is_usercopy_alloc(const void *ptr)
73414 +{
73415 + return false;
73416 +}
73417 +
73418 +void check_object_size(const void *ptr, unsigned long n, bool to)
73419 +{
73420 +
73421 +#ifdef CONFIG_PAX_USERCOPY
73422 + struct slob_page *sp;
73423 + const slob_t *free;
73424 + const void *base;
73425 + unsigned long flags;
73426 + const char *type;
73427 +
73428 + if (!n)
73429 + return;
73430 +
73431 + type = "<null>";
73432 + if (ZERO_OR_NULL_PTR(ptr))
73433 + goto report;
73434 +
73435 + if (!virt_addr_valid(ptr))
73436 + return;
73437 +
73438 + type = "<process stack>";
73439 + sp = slob_page(ptr);
73440 + if (!PageSlab((struct page *)sp)) {
73441 + if (object_is_on_stack(ptr, n) == -1)
73442 + goto report;
73443 + return;
73444 + }
73445 +
73446 + type = "<slob>";
73447 + if (sp->size) {
73448 + base = page_address(&sp->page);
73449 + if (base <= ptr && n <= sp->size - (ptr - base))
73450 + return;
73451 + goto report;
73452 + }
73453 +
73454 + /* some tricky double walking to find the chunk */
73455 + spin_lock_irqsave(&slob_lock, flags);
73456 + base = (void *)((unsigned long)ptr & PAGE_MASK);
73457 + free = sp->free;
73458 +
73459 + while (!slob_last(free) && (void *)free <= ptr) {
73460 + base = free + slob_units(free);
73461 + free = slob_next(free);
73462 + }
73463 +
73464 + while (base < (void *)free) {
73465 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73466 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
73467 + int offset;
73468 +
73469 + if (ptr < base + align)
73470 + break;
73471 +
73472 + offset = ptr - base - align;
73473 + if (offset >= m) {
73474 + base += size;
73475 + continue;
73476 + }
73477 +
73478 + if (n > m - offset)
73479 + break;
73480 +
73481 + spin_unlock_irqrestore(&slob_lock, flags);
73482 + return;
73483 + }
73484 +
73485 + spin_unlock_irqrestore(&slob_lock, flags);
73486 +report:
73487 + pax_report_usercopy(ptr, n, to, type);
73488 +#endif
73489 +
73490 +}
73491 +EXPORT_SYMBOL(check_object_size);
73492 +
73493 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73494 size_t ksize(const void *block)
73495 {
73496 @@ -552,10 +650,10 @@ size_t ksize(const void *block)
73497 sp = slob_page(block);
73498 if (is_slob_page(sp)) {
73499 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73500 - unsigned int *m = (unsigned int *)(block - align);
73501 - return SLOB_UNITS(*m) * SLOB_UNIT;
73502 + slob_t *m = (slob_t *)(block - align);
73503 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73504 } else
73505 - return sp->page.private;
73506 + return sp->size;
73507 }
73508 EXPORT_SYMBOL(ksize);
73509
73510 @@ -571,8 +669,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73511 {
73512 struct kmem_cache *c;
73513
73514 +#ifdef CONFIG_PAX_USERCOPY
73515 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
73516 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73517 +#else
73518 c = slob_alloc(sizeof(struct kmem_cache),
73519 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73520 +#endif
73521
73522 if (c) {
73523 c->name = name;
73524 @@ -614,17 +717,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
73525
73526 lockdep_trace_alloc(flags);
73527
73528 +#ifdef CONFIG_PAX_USERCOPY
73529 + b = __kmalloc_node_align(c->size, flags, node, c->align);
73530 +#else
73531 if (c->size < PAGE_SIZE) {
73532 b = slob_alloc(c->size, flags, c->align, node);
73533 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73534 SLOB_UNITS(c->size) * SLOB_UNIT,
73535 flags, node);
73536 } else {
73537 + struct slob_page *sp;
73538 +
73539 b = slob_new_pages(flags, get_order(c->size), node);
73540 + sp = slob_page(b);
73541 + sp->size = c->size;
73542 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73543 PAGE_SIZE << get_order(c->size),
73544 flags, node);
73545 }
73546 +#endif
73547
73548 if (c->ctor)
73549 c->ctor(b);
73550 @@ -636,10 +747,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
73551
73552 static void __kmem_cache_free(void *b, int size)
73553 {
73554 - if (size < PAGE_SIZE)
73555 + struct slob_page *sp = slob_page(b);
73556 +
73557 + if (is_slob_page(sp))
73558 slob_free(b, size);
73559 - else
73560 + else {
73561 + clear_slob_page(sp);
73562 + free_slob_page(sp);
73563 + sp->size = 0;
73564 slob_free_pages(b, get_order(size));
73565 + }
73566 }
73567
73568 static void kmem_rcu_free(struct rcu_head *head)
73569 @@ -652,17 +769,31 @@ static void kmem_rcu_free(struct rcu_head *head)
73570
73571 void kmem_cache_free(struct kmem_cache *c, void *b)
73572 {
73573 + int size = c->size;
73574 +
73575 +#ifdef CONFIG_PAX_USERCOPY
73576 + if (size + c->align < PAGE_SIZE) {
73577 + size += c->align;
73578 + b -= c->align;
73579 + }
73580 +#endif
73581 +
73582 kmemleak_free_recursive(b, c->flags);
73583 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73584 struct slob_rcu *slob_rcu;
73585 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
73586 - slob_rcu->size = c->size;
73587 + slob_rcu = b + (size - sizeof(struct slob_rcu));
73588 + slob_rcu->size = size;
73589 call_rcu(&slob_rcu->head, kmem_rcu_free);
73590 } else {
73591 - __kmem_cache_free(b, c->size);
73592 + __kmem_cache_free(b, size);
73593 }
73594
73595 +#ifdef CONFIG_PAX_USERCOPY
73596 + trace_kfree(_RET_IP_, b);
73597 +#else
73598 trace_kmem_cache_free(_RET_IP_, b);
73599 +#endif
73600 +
73601 }
73602 EXPORT_SYMBOL(kmem_cache_free);
73603
73604 diff --git a/mm/slub.c b/mm/slub.c
73605 index 71de9b5..8248521 100644
73606 --- a/mm/slub.c
73607 +++ b/mm/slub.c
73608 @@ -209,7 +209,7 @@ struct track {
73609
73610 enum track_item { TRACK_ALLOC, TRACK_FREE };
73611
73612 -#ifdef CONFIG_SYSFS
73613 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73614 static int sysfs_slab_add(struct kmem_cache *);
73615 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73616 static void sysfs_slab_remove(struct kmem_cache *);
73617 @@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
73618 if (!t->addr)
73619 return;
73620
73621 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73622 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73623 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
73624 #ifdef CONFIG_STACKTRACE
73625 {
73626 @@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
73627
73628 page = virt_to_head_page(x);
73629
73630 + BUG_ON(!PageSlab(page));
73631 +
73632 slab_free(s, page, x, _RET_IP_);
73633
73634 trace_kmem_cache_free(_RET_IP_, x);
73635 @@ -2636,7 +2638,7 @@ static int slub_min_objects;
73636 * Merge control. If this is set then no merging of slab caches will occur.
73637 * (Could be removed. This was introduced to pacify the merge skeptics.)
73638 */
73639 -static int slub_nomerge;
73640 +static int slub_nomerge = 1;
73641
73642 /*
73643 * Calculate the order of allocation given an slab object size.
73644 @@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
73645 else
73646 s->cpu_partial = 30;
73647
73648 - s->refcount = 1;
73649 + atomic_set(&s->refcount, 1);
73650 #ifdef CONFIG_NUMA
73651 s->remote_node_defrag_ratio = 1000;
73652 #endif
73653 @@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
73654 void kmem_cache_destroy(struct kmem_cache *s)
73655 {
73656 down_write(&slub_lock);
73657 - s->refcount--;
73658 - if (!s->refcount) {
73659 + if (atomic_dec_and_test(&s->refcount)) {
73660 list_del(&s->list);
73661 up_write(&slub_lock);
73662 if (kmem_cache_close(s)) {
73663 @@ -3223,6 +3224,10 @@ static struct kmem_cache *kmem_cache;
73664 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
73665 #endif
73666
73667 +#ifdef CONFIG_PAX_USERCOPY
73668 +static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
73669 +#endif
73670 +
73671 static int __init setup_slub_min_order(char *str)
73672 {
73673 get_option(&str, &slub_min_order);
73674 @@ -3337,6 +3342,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
73675 return kmalloc_dma_caches[index];
73676
73677 #endif
73678 +
73679 +#ifdef CONFIG_PAX_USERCOPY
73680 + if (flags & SLAB_USERCOPY)
73681 + return kmalloc_usercopy_caches[index];
73682 +
73683 +#endif
73684 +
73685 return kmalloc_caches[index];
73686 }
73687
73688 @@ -3405,6 +3417,77 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
73689 EXPORT_SYMBOL(__kmalloc_node);
73690 #endif
73691
73692 +bool is_usercopy_alloc(const void *ptr)
73693 +{
73694 +#ifdef CONFIG_PAX_USERCOPY
73695 + struct page *page;
73696 + struct kmem_cache *s;
73697 +
73698 + if (ZERO_OR_NULL_PTR(ptr))
73699 + return false;
73700 +
73701 + if (!virt_addr_valid(ptr))
73702 + return false;
73703 +
73704 + page = virt_to_head_page(ptr);
73705 +
73706 + if (!PageSlab(page))
73707 + return false;
73708 +
73709 + s = page->slab;
73710 + if (!(s->flags & SLAB_USERCOPY))
73711 + return false;
73712 +
73713 + return true;
73714 +#endif
73715 +
73716 + return false;
73717 +}
73718 +
73719 +void check_object_size(const void *ptr, unsigned long n, bool to)
73720 +{
73721 +
73722 +#ifdef CONFIG_PAX_USERCOPY
73723 + struct page *page;
73724 + struct kmem_cache *s = NULL;
73725 + unsigned long offset;
73726 + const char *type;
73727 +
73728 + if (!n)
73729 + return;
73730 +
73731 + type = "<null>";
73732 + if (ZERO_OR_NULL_PTR(ptr))
73733 + goto report;
73734 +
73735 + if (!virt_addr_valid(ptr))
73736 + return;
73737 +
73738 + page = virt_to_head_page(ptr);
73739 +
73740 + type = "<process stack>";
73741 + if (!PageSlab(page)) {
73742 + if (object_is_on_stack(ptr, n) == -1)
73743 + goto report;
73744 + return;
73745 + }
73746 +
73747 + s = page->slab;
73748 + type = s->name;
73749 + if (!(s->flags & SLAB_USERCOPY))
73750 + goto report;
73751 +
73752 + offset = (ptr - page_address(page)) % s->size;
73753 + if (offset <= s->objsize && n <= s->objsize - offset)
73754 + return;
73755 +
73756 +report:
73757 + pax_report_usercopy(ptr, n, to, type);
73758 +#endif
73759 +
73760 +}
73761 +EXPORT_SYMBOL(check_object_size);
73762 +
73763 size_t ksize(const void *object)
73764 {
73765 struct page *page;
73766 @@ -3679,7 +3762,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
73767 int node;
73768
73769 list_add(&s->list, &slab_caches);
73770 - s->refcount = -1;
73771 + atomic_set(&s->refcount, -1);
73772
73773 for_each_node_state(node, N_NORMAL_MEMORY) {
73774 struct kmem_cache_node *n = get_node(s, node);
73775 @@ -3799,17 +3882,17 @@ void __init kmem_cache_init(void)
73776
73777 /* Caches that are not of the two-to-the-power-of size */
73778 if (KMALLOC_MIN_SIZE <= 32) {
73779 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
73780 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
73781 caches++;
73782 }
73783
73784 if (KMALLOC_MIN_SIZE <= 64) {
73785 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
73786 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
73787 caches++;
73788 }
73789
73790 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
73791 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
73792 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
73793 caches++;
73794 }
73795
73796 @@ -3851,6 +3934,22 @@ void __init kmem_cache_init(void)
73797 }
73798 }
73799 #endif
73800 +
73801 +#ifdef CONFIG_PAX_USERCOPY
73802 + for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
73803 + struct kmem_cache *s = kmalloc_caches[i];
73804 +
73805 + if (s && s->size) {
73806 + char *name = kasprintf(GFP_NOWAIT,
73807 + "usercopy-kmalloc-%d", s->objsize);
73808 +
73809 + BUG_ON(!name);
73810 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
73811 + s->objsize, SLAB_USERCOPY);
73812 + }
73813 + }
73814 +#endif
73815 +
73816 printk(KERN_INFO
73817 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
73818 " CPUs=%d, Nodes=%d\n",
73819 @@ -3877,7 +3976,7 @@ static int slab_unmergeable(struct kmem_cache *s)
73820 /*
73821 * We may have set a slab to be unmergeable during bootstrap.
73822 */
73823 - if (s->refcount < 0)
73824 + if (atomic_read(&s->refcount) < 0)
73825 return 1;
73826
73827 return 0;
73828 @@ -3936,7 +4035,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73829 down_write(&slub_lock);
73830 s = find_mergeable(size, align, flags, name, ctor);
73831 if (s) {
73832 - s->refcount++;
73833 + atomic_inc(&s->refcount);
73834 /*
73835 * Adjust the object sizes so that we clear
73836 * the complete object on kzalloc.
73837 @@ -3945,7 +4044,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
73838 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
73839
73840 if (sysfs_slab_alias(s, name)) {
73841 - s->refcount--;
73842 + atomic_dec(&s->refcount);
73843 goto err;
73844 }
73845 up_write(&slub_lock);
73846 @@ -4074,7 +4173,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
73847 }
73848 #endif
73849
73850 -#ifdef CONFIG_SYSFS
73851 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73852 static int count_inuse(struct page *page)
73853 {
73854 return page->inuse;
73855 @@ -4461,12 +4560,12 @@ static void resiliency_test(void)
73856 validate_slab_cache(kmalloc_caches[9]);
73857 }
73858 #else
73859 -#ifdef CONFIG_SYSFS
73860 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73861 static void resiliency_test(void) {};
73862 #endif
73863 #endif
73864
73865 -#ifdef CONFIG_SYSFS
73866 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73867 enum slab_stat_type {
73868 SL_ALL, /* All slabs */
73869 SL_PARTIAL, /* Only partially allocated slabs */
73870 @@ -4709,7 +4808,7 @@ SLAB_ATTR_RO(ctor);
73871
73872 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
73873 {
73874 - return sprintf(buf, "%d\n", s->refcount - 1);
73875 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
73876 }
73877 SLAB_ATTR_RO(aliases);
73878
73879 @@ -5280,6 +5379,7 @@ static char *create_unique_id(struct kmem_cache *s)
73880 return name;
73881 }
73882
73883 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73884 static int sysfs_slab_add(struct kmem_cache *s)
73885 {
73886 int err;
73887 @@ -5342,6 +5442,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
73888 kobject_del(&s->kobj);
73889 kobject_put(&s->kobj);
73890 }
73891 +#endif
73892
73893 /*
73894 * Need to buffer aliases during bootup until sysfs becomes
73895 @@ -5355,6 +5456,7 @@ struct saved_alias {
73896
73897 static struct saved_alias *alias_list;
73898
73899 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73900 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73901 {
73902 struct saved_alias *al;
73903 @@ -5377,6 +5479,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
73904 alias_list = al;
73905 return 0;
73906 }
73907 +#endif
73908
73909 static int __init slab_sysfs_init(void)
73910 {
73911 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
73912 index 1b7e22a..3fcd4f3 100644
73913 --- a/mm/sparse-vmemmap.c
73914 +++ b/mm/sparse-vmemmap.c
73915 @@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
73916 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73917 if (!p)
73918 return NULL;
73919 - pud_populate(&init_mm, pud, p);
73920 + pud_populate_kernel(&init_mm, pud, p);
73921 }
73922 return pud;
73923 }
73924 @@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
73925 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
73926 if (!p)
73927 return NULL;
73928 - pgd_populate(&init_mm, pgd, p);
73929 + pgd_populate_kernel(&init_mm, pgd, p);
73930 }
73931 return pgd;
73932 }
73933 diff --git a/mm/swap.c b/mm/swap.c
73934 index 5c13f13..f1cfc13 100644
73935 --- a/mm/swap.c
73936 +++ b/mm/swap.c
73937 @@ -30,6 +30,7 @@
73938 #include <linux/backing-dev.h>
73939 #include <linux/memcontrol.h>
73940 #include <linux/gfp.h>
73941 +#include <linux/hugetlb.h>
73942
73943 #include "internal.h"
73944
73945 @@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
73946
73947 __page_cache_release(page);
73948 dtor = get_compound_page_dtor(page);
73949 + if (!PageHuge(page))
73950 + BUG_ON(dtor != free_compound_page);
73951 (*dtor)(page);
73952 }
73953
73954 diff --git a/mm/swapfile.c b/mm/swapfile.c
73955 index 38186d9..bfba6d3 100644
73956 --- a/mm/swapfile.c
73957 +++ b/mm/swapfile.c
73958 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
73959
73960 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
73961 /* Activity counter to indicate that a swapon or swapoff has occurred */
73962 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
73963 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
73964
73965 static inline unsigned char swap_count(unsigned char ent)
73966 {
73967 @@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
73968 }
73969 filp_close(swap_file, NULL);
73970 err = 0;
73971 - atomic_inc(&proc_poll_event);
73972 + atomic_inc_unchecked(&proc_poll_event);
73973 wake_up_interruptible(&proc_poll_wait);
73974
73975 out_dput:
73976 @@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
73977
73978 poll_wait(file, &proc_poll_wait, wait);
73979
73980 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
73981 - seq->poll_event = atomic_read(&proc_poll_event);
73982 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
73983 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73984 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
73985 }
73986
73987 @@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
73988 return ret;
73989
73990 seq = file->private_data;
73991 - seq->poll_event = atomic_read(&proc_poll_event);
73992 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
73993 return 0;
73994 }
73995
73996 @@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
73997 (p->flags & SWP_DISCARDABLE) ? "D" : "");
73998
73999 mutex_unlock(&swapon_mutex);
74000 - atomic_inc(&proc_poll_event);
74001 + atomic_inc_unchecked(&proc_poll_event);
74002 wake_up_interruptible(&proc_poll_wait);
74003
74004 if (S_ISREG(inode->i_mode))
74005 diff --git a/mm/util.c b/mm/util.c
74006 index ae962b3..0bba886 100644
74007 --- a/mm/util.c
74008 +++ b/mm/util.c
74009 @@ -284,6 +284,12 @@ done:
74010 void arch_pick_mmap_layout(struct mm_struct *mm)
74011 {
74012 mm->mmap_base = TASK_UNMAPPED_BASE;
74013 +
74014 +#ifdef CONFIG_PAX_RANDMMAP
74015 + if (mm->pax_flags & MF_PAX_RANDMMAP)
74016 + mm->mmap_base += mm->delta_mmap;
74017 +#endif
74018 +
74019 mm->get_unmapped_area = arch_get_unmapped_area;
74020 mm->unmap_area = arch_unmap_area;
74021 }
74022 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
74023 index 1196c77..2e608e8 100644
74024 --- a/mm/vmalloc.c
74025 +++ b/mm/vmalloc.c
74026 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
74027
74028 pte = pte_offset_kernel(pmd, addr);
74029 do {
74030 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74031 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74032 +
74033 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74034 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74035 + BUG_ON(!pte_exec(*pte));
74036 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74037 + continue;
74038 + }
74039 +#endif
74040 +
74041 + {
74042 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74043 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74044 + }
74045 } while (pte++, addr += PAGE_SIZE, addr != end);
74046 }
74047
74048 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74049 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74050 {
74051 pte_t *pte;
74052 + int ret = -ENOMEM;
74053
74054 /*
74055 * nr is a running index into the array which helps higher level
74056 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
74057 pte = pte_alloc_kernel(pmd, addr);
74058 if (!pte)
74059 return -ENOMEM;
74060 +
74061 + pax_open_kernel();
74062 do {
74063 struct page *page = pages[*nr];
74064
74065 - if (WARN_ON(!pte_none(*pte)))
74066 - return -EBUSY;
74067 - if (WARN_ON(!page))
74068 - return -ENOMEM;
74069 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74070 + if (pgprot_val(prot) & _PAGE_NX)
74071 +#endif
74072 +
74073 + if (WARN_ON(!pte_none(*pte))) {
74074 + ret = -EBUSY;
74075 + goto out;
74076 + }
74077 + if (WARN_ON(!page)) {
74078 + ret = -ENOMEM;
74079 + goto out;
74080 + }
74081 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74082 (*nr)++;
74083 } while (pte++, addr += PAGE_SIZE, addr != end);
74084 - return 0;
74085 + ret = 0;
74086 +out:
74087 + pax_close_kernel();
74088 + return ret;
74089 }
74090
74091 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74092 @@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74093 pmd_t *pmd;
74094 unsigned long next;
74095
74096 - pmd = pmd_alloc(&init_mm, pud, addr);
74097 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
74098 if (!pmd)
74099 return -ENOMEM;
74100 do {
74101 @@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
74102 pud_t *pud;
74103 unsigned long next;
74104
74105 - pud = pud_alloc(&init_mm, pgd, addr);
74106 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
74107 if (!pud)
74108 return -ENOMEM;
74109 do {
74110 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
74111 * and fall back on vmalloc() if that fails. Others
74112 * just put it in the vmalloc space.
74113 */
74114 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74115 +#ifdef CONFIG_MODULES
74116 +#ifdef MODULES_VADDR
74117 unsigned long addr = (unsigned long)x;
74118 if (addr >= MODULES_VADDR && addr < MODULES_END)
74119 return 1;
74120 #endif
74121 +
74122 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74123 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74124 + return 1;
74125 +#endif
74126 +
74127 +#endif
74128 +
74129 return is_vmalloc_addr(x);
74130 }
74131
74132 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
74133
74134 if (!pgd_none(*pgd)) {
74135 pud_t *pud = pud_offset(pgd, addr);
74136 +#ifdef CONFIG_X86
74137 + if (!pud_large(*pud))
74138 +#endif
74139 if (!pud_none(*pud)) {
74140 pmd_t *pmd = pmd_offset(pud, addr);
74141 +#ifdef CONFIG_X86
74142 + if (!pmd_large(*pmd))
74143 +#endif
74144 if (!pmd_none(*pmd)) {
74145 pte_t *ptep, pte;
74146
74147 @@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
74148 static struct vmap_area *alloc_vmap_area(unsigned long size,
74149 unsigned long align,
74150 unsigned long vstart, unsigned long vend,
74151 + int node, gfp_t gfp_mask) __size_overflow(1);
74152 +static struct vmap_area *alloc_vmap_area(unsigned long size,
74153 + unsigned long align,
74154 + unsigned long vstart, unsigned long vend,
74155 int node, gfp_t gfp_mask)
74156 {
74157 struct vmap_area *va;
74158 @@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
74159 struct vm_struct *area;
74160
74161 BUG_ON(in_interrupt());
74162 +
74163 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74164 + if (flags & VM_KERNEXEC) {
74165 + if (start != VMALLOC_START || end != VMALLOC_END)
74166 + return NULL;
74167 + start = (unsigned long)MODULES_EXEC_VADDR;
74168 + end = (unsigned long)MODULES_EXEC_END;
74169 + }
74170 +#endif
74171 +
74172 if (flags & VM_IOREMAP) {
74173 int bit = fls(size);
74174
74175 @@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
74176 if (count > totalram_pages)
74177 return NULL;
74178
74179 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74180 + if (!(pgprot_val(prot) & _PAGE_NX))
74181 + flags |= VM_KERNEXEC;
74182 +#endif
74183 +
74184 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74185 __builtin_return_address(0));
74186 if (!area)
74187 @@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
74188 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
74189 goto fail;
74190
74191 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
74192 + if (!(pgprot_val(prot) & _PAGE_NX))
74193 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74194 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
74195 + else
74196 +#endif
74197 +
74198 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74199 start, end, node, gfp_mask, caller);
74200 if (!area)
74201 @@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
74202 * For tight control over page level allocator and protection flags
74203 * use __vmalloc() instead.
74204 */
74205 -
74206 void *vmalloc_exec(unsigned long size)
74207 {
74208 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74209 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
74210 -1, __builtin_return_address(0));
74211 }
74212
74213 @@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
74214 unsigned long uaddr = vma->vm_start;
74215 unsigned long usize = vma->vm_end - vma->vm_start;
74216
74217 + BUG_ON(vma->vm_mirror);
74218 +
74219 if ((PAGE_SIZE-1) & (unsigned long)addr)
74220 return -EINVAL;
74221
74222 @@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
74223 return NULL;
74224 }
74225
74226 - vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
74227 - vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
74228 + vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
74229 + vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
74230 if (!vas || !vms)
74231 goto err_free2;
74232
74233 diff --git a/mm/vmscan.c b/mm/vmscan.c
74234 index 4607cc6..be5bc0a 100644
74235 --- a/mm/vmscan.c
74236 +++ b/mm/vmscan.c
74237 @@ -3013,7 +3013,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
74238 * them before going back to sleep.
74239 */
74240 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
74241 - schedule();
74242 +
74243 + if (!kthread_should_stop())
74244 + schedule();
74245 +
74246 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
74247 } else {
74248 if (remaining)
74249 diff --git a/mm/vmstat.c b/mm/vmstat.c
74250 index 7db1b9b..e9f6b07 100644
74251 --- a/mm/vmstat.c
74252 +++ b/mm/vmstat.c
74253 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
74254 *
74255 * vm_stat contains the global counters
74256 */
74257 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74258 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74259 EXPORT_SYMBOL(vm_stat);
74260
74261 #ifdef CONFIG_SMP
74262 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
74263 v = p->vm_stat_diff[i];
74264 p->vm_stat_diff[i] = 0;
74265 local_irq_restore(flags);
74266 - atomic_long_add(v, &zone->vm_stat[i]);
74267 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74268 global_diff[i] += v;
74269 #ifdef CONFIG_NUMA
74270 /* 3 seconds idle till flush */
74271 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
74272
74273 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74274 if (global_diff[i])
74275 - atomic_long_add(global_diff[i], &vm_stat[i]);
74276 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74277 }
74278
74279 #endif
74280 @@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
74281 start_cpu_timer(cpu);
74282 #endif
74283 #ifdef CONFIG_PROC_FS
74284 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74285 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74286 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74287 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74288 + {
74289 + mode_t gr_mode = S_IRUGO;
74290 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
74291 + gr_mode = S_IRUSR;
74292 +#endif
74293 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74294 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
74295 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74296 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74297 +#else
74298 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
74299 +#endif
74300 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74301 + }
74302 #endif
74303 return 0;
74304 }
74305 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
74306 index efea35b..9c8dd0b 100644
74307 --- a/net/8021q/vlan.c
74308 +++ b/net/8021q/vlan.c
74309 @@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
74310 err = -EPERM;
74311 if (!capable(CAP_NET_ADMIN))
74312 break;
74313 - if ((args.u.name_type >= 0) &&
74314 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74315 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74316 struct vlan_net *vn;
74317
74318 vn = net_generic(net, vlan_net_id);
74319 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
74320 index fccae26..e7ece2f 100644
74321 --- a/net/9p/trans_fd.c
74322 +++ b/net/9p/trans_fd.c
74323 @@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
74324 oldfs = get_fs();
74325 set_fs(get_ds());
74326 /* The cast to a user pointer is valid due to the set_fs() */
74327 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74328 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74329 set_fs(oldfs);
74330
74331 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
74332 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
74333 index 876fbe8..8bbea9f 100644
74334 --- a/net/atm/atm_misc.c
74335 +++ b/net/atm/atm_misc.c
74336 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
74337 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74338 return 1;
74339 atm_return(vcc, truesize);
74340 - atomic_inc(&vcc->stats->rx_drop);
74341 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74342 return 0;
74343 }
74344 EXPORT_SYMBOL(atm_charge);
74345 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
74346 }
74347 }
74348 atm_return(vcc, guess);
74349 - atomic_inc(&vcc->stats->rx_drop);
74350 + atomic_inc_unchecked(&vcc->stats->rx_drop);
74351 return NULL;
74352 }
74353 EXPORT_SYMBOL(atm_alloc_charge);
74354 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
74355
74356 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74357 {
74358 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74359 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74360 __SONET_ITEMS
74361 #undef __HANDLE_ITEM
74362 }
74363 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
74364
74365 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
74366 {
74367 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74368 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74369 __SONET_ITEMS
74370 #undef __HANDLE_ITEM
74371 }
74372 diff --git a/net/atm/lec.h b/net/atm/lec.h
74373 index dfc0719..47c5322 100644
74374 --- a/net/atm/lec.h
74375 +++ b/net/atm/lec.h
74376 @@ -48,7 +48,7 @@ struct lane2_ops {
74377 const u8 *tlvs, u32 sizeoftlvs);
74378 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74379 const u8 *tlvs, u32 sizeoftlvs);
74380 -};
74381 +} __no_const;
74382
74383 /*
74384 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
74385 diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74386 index 0919a88..a23d54e 100644
74387 --- a/net/atm/mpc.h
74388 +++ b/net/atm/mpc.h
74389 @@ -33,7 +33,7 @@ struct mpoa_client {
74390 struct mpc_parameters parameters; /* parameters for this client */
74391
74392 const struct net_device_ops *old_ops;
74393 - struct net_device_ops new_ops;
74394 + net_device_ops_no_const new_ops;
74395 };
74396
74397
74398 diff --git a/net/atm/proc.c b/net/atm/proc.c
74399 index 0d020de..011c7bb 100644
74400 --- a/net/atm/proc.c
74401 +++ b/net/atm/proc.c
74402 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
74403 const struct k_atm_aal_stats *stats)
74404 {
74405 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
74406 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74407 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74408 - atomic_read(&stats->rx_drop));
74409 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74410 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74411 + atomic_read_unchecked(&stats->rx_drop));
74412 }
74413
74414 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
74415 diff --git a/net/atm/resources.c b/net/atm/resources.c
74416 index 23f45ce..c748f1a 100644
74417 --- a/net/atm/resources.c
74418 +++ b/net/atm/resources.c
74419 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
74420 static void copy_aal_stats(struct k_atm_aal_stats *from,
74421 struct atm_aal_stats *to)
74422 {
74423 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74424 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74425 __AAL_STAT_ITEMS
74426 #undef __HANDLE_ITEM
74427 }
74428 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
74429 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74430 struct atm_aal_stats *to)
74431 {
74432 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74433 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74434 __AAL_STAT_ITEMS
74435 #undef __HANDLE_ITEM
74436 }
74437 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
74438 index a6d5d63..1cc6c2b 100644
74439 --- a/net/batman-adv/bat_iv_ogm.c
74440 +++ b/net/batman-adv/bat_iv_ogm.c
74441 @@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
74442
74443 /* change sequence number to network order */
74444 batman_ogm_packet->seqno =
74445 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
74446 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74447
74448 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
74449 batman_ogm_packet->tt_crc = htons((uint16_t)
74450 @@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
74451 else
74452 batman_ogm_packet->gw_flags = NO_FLAGS;
74453
74454 - atomic_inc(&hard_iface->seqno);
74455 + atomic_inc_unchecked(&hard_iface->seqno);
74456
74457 slide_own_bcast_window(hard_iface);
74458 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
74459 @@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
74460 return;
74461
74462 /* could be changed by schedule_own_packet() */
74463 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
74464 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74465
74466 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
74467
74468 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
74469 index 3778977..f6a9450 100644
74470 --- a/net/batman-adv/hard-interface.c
74471 +++ b/net/batman-adv/hard-interface.c
74472 @@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
74473 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74474 dev_add_pack(&hard_iface->batman_adv_ptype);
74475
74476 - atomic_set(&hard_iface->seqno, 1);
74477 - atomic_set(&hard_iface->frag_seqno, 1);
74478 + atomic_set_unchecked(&hard_iface->seqno, 1);
74479 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74480 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74481 hard_iface->net_dev->name);
74482
74483 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
74484 index a5590f4..8d31969 100644
74485 --- a/net/batman-adv/soft-interface.c
74486 +++ b/net/batman-adv/soft-interface.c
74487 @@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
74488
74489 /* set broadcast sequence number */
74490 bcast_packet->seqno =
74491 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74492 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74493
74494 add_bcast_packet_to_list(bat_priv, skb, 1);
74495
74496 @@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
74497 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74498
74499 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74500 - atomic_set(&bat_priv->bcast_seqno, 1);
74501 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
74502 atomic_set(&bat_priv->ttvn, 0);
74503 atomic_set(&bat_priv->tt_local_changes, 0);
74504 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
74505 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
74506 index 302efb5..1590365 100644
74507 --- a/net/batman-adv/types.h
74508 +++ b/net/batman-adv/types.h
74509 @@ -38,8 +38,8 @@ struct hard_iface {
74510 int16_t if_num;
74511 char if_status;
74512 struct net_device *net_dev;
74513 - atomic_t seqno;
74514 - atomic_t frag_seqno;
74515 + atomic_unchecked_t seqno;
74516 + atomic_unchecked_t frag_seqno;
74517 unsigned char *packet_buff;
74518 int packet_len;
74519 struct kobject *hardif_obj;
74520 @@ -155,7 +155,7 @@ struct bat_priv {
74521 atomic_t orig_interval; /* uint */
74522 atomic_t hop_penalty; /* uint */
74523 atomic_t log_level; /* uint */
74524 - atomic_t bcast_seqno;
74525 + atomic_unchecked_t bcast_seqno;
74526 atomic_t bcast_queue_left;
74527 atomic_t batman_queue_left;
74528 atomic_t ttvn; /* translation table version number */
74529 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
74530 index 676f6a6..3b4e668 100644
74531 --- a/net/batman-adv/unicast.c
74532 +++ b/net/batman-adv/unicast.c
74533 @@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
74534 frag1->flags = UNI_FRAG_HEAD | large_tail;
74535 frag2->flags = large_tail;
74536
74537 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74538 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74539 frag1->seqno = htons(seqno - 1);
74540 frag2->seqno = htons(seqno);
74541
74542 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
74543 index 5238b6b..c9798ce 100644
74544 --- a/net/bluetooth/hci_conn.c
74545 +++ b/net/bluetooth/hci_conn.c
74546 @@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
74547 memset(&cp, 0, sizeof(cp));
74548
74549 cp.handle = cpu_to_le16(conn->handle);
74550 - memcpy(cp.ltk, ltk, sizeof(ltk));
74551 + memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74552
74553 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74554 }
74555 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
74556 index 6f9c25b..d19fd66 100644
74557 --- a/net/bluetooth/l2cap_core.c
74558 +++ b/net/bluetooth/l2cap_core.c
74559 @@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
74560 break;
74561
74562 case L2CAP_CONF_RFC:
74563 - if (olen == sizeof(rfc))
74564 - memcpy(&rfc, (void *)val, olen);
74565 + if (olen != sizeof(rfc))
74566 + break;
74567 +
74568 + memcpy(&rfc, (void *)val, olen);
74569
74570 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74571 rfc.mode != chan->mode)
74572 @@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
74573
74574 switch (type) {
74575 case L2CAP_CONF_RFC:
74576 - if (olen == sizeof(rfc))
74577 - memcpy(&rfc, (void *)val, olen);
74578 + if (olen != sizeof(rfc))
74579 + break;
74580 +
74581 + memcpy(&rfc, (void *)val, olen);
74582 goto done;
74583 }
74584 }
74585 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
74586 index 5fe2ff3..10968b5 100644
74587 --- a/net/bridge/netfilter/ebtables.c
74588 +++ b/net/bridge/netfilter/ebtables.c
74589 @@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
74590 tmp.valid_hooks = t->table->valid_hooks;
74591 }
74592 mutex_unlock(&ebt_mutex);
74593 - if (copy_to_user(user, &tmp, *len) != 0){
74594 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74595 BUGPRINT("c2u Didn't work\n");
74596 ret = -EFAULT;
74597 break;
74598 diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
74599 index aa6f716..7bf4c21 100644
74600 --- a/net/caif/caif_dev.c
74601 +++ b/net/caif/caif_dev.c
74602 @@ -562,9 +562,9 @@ static int __init caif_device_init(void)
74603
74604 static void __exit caif_device_exit(void)
74605 {
74606 - unregister_pernet_subsys(&caif_net_ops);
74607 unregister_netdevice_notifier(&caif_device_notifier);
74608 dev_remove_pack(&caif_packet_type);
74609 + unregister_pernet_subsys(&caif_net_ops);
74610 }
74611
74612 module_init(caif_device_init);
74613 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
74614 index 5cf5222..6f704ad 100644
74615 --- a/net/caif/cfctrl.c
74616 +++ b/net/caif/cfctrl.c
74617 @@ -9,6 +9,7 @@
74618 #include <linux/stddef.h>
74619 #include <linux/spinlock.h>
74620 #include <linux/slab.h>
74621 +#include <linux/sched.h>
74622 #include <net/caif/caif_layer.h>
74623 #include <net/caif/cfpkt.h>
74624 #include <net/caif/cfctrl.h>
74625 @@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74626 memset(&dev_info, 0, sizeof(dev_info));
74627 dev_info.id = 0xff;
74628 cfsrvl_init(&this->serv, 0, &dev_info, false);
74629 - atomic_set(&this->req_seq_no, 1);
74630 - atomic_set(&this->rsp_seq_no, 1);
74631 + atomic_set_unchecked(&this->req_seq_no, 1);
74632 + atomic_set_unchecked(&this->rsp_seq_no, 1);
74633 this->serv.layer.receive = cfctrl_recv;
74634 sprintf(this->serv.layer.name, "ctrl");
74635 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
74636 @@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
74637 struct cfctrl_request_info *req)
74638 {
74639 spin_lock_bh(&ctrl->info_list_lock);
74640 - atomic_inc(&ctrl->req_seq_no);
74641 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
74642 + atomic_inc_unchecked(&ctrl->req_seq_no);
74643 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74644 list_add_tail(&req->list, &ctrl->list);
74645 spin_unlock_bh(&ctrl->info_list_lock);
74646 }
74647 @@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
74648 if (p != first)
74649 pr_warn("Requests are not received in order\n");
74650
74651 - atomic_set(&ctrl->rsp_seq_no,
74652 + atomic_set_unchecked(&ctrl->rsp_seq_no,
74653 p->sequence_no);
74654 list_del(&p->list);
74655 goto out;
74656 diff --git a/net/can/gw.c b/net/can/gw.c
74657 index 3d79b12..8de85fa 100644
74658 --- a/net/can/gw.c
74659 +++ b/net/can/gw.c
74660 @@ -96,7 +96,7 @@ struct cf_mod {
74661 struct {
74662 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74663 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74664 - } csumfunc;
74665 + } __no_const csumfunc;
74666 };
74667
74668
74669 diff --git a/net/compat.c b/net/compat.c
74670 index e055708..3f80795 100644
74671 --- a/net/compat.c
74672 +++ b/net/compat.c
74673 @@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
74674 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74675 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74676 return -EFAULT;
74677 - kmsg->msg_name = compat_ptr(tmp1);
74678 - kmsg->msg_iov = compat_ptr(tmp2);
74679 - kmsg->msg_control = compat_ptr(tmp3);
74680 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74681 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74682 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74683 return 0;
74684 }
74685
74686 @@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74687
74688 if (kern_msg->msg_namelen) {
74689 if (mode == VERIFY_READ) {
74690 - int err = move_addr_to_kernel(kern_msg->msg_name,
74691 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74692 kern_msg->msg_namelen,
74693 kern_address);
74694 if (err < 0)
74695 @@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74696 kern_msg->msg_name = NULL;
74697
74698 tot_len = iov_from_user_compat_to_kern(kern_iov,
74699 - (struct compat_iovec __user *)kern_msg->msg_iov,
74700 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
74701 kern_msg->msg_iovlen);
74702 if (tot_len >= 0)
74703 kern_msg->msg_iov = kern_iov;
74704 @@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
74705
74706 #define CMSG_COMPAT_FIRSTHDR(msg) \
74707 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74708 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74709 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74710 (struct compat_cmsghdr __user *)NULL)
74711
74712 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74713 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74714 (ucmlen) <= (unsigned long) \
74715 ((mhdr)->msg_controllen - \
74716 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74717 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74718
74719 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74720 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74721 {
74722 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74723 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74724 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74725 msg->msg_controllen)
74726 return NULL;
74727 return (struct compat_cmsghdr __user *)ptr;
74728 @@ -219,7 +219,7 @@ Efault:
74729
74730 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
74731 {
74732 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74733 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74734 struct compat_cmsghdr cmhdr;
74735 int cmlen;
74736
74737 @@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
74738
74739 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74740 {
74741 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74742 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74743 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74744 int fdnum = scm->fp->count;
74745 struct file **fp = scm->fp->fp;
74746 @@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
74747 return -EFAULT;
74748 old_fs = get_fs();
74749 set_fs(KERNEL_DS);
74750 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74751 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74752 set_fs(old_fs);
74753
74754 return err;
74755 @@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
74756 len = sizeof(ktime);
74757 old_fs = get_fs();
74758 set_fs(KERNEL_DS);
74759 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
74760 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
74761 set_fs(old_fs);
74762
74763 if (!err) {
74764 @@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74765 case MCAST_JOIN_GROUP:
74766 case MCAST_LEAVE_GROUP:
74767 {
74768 - struct compat_group_req __user *gr32 = (void *)optval;
74769 + struct compat_group_req __user *gr32 = (void __user *)optval;
74770 struct group_req __user *kgr =
74771 compat_alloc_user_space(sizeof(struct group_req));
74772 u32 interface;
74773 @@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74774 case MCAST_BLOCK_SOURCE:
74775 case MCAST_UNBLOCK_SOURCE:
74776 {
74777 - struct compat_group_source_req __user *gsr32 = (void *)optval;
74778 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
74779 struct group_source_req __user *kgsr = compat_alloc_user_space(
74780 sizeof(struct group_source_req));
74781 u32 interface;
74782 @@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
74783 }
74784 case MCAST_MSFILTER:
74785 {
74786 - struct compat_group_filter __user *gf32 = (void *)optval;
74787 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74788 struct group_filter __user *kgf;
74789 u32 interface, fmode, numsrc;
74790
74791 @@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
74792 char __user *optval, int __user *optlen,
74793 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
74794 {
74795 - struct compat_group_filter __user *gf32 = (void *)optval;
74796 + struct compat_group_filter __user *gf32 = (void __user *)optval;
74797 struct group_filter __user *kgf;
74798 int __user *koptlen;
74799 u32 interface, fmode, numsrc;
74800 diff --git a/net/core/datagram.c b/net/core/datagram.c
74801 index e4fbfd6..6a6ac94 100644
74802 --- a/net/core/datagram.c
74803 +++ b/net/core/datagram.c
74804 @@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
74805 }
74806
74807 kfree_skb(skb);
74808 - atomic_inc(&sk->sk_drops);
74809 + atomic_inc_unchecked(&sk->sk_drops);
74810 sk_mem_reclaim_partial(sk);
74811
74812 return err;
74813 diff --git a/net/core/dev.c b/net/core/dev.c
74814 index 533c586..f78a55f 100644
74815 --- a/net/core/dev.c
74816 +++ b/net/core/dev.c
74817 @@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
74818 if (no_module && capable(CAP_NET_ADMIN))
74819 no_module = request_module("netdev-%s", name);
74820 if (no_module && capable(CAP_SYS_MODULE)) {
74821 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
74822 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
74823 +#else
74824 if (!request_module("%s", name))
74825 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
74826 name);
74827 +#endif
74828 }
74829 }
74830 EXPORT_SYMBOL(dev_load);
74831 @@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74832 {
74833 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
74834 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
74835 - atomic_long_inc(&dev->rx_dropped);
74836 + atomic_long_inc_unchecked(&dev->rx_dropped);
74837 kfree_skb(skb);
74838 return NET_RX_DROP;
74839 }
74840 @@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
74841 nf_reset(skb);
74842
74843 if (unlikely(!is_skb_forwardable(dev, skb))) {
74844 - atomic_long_inc(&dev->rx_dropped);
74845 + atomic_long_inc_unchecked(&dev->rx_dropped);
74846 kfree_skb(skb);
74847 return NET_RX_DROP;
74848 }
74849 @@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
74850
74851 struct dev_gso_cb {
74852 void (*destructor)(struct sk_buff *skb);
74853 -};
74854 +} __no_const;
74855
74856 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
74857
74858 @@ -2877,7 +2881,7 @@ enqueue:
74859
74860 local_irq_restore(flags);
74861
74862 - atomic_long_inc(&skb->dev->rx_dropped);
74863 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74864 kfree_skb(skb);
74865 return NET_RX_DROP;
74866 }
74867 @@ -2949,7 +2953,7 @@ int netif_rx_ni(struct sk_buff *skb)
74868 }
74869 EXPORT_SYMBOL(netif_rx_ni);
74870
74871 -static void net_tx_action(struct softirq_action *h)
74872 +static void net_tx_action(void)
74873 {
74874 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74875
74876 @@ -3237,7 +3241,7 @@ ncls:
74877 if (pt_prev) {
74878 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
74879 } else {
74880 - atomic_long_inc(&skb->dev->rx_dropped);
74881 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
74882 kfree_skb(skb);
74883 /* Jamal, now you will not able to escape explaining
74884 * me how you were going to use this. :-)
74885 @@ -3797,7 +3801,7 @@ void netif_napi_del(struct napi_struct *napi)
74886 }
74887 EXPORT_SYMBOL(netif_napi_del);
74888
74889 -static void net_rx_action(struct softirq_action *h)
74890 +static void net_rx_action(void)
74891 {
74892 struct softnet_data *sd = &__get_cpu_var(softnet_data);
74893 unsigned long time_limit = jiffies + 2;
74894 @@ -4267,8 +4271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
74895 else
74896 seq_printf(seq, "%04x", ntohs(pt->type));
74897
74898 +#ifdef CONFIG_GRKERNSEC_HIDESYM
74899 + seq_printf(seq, " %-8s %p\n",
74900 + pt->dev ? pt->dev->name : "", NULL);
74901 +#else
74902 seq_printf(seq, " %-8s %pF\n",
74903 pt->dev ? pt->dev->name : "", pt->func);
74904 +#endif
74905 }
74906
74907 return 0;
74908 @@ -5818,7 +5827,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
74909 } else {
74910 netdev_stats_to_stats64(storage, &dev->stats);
74911 }
74912 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
74913 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
74914 return storage;
74915 }
74916 EXPORT_SYMBOL(dev_get_stats);
74917 diff --git a/net/core/flow.c b/net/core/flow.c
74918 index e318c7e..168b1d0 100644
74919 --- a/net/core/flow.c
74920 +++ b/net/core/flow.c
74921 @@ -61,7 +61,7 @@ struct flow_cache {
74922 struct timer_list rnd_timer;
74923 };
74924
74925 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
74926 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
74927 EXPORT_SYMBOL(flow_cache_genid);
74928 static struct flow_cache flow_cache_global;
74929 static struct kmem_cache *flow_cachep __read_mostly;
74930 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
74931
74932 static int flow_entry_valid(struct flow_cache_entry *fle)
74933 {
74934 - if (atomic_read(&flow_cache_genid) != fle->genid)
74935 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
74936 return 0;
74937 if (fle->object && !fle->object->ops->check(fle->object))
74938 return 0;
74939 @@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
74940 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
74941 fcp->hash_count++;
74942 }
74943 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
74944 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
74945 flo = fle->object;
74946 if (!flo)
74947 goto ret_object;
74948 @@ -280,7 +280,7 @@ nocache:
74949 }
74950 flo = resolver(net, key, family, dir, flo, ctx);
74951 if (fle) {
74952 - fle->genid = atomic_read(&flow_cache_genid);
74953 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
74954 if (!IS_ERR(flo))
74955 fle->object = flo;
74956 else
74957 diff --git a/net/core/iovec.c b/net/core/iovec.c
74958 index 7e7aeb0..2a998cb 100644
74959 --- a/net/core/iovec.c
74960 +++ b/net/core/iovec.c
74961 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74962 if (m->msg_namelen) {
74963 if (mode == VERIFY_READ) {
74964 void __user *namep;
74965 - namep = (void __user __force *) m->msg_name;
74966 + namep = (void __force_user *) m->msg_name;
74967 err = move_addr_to_kernel(namep, m->msg_namelen,
74968 address);
74969 if (err < 0)
74970 @@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
74971 }
74972
74973 size = m->msg_iovlen * sizeof(struct iovec);
74974 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
74975 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
74976 return -EFAULT;
74977
74978 m->msg_iov = iov;
74979 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
74980 index 90430b7..0032ec0 100644
74981 --- a/net/core/rtnetlink.c
74982 +++ b/net/core/rtnetlink.c
74983 @@ -56,7 +56,7 @@ struct rtnl_link {
74984 rtnl_doit_func doit;
74985 rtnl_dumpit_func dumpit;
74986 rtnl_calcit_func calcit;
74987 -};
74988 +} __no_const;
74989
74990 static DEFINE_MUTEX(rtnl_mutex);
74991
74992 diff --git a/net/core/scm.c b/net/core/scm.c
74993 index 611c5ef..88f6d6d 100644
74994 --- a/net/core/scm.c
74995 +++ b/net/core/scm.c
74996 @@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
74997 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
74998 {
74999 struct cmsghdr __user *cm
75000 - = (__force struct cmsghdr __user *)msg->msg_control;
75001 + = (struct cmsghdr __force_user *)msg->msg_control;
75002 struct cmsghdr cmhdr;
75003 int cmlen = CMSG_LEN(len);
75004 int err;
75005 @@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75006 err = -EFAULT;
75007 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75008 goto out;
75009 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75010 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75011 goto out;
75012 cmlen = CMSG_SPACE(len);
75013 if (msg->msg_controllen < cmlen)
75014 @@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
75015 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75016 {
75017 struct cmsghdr __user *cm
75018 - = (__force struct cmsghdr __user*)msg->msg_control;
75019 + = (struct cmsghdr __force_user *)msg->msg_control;
75020
75021 int fdmax = 0;
75022 int fdnum = scm->fp->count;
75023 @@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75024 if (fdnum < fdmax)
75025 fdmax = fdnum;
75026
75027 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75028 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75029 i++, cmfptr++)
75030 {
75031 int new_fd;
75032 diff --git a/net/core/sock.c b/net/core/sock.c
75033 index 0f8402e..f0b6338 100644
75034 --- a/net/core/sock.c
75035 +++ b/net/core/sock.c
75036 @@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75037 struct sk_buff_head *list = &sk->sk_receive_queue;
75038
75039 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
75040 - atomic_inc(&sk->sk_drops);
75041 + atomic_inc_unchecked(&sk->sk_drops);
75042 trace_sock_rcvqueue_full(sk, skb);
75043 return -ENOMEM;
75044 }
75045 @@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75046 return err;
75047
75048 if (!sk_rmem_schedule(sk, skb->truesize)) {
75049 - atomic_inc(&sk->sk_drops);
75050 + atomic_inc_unchecked(&sk->sk_drops);
75051 return -ENOBUFS;
75052 }
75053
75054 @@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75055 skb_dst_force(skb);
75056
75057 spin_lock_irqsave(&list->lock, flags);
75058 - skb->dropcount = atomic_read(&sk->sk_drops);
75059 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75060 __skb_queue_tail(list, skb);
75061 spin_unlock_irqrestore(&list->lock, flags);
75062
75063 @@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75064 skb->dev = NULL;
75065
75066 if (sk_rcvqueues_full(sk, skb)) {
75067 - atomic_inc(&sk->sk_drops);
75068 + atomic_inc_unchecked(&sk->sk_drops);
75069 goto discard_and_relse;
75070 }
75071 if (nested)
75072 @@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
75073 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75074 } else if (sk_add_backlog(sk, skb)) {
75075 bh_unlock_sock(sk);
75076 - atomic_inc(&sk->sk_drops);
75077 + atomic_inc_unchecked(&sk->sk_drops);
75078 goto discard_and_relse;
75079 }
75080
75081 @@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75082 if (len > sizeof(peercred))
75083 len = sizeof(peercred);
75084 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75085 - if (copy_to_user(optval, &peercred, len))
75086 + if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75087 return -EFAULT;
75088 goto lenout;
75089 }
75090 @@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75091 return -ENOTCONN;
75092 if (lv < len)
75093 return -EINVAL;
75094 - if (copy_to_user(optval, address, len))
75095 + if (len > sizeof(address) || copy_to_user(optval, address, len))
75096 return -EFAULT;
75097 goto lenout;
75098 }
75099 @@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
75100
75101 if (len > lv)
75102 len = lv;
75103 - if (copy_to_user(optval, &v, len))
75104 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
75105 return -EFAULT;
75106 lenout:
75107 if (put_user(len, optlen))
75108 @@ -2131,7 +2131,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75109 */
75110 smp_wmb();
75111 atomic_set(&sk->sk_refcnt, 1);
75112 - atomic_set(&sk->sk_drops, 0);
75113 + atomic_set_unchecked(&sk->sk_drops, 0);
75114 }
75115 EXPORT_SYMBOL(sock_init_data);
75116
75117 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75118 index b9868e1..849f809 100644
75119 --- a/net/core/sock_diag.c
75120 +++ b/net/core/sock_diag.c
75121 @@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75122
75123 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75124 {
75125 +#ifndef CONFIG_GRKERNSEC_HIDESYM
75126 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75127 cookie[1] != INET_DIAG_NOCOOKIE) &&
75128 ((u32)(unsigned long)sk != cookie[0] ||
75129 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75130 return -ESTALE;
75131 else
75132 +#endif
75133 return 0;
75134 }
75135 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75136
75137 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75138 {
75139 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75140 + cookie[0] = 0;
75141 + cookie[1] = 0;
75142 +#else
75143 cookie[0] = (u32)(unsigned long)sk;
75144 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75145 +#endif
75146 }
75147 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75148
75149 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75150 index 02e75d1..9a57a7c 100644
75151 --- a/net/decnet/sysctl_net_decnet.c
75152 +++ b/net/decnet/sysctl_net_decnet.c
75153 @@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75154
75155 if (len > *lenp) len = *lenp;
75156
75157 - if (copy_to_user(buffer, addr, len))
75158 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
75159 return -EFAULT;
75160
75161 *lenp = len;
75162 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75163
75164 if (len > *lenp) len = *lenp;
75165
75166 - if (copy_to_user(buffer, devname, len))
75167 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
75168 return -EFAULT;
75169
75170 *lenp = len;
75171 diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75172 index 39a2d29..f39c0fe 100644
75173 --- a/net/econet/Kconfig
75174 +++ b/net/econet/Kconfig
75175 @@ -4,7 +4,7 @@
75176
75177 config ECONET
75178 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75179 - depends on EXPERIMENTAL && INET
75180 + depends on EXPERIMENTAL && INET && BROKEN
75181 ---help---
75182 Econet is a fairly old and slow networking protocol mainly used by
75183 Acorn computers to access file and print servers. It uses native
75184 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
75185 index c48adc5..667c1d4 100644
75186 --- a/net/ipv4/cipso_ipv4.c
75187 +++ b/net/ipv4/cipso_ipv4.c
75188 @@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
75189 case CIPSO_V4_TAG_LOCAL:
75190 /* This is a non-standard tag that we only allow for
75191 * local connections, so if the incoming interface is
75192 - * not the loopback device drop the packet. */
75193 - if (!(skb->dev->flags & IFF_LOOPBACK)) {
75194 + * not the loopback device drop the packet. Further,
75195 + * there is no legitimate reason for setting this from
75196 + * userspace so reject it if skb is NULL. */
75197 + if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
75198 err_offset = opt_iter;
75199 goto validate_return_locked;
75200 }
75201 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
75202 index cbe3a68..a879b75 100644
75203 --- a/net/ipv4/fib_frontend.c
75204 +++ b/net/ipv4/fib_frontend.c
75205 @@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75206 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75207 fib_sync_up(dev);
75208 #endif
75209 - atomic_inc(&net->ipv4.dev_addr_genid);
75210 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75211 rt_cache_flush(dev_net(dev), -1);
75212 break;
75213 case NETDEV_DOWN:
75214 fib_del_ifaddr(ifa, NULL);
75215 - atomic_inc(&net->ipv4.dev_addr_genid);
75216 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75217 if (ifa->ifa_dev->ifa_list == NULL) {
75218 /* Last address was deleted from this interface.
75219 * Disable IP.
75220 @@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75221 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75222 fib_sync_up(dev);
75223 #endif
75224 - atomic_inc(&net->ipv4.dev_addr_genid);
75225 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75226 rt_cache_flush(dev_net(dev), -1);
75227 break;
75228 case NETDEV_DOWN:
75229 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
75230 index 8861f91..ab1e3c1 100644
75231 --- a/net/ipv4/fib_semantics.c
75232 +++ b/net/ipv4/fib_semantics.c
75233 @@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75234 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75235 nh->nh_gw,
75236 nh->nh_parent->fib_scope);
75237 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75238 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75239
75240 return nh->nh_saddr;
75241 }
75242 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75243 index 984ec65..97ac518 100644
75244 --- a/net/ipv4/inet_hashtables.c
75245 +++ b/net/ipv4/inet_hashtables.c
75246 @@ -18,12 +18,15 @@
75247 #include <linux/sched.h>
75248 #include <linux/slab.h>
75249 #include <linux/wait.h>
75250 +#include <linux/security.h>
75251
75252 #include <net/inet_connection_sock.h>
75253 #include <net/inet_hashtables.h>
75254 #include <net/secure_seq.h>
75255 #include <net/ip.h>
75256
75257 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75258 +
75259 /*
75260 * Allocate and initialize a new local port bind bucket.
75261 * The bindhash mutex for snum's hash chain must be held here.
75262 @@ -530,6 +533,8 @@ ok:
75263 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75264 spin_unlock(&head->lock);
75265
75266 + gr_update_task_in_ip_table(current, inet_sk(sk));
75267 +
75268 if (tw) {
75269 inet_twsk_deschedule(tw, death_row);
75270 while (twrefcnt) {
75271 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
75272 index dfba343..c827d50 100644
75273 --- a/net/ipv4/inetpeer.c
75274 +++ b/net/ipv4/inetpeer.c
75275 @@ -487,8 +487,8 @@ relookup:
75276 if (p) {
75277 p->daddr = *daddr;
75278 atomic_set(&p->refcnt, 1);
75279 - atomic_set(&p->rid, 0);
75280 - atomic_set(&p->ip_id_count,
75281 + atomic_set_unchecked(&p->rid, 0);
75282 + atomic_set_unchecked(&p->ip_id_count,
75283 (daddr->family == AF_INET) ?
75284 secure_ip_id(daddr->addr.a4) :
75285 secure_ipv6_id(daddr->addr.a6));
75286 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
75287 index 3727e23..517f5df 100644
75288 --- a/net/ipv4/ip_fragment.c
75289 +++ b/net/ipv4/ip_fragment.c
75290 @@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75291 return 0;
75292
75293 start = qp->rid;
75294 - end = atomic_inc_return(&peer->rid);
75295 + end = atomic_inc_return_unchecked(&peer->rid);
75296 qp->rid = end;
75297
75298 rc = qp->q.fragments && (end - start) > max;
75299 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
75300 index 2fd0fba..83fac99 100644
75301 --- a/net/ipv4/ip_sockglue.c
75302 +++ b/net/ipv4/ip_sockglue.c
75303 @@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75304 len = min_t(unsigned int, len, opt->optlen);
75305 if (put_user(len, optlen))
75306 return -EFAULT;
75307 - if (copy_to_user(optval, opt->__data, len))
75308 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75309 + copy_to_user(optval, opt->__data, len))
75310 return -EFAULT;
75311 return 0;
75312 }
75313 @@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75314 if (sk->sk_type != SOCK_STREAM)
75315 return -ENOPROTOOPT;
75316
75317 - msg.msg_control = optval;
75318 + msg.msg_control = (void __force_kernel *)optval;
75319 msg.msg_controllen = len;
75320 msg.msg_flags = flags;
75321
75322 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
75323 index 92ac7e7..13f93d9 100644
75324 --- a/net/ipv4/ipconfig.c
75325 +++ b/net/ipv4/ipconfig.c
75326 @@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75327
75328 mm_segment_t oldfs = get_fs();
75329 set_fs(get_ds());
75330 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75331 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75332 set_fs(oldfs);
75333 return res;
75334 }
75335 @@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75336
75337 mm_segment_t oldfs = get_fs();
75338 set_fs(get_ds());
75339 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75340 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75341 set_fs(oldfs);
75342 return res;
75343 }
75344 @@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75345
75346 mm_segment_t oldfs = get_fs();
75347 set_fs(get_ds());
75348 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75349 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75350 set_fs(oldfs);
75351 return res;
75352 }
75353 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
75354 index 50009c7..5996a9f 100644
75355 --- a/net/ipv4/ping.c
75356 +++ b/net/ipv4/ping.c
75357 @@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75358 sk_rmem_alloc_get(sp),
75359 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75360 atomic_read(&sp->sk_refcnt), sp,
75361 - atomic_read(&sp->sk_drops), len);
75362 + atomic_read_unchecked(&sp->sk_drops), len);
75363 }
75364
75365 static int ping_seq_show(struct seq_file *seq, void *v)
75366 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
75367 index bbd604c..4d5469c 100644
75368 --- a/net/ipv4/raw.c
75369 +++ b/net/ipv4/raw.c
75370 @@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75371 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75372 {
75373 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75374 - atomic_inc(&sk->sk_drops);
75375 + atomic_inc_unchecked(&sk->sk_drops);
75376 kfree_skb(skb);
75377 return NET_RX_DROP;
75378 }
75379 @@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
75380
75381 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75382 {
75383 + struct icmp_filter filter;
75384 +
75385 if (optlen > sizeof(struct icmp_filter))
75386 optlen = sizeof(struct icmp_filter);
75387 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75388 + if (copy_from_user(&filter, optval, optlen))
75389 return -EFAULT;
75390 + raw_sk(sk)->filter = filter;
75391 return 0;
75392 }
75393
75394 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75395 {
75396 int len, ret = -EFAULT;
75397 + struct icmp_filter filter;
75398
75399 if (get_user(len, optlen))
75400 goto out;
75401 @@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75402 if (len > sizeof(struct icmp_filter))
75403 len = sizeof(struct icmp_filter);
75404 ret = -EFAULT;
75405 - if (put_user(len, optlen) ||
75406 - copy_to_user(optval, &raw_sk(sk)->filter, len))
75407 + filter = raw_sk(sk)->filter;
75408 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75409 goto out;
75410 ret = 0;
75411 out: return ret;
75412 @@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75413 sk_wmem_alloc_get(sp),
75414 sk_rmem_alloc_get(sp),
75415 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75416 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75417 + atomic_read(&sp->sk_refcnt),
75418 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75419 + NULL,
75420 +#else
75421 + sp,
75422 +#endif
75423 + atomic_read_unchecked(&sp->sk_drops));
75424 }
75425
75426 static int raw_seq_show(struct seq_file *seq, void *v)
75427 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
75428 index 167ea10..4b15883 100644
75429 --- a/net/ipv4/route.c
75430 +++ b/net/ipv4/route.c
75431 @@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75432
75433 static inline int rt_genid(struct net *net)
75434 {
75435 - return atomic_read(&net->ipv4.rt_genid);
75436 + return atomic_read_unchecked(&net->ipv4.rt_genid);
75437 }
75438
75439 #ifdef CONFIG_PROC_FS
75440 @@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
75441 unsigned char shuffle;
75442
75443 get_random_bytes(&shuffle, sizeof(shuffle));
75444 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75445 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
75446 inetpeer_invalidate_tree(AF_INET);
75447 }
75448
75449 @@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
75450 error = rt->dst.error;
75451 if (peer) {
75452 inet_peer_refcheck(rt->peer);
75453 - id = atomic_read(&peer->ip_id_count) & 0xffff;
75454 + id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75455 if (peer->tcp_ts_stamp) {
75456 ts = peer->tcp_ts;
75457 tsage = get_seconds() - peer->tcp_ts_stamp;
75458 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
75459 index 0cb86ce..8e7fda8 100644
75460 --- a/net/ipv4/tcp_ipv4.c
75461 +++ b/net/ipv4/tcp_ipv4.c
75462 @@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
75463 EXPORT_SYMBOL(sysctl_tcp_low_latency);
75464
75465
75466 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75467 +extern int grsec_enable_blackhole;
75468 +#endif
75469 +
75470 #ifdef CONFIG_TCP_MD5SIG
75471 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
75472 __be32 daddr, __be32 saddr, const struct tcphdr *th);
75473 @@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75474 return 0;
75475
75476 reset:
75477 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75478 + if (!grsec_enable_blackhole)
75479 +#endif
75480 tcp_v4_send_reset(rsk, skb);
75481 discard:
75482 kfree_skb(skb);
75483 @@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75484 TCP_SKB_CB(skb)->sacked = 0;
75485
75486 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75487 - if (!sk)
75488 + if (!sk) {
75489 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75490 + ret = 1;
75491 +#endif
75492 goto no_tcp_socket;
75493 -
75494 + }
75495 process:
75496 - if (sk->sk_state == TCP_TIME_WAIT)
75497 + if (sk->sk_state == TCP_TIME_WAIT) {
75498 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75499 + ret = 2;
75500 +#endif
75501 goto do_time_wait;
75502 + }
75503
75504 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75505 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75506 @@ -1758,6 +1772,10 @@ no_tcp_socket:
75507 bad_packet:
75508 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75509 } else {
75510 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75511 + if (!grsec_enable_blackhole || (ret == 1 &&
75512 + (skb->dev->flags & IFF_LOOPBACK)))
75513 +#endif
75514 tcp_v4_send_reset(NULL, skb);
75515 }
75516
75517 @@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75518 0, /* non standard timer */
75519 0, /* open_requests have no inode */
75520 atomic_read(&sk->sk_refcnt),
75521 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75522 + NULL,
75523 +#else
75524 req,
75525 +#endif
75526 len);
75527 }
75528
75529 @@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75530 sock_i_uid(sk),
75531 icsk->icsk_probes_out,
75532 sock_i_ino(sk),
75533 - atomic_read(&sk->sk_refcnt), sk,
75534 + atomic_read(&sk->sk_refcnt),
75535 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75536 + NULL,
75537 +#else
75538 + sk,
75539 +#endif
75540 jiffies_to_clock_t(icsk->icsk_rto),
75541 jiffies_to_clock_t(icsk->icsk_ack.ato),
75542 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
75543 @@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
75544 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75545 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75546 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75547 - atomic_read(&tw->tw_refcnt), tw, len);
75548 + atomic_read(&tw->tw_refcnt),
75549 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75550 + NULL,
75551 +#else
75552 + tw,
75553 +#endif
75554 + len);
75555 }
75556
75557 #define TMPSZ 150
75558 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
75559 index 3cabafb..640525b 100644
75560 --- a/net/ipv4/tcp_minisocks.c
75561 +++ b/net/ipv4/tcp_minisocks.c
75562 @@ -27,6 +27,10 @@
75563 #include <net/inet_common.h>
75564 #include <net/xfrm.h>
75565
75566 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75567 +extern int grsec_enable_blackhole;
75568 +#endif
75569 +
75570 int sysctl_tcp_syncookies __read_mostly = 1;
75571 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75572
75573 @@ -753,6 +757,10 @@ listen_overflow:
75574
75575 embryonic_reset:
75576 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75577 +
75578 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75579 + if (!grsec_enable_blackhole)
75580 +#endif
75581 if (!(flg & TCP_FLAG_RST))
75582 req->rsk_ops->send_reset(sk, skb);
75583
75584 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
75585 index a981cdc..48f4c3a 100644
75586 --- a/net/ipv4/tcp_probe.c
75587 +++ b/net/ipv4/tcp_probe.c
75588 @@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75589 if (cnt + width >= len)
75590 break;
75591
75592 - if (copy_to_user(buf + cnt, tbuf, width))
75593 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75594 return -EFAULT;
75595 cnt += width;
75596 }
75597 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
75598 index 34d4a02..3b57f86 100644
75599 --- a/net/ipv4/tcp_timer.c
75600 +++ b/net/ipv4/tcp_timer.c
75601 @@ -22,6 +22,10 @@
75602 #include <linux/gfp.h>
75603 #include <net/tcp.h>
75604
75605 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75606 +extern int grsec_lastack_retries;
75607 +#endif
75608 +
75609 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75610 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75611 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
75612 @@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
75613 }
75614 }
75615
75616 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75617 + if ((sk->sk_state == TCP_LAST_ACK) &&
75618 + (grsec_lastack_retries > 0) &&
75619 + (grsec_lastack_retries < retry_until))
75620 + retry_until = grsec_lastack_retries;
75621 +#endif
75622 +
75623 if (retransmits_timed_out(sk, retry_until,
75624 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
75625 /* Has it gone just too far? */
75626 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
75627 index fe14105..0618260 100644
75628 --- a/net/ipv4/udp.c
75629 +++ b/net/ipv4/udp.c
75630 @@ -87,6 +87,7 @@
75631 #include <linux/types.h>
75632 #include <linux/fcntl.h>
75633 #include <linux/module.h>
75634 +#include <linux/security.h>
75635 #include <linux/socket.h>
75636 #include <linux/sockios.h>
75637 #include <linux/igmp.h>
75638 @@ -109,6 +110,10 @@
75639 #include <trace/events/udp.h>
75640 #include "udp_impl.h"
75641
75642 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75643 +extern int grsec_enable_blackhole;
75644 +#endif
75645 +
75646 struct udp_table udp_table __read_mostly;
75647 EXPORT_SYMBOL(udp_table);
75648
75649 @@ -567,6 +572,9 @@ found:
75650 return s;
75651 }
75652
75653 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75654 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75655 +
75656 /*
75657 * This routine is called by the ICMP module when it gets some
75658 * sort of error condition. If err < 0 then the socket should
75659 @@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75660 dport = usin->sin_port;
75661 if (dport == 0)
75662 return -EINVAL;
75663 +
75664 + err = gr_search_udp_sendmsg(sk, usin);
75665 + if (err)
75666 + return err;
75667 } else {
75668 if (sk->sk_state != TCP_ESTABLISHED)
75669 return -EDESTADDRREQ;
75670 +
75671 + err = gr_search_udp_sendmsg(sk, NULL);
75672 + if (err)
75673 + return err;
75674 +
75675 daddr = inet->inet_daddr;
75676 dport = inet->inet_dport;
75677 /* Open fast path for connected socket.
75678 @@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
75679 udp_lib_checksum_complete(skb)) {
75680 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75681 IS_UDPLITE(sk));
75682 - atomic_inc(&sk->sk_drops);
75683 + atomic_inc_unchecked(&sk->sk_drops);
75684 __skb_unlink(skb, rcvq);
75685 __skb_queue_tail(&list_kill, skb);
75686 }
75687 @@ -1188,6 +1205,10 @@ try_again:
75688 if (!skb)
75689 goto out;
75690
75691 + err = gr_search_udp_recvmsg(sk, skb);
75692 + if (err)
75693 + goto out_free;
75694 +
75695 ulen = skb->len - sizeof(struct udphdr);
75696 copied = len;
75697 if (copied > ulen)
75698 @@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75699
75700 drop:
75701 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75702 - atomic_inc(&sk->sk_drops);
75703 + atomic_inc_unchecked(&sk->sk_drops);
75704 kfree_skb(skb);
75705 return -1;
75706 }
75707 @@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75708 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75709
75710 if (!skb1) {
75711 - atomic_inc(&sk->sk_drops);
75712 + atomic_inc_unchecked(&sk->sk_drops);
75713 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75714 IS_UDPLITE(sk));
75715 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75716 @@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75717 goto csum_error;
75718
75719 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75720 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75721 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75722 +#endif
75723 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75724
75725 /*
75726 @@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75727 sk_wmem_alloc_get(sp),
75728 sk_rmem_alloc_get(sp),
75729 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75730 - atomic_read(&sp->sk_refcnt), sp,
75731 - atomic_read(&sp->sk_drops), len);
75732 + atomic_read(&sp->sk_refcnt),
75733 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75734 + NULL,
75735 +#else
75736 + sp,
75737 +#endif
75738 + atomic_read_unchecked(&sp->sk_drops), len);
75739 }
75740
75741 int udp4_seq_show(struct seq_file *seq, void *v)
75742 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
75743 index 7d5cb97..c56564f 100644
75744 --- a/net/ipv6/addrconf.c
75745 +++ b/net/ipv6/addrconf.c
75746 @@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75747 p.iph.ihl = 5;
75748 p.iph.protocol = IPPROTO_IPV6;
75749 p.iph.ttl = 64;
75750 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75751 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75752
75753 if (ops->ndo_do_ioctl) {
75754 mm_segment_t oldfs = get_fs();
75755 diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
75756 index 02dd203..e03fcc9 100644
75757 --- a/net/ipv6/inet6_connection_sock.c
75758 +++ b/net/ipv6/inet6_connection_sock.c
75759 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
75760 #ifdef CONFIG_XFRM
75761 {
75762 struct rt6_info *rt = (struct rt6_info *)dst;
75763 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
75764 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
75765 }
75766 #endif
75767 }
75768 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
75769 #ifdef CONFIG_XFRM
75770 if (dst) {
75771 struct rt6_info *rt = (struct rt6_info *)dst;
75772 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
75773 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
75774 __sk_dst_reset(sk);
75775 dst = NULL;
75776 }
75777 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
75778 index 63dd1f8..e7f53ca 100644
75779 --- a/net/ipv6/ipv6_sockglue.c
75780 +++ b/net/ipv6/ipv6_sockglue.c
75781 @@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
75782 if (sk->sk_type != SOCK_STREAM)
75783 return -ENOPROTOOPT;
75784
75785 - msg.msg_control = optval;
75786 + msg.msg_control = (void __force_kernel *)optval;
75787 msg.msg_controllen = len;
75788 msg.msg_flags = flags;
75789
75790 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
75791 index 5bddea7..82d9d67 100644
75792 --- a/net/ipv6/raw.c
75793 +++ b/net/ipv6/raw.c
75794 @@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
75795 {
75796 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
75797 skb_checksum_complete(skb)) {
75798 - atomic_inc(&sk->sk_drops);
75799 + atomic_inc_unchecked(&sk->sk_drops);
75800 kfree_skb(skb);
75801 return NET_RX_DROP;
75802 }
75803 @@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75804 struct raw6_sock *rp = raw6_sk(sk);
75805
75806 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
75807 - atomic_inc(&sk->sk_drops);
75808 + atomic_inc_unchecked(&sk->sk_drops);
75809 kfree_skb(skb);
75810 return NET_RX_DROP;
75811 }
75812 @@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
75813
75814 if (inet->hdrincl) {
75815 if (skb_checksum_complete(skb)) {
75816 - atomic_inc(&sk->sk_drops);
75817 + atomic_inc_unchecked(&sk->sk_drops);
75818 kfree_skb(skb);
75819 return NET_RX_DROP;
75820 }
75821 @@ -602,7 +602,7 @@ out:
75822 return err;
75823 }
75824
75825 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
75826 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
75827 struct flowi6 *fl6, struct dst_entry **dstp,
75828 unsigned int flags)
75829 {
75830 @@ -914,12 +914,15 @@ do_confirm:
75831 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
75832 char __user *optval, int optlen)
75833 {
75834 + struct icmp6_filter filter;
75835 +
75836 switch (optname) {
75837 case ICMPV6_FILTER:
75838 if (optlen > sizeof(struct icmp6_filter))
75839 optlen = sizeof(struct icmp6_filter);
75840 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
75841 + if (copy_from_user(&filter, optval, optlen))
75842 return -EFAULT;
75843 + raw6_sk(sk)->filter = filter;
75844 return 0;
75845 default:
75846 return -ENOPROTOOPT;
75847 @@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75848 char __user *optval, int __user *optlen)
75849 {
75850 int len;
75851 + struct icmp6_filter filter;
75852
75853 switch (optname) {
75854 case ICMPV6_FILTER:
75855 @@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
75856 len = sizeof(struct icmp6_filter);
75857 if (put_user(len, optlen))
75858 return -EFAULT;
75859 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
75860 + filter = raw6_sk(sk)->filter;
75861 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
75862 return -EFAULT;
75863 return 0;
75864 default:
75865 @@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75866 0, 0L, 0,
75867 sock_i_uid(sp), 0,
75868 sock_i_ino(sp),
75869 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75870 + atomic_read(&sp->sk_refcnt),
75871 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75872 + NULL,
75873 +#else
75874 + sp,
75875 +#endif
75876 + atomic_read_unchecked(&sp->sk_drops));
75877 }
75878
75879 static int raw6_seq_show(struct seq_file *seq, void *v)
75880 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
75881 index 98256cf..7f16dbd 100644
75882 --- a/net/ipv6/tcp_ipv6.c
75883 +++ b/net/ipv6/tcp_ipv6.c
75884 @@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
75885 }
75886 #endif
75887
75888 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75889 +extern int grsec_enable_blackhole;
75890 +#endif
75891 +
75892 static void tcp_v6_hash(struct sock *sk)
75893 {
75894 if (sk->sk_state != TCP_CLOSE) {
75895 @@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
75896 return 0;
75897
75898 reset:
75899 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75900 + if (!grsec_enable_blackhole)
75901 +#endif
75902 tcp_v6_send_reset(sk, skb);
75903 discard:
75904 if (opt_skb)
75905 @@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
75906 TCP_SKB_CB(skb)->sacked = 0;
75907
75908 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75909 - if (!sk)
75910 + if (!sk) {
75911 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75912 + ret = 1;
75913 +#endif
75914 goto no_tcp_socket;
75915 + }
75916
75917 process:
75918 - if (sk->sk_state == TCP_TIME_WAIT)
75919 + if (sk->sk_state == TCP_TIME_WAIT) {
75920 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75921 + ret = 2;
75922 +#endif
75923 goto do_time_wait;
75924 + }
75925
75926 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
75927 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
75928 @@ -1676,6 +1691,10 @@ no_tcp_socket:
75929 bad_packet:
75930 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75931 } else {
75932 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75933 + if (!grsec_enable_blackhole || (ret == 1 &&
75934 + (skb->dev->flags & IFF_LOOPBACK)))
75935 +#endif
75936 tcp_v6_send_reset(NULL, skb);
75937 }
75938
75939 @@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
75940 uid,
75941 0, /* non standard timer */
75942 0, /* open_requests have no inode */
75943 - 0, req);
75944 + 0,
75945 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75946 + NULL
75947 +#else
75948 + req
75949 +#endif
75950 + );
75951 }
75952
75953 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75954 @@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
75955 sock_i_uid(sp),
75956 icsk->icsk_probes_out,
75957 sock_i_ino(sp),
75958 - atomic_read(&sp->sk_refcnt), sp,
75959 + atomic_read(&sp->sk_refcnt),
75960 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75961 + NULL,
75962 +#else
75963 + sp,
75964 +#endif
75965 jiffies_to_clock_t(icsk->icsk_rto),
75966 jiffies_to_clock_t(icsk->icsk_ack.ato),
75967 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
75968 @@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
75969 dest->s6_addr32[2], dest->s6_addr32[3], destp,
75970 tw->tw_substate, 0, 0,
75971 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75972 - atomic_read(&tw->tw_refcnt), tw);
75973 + atomic_read(&tw->tw_refcnt),
75974 +#ifdef CONFIG_GRKERNSEC_HIDESYM
75975 + NULL
75976 +#else
75977 + tw
75978 +#endif
75979 + );
75980 }
75981
75982 static int tcp6_seq_show(struct seq_file *seq, void *v)
75983 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
75984 index 37b0699..d323408 100644
75985 --- a/net/ipv6/udp.c
75986 +++ b/net/ipv6/udp.c
75987 @@ -50,6 +50,10 @@
75988 #include <linux/seq_file.h>
75989 #include "udp_impl.h"
75990
75991 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75992 +extern int grsec_enable_blackhole;
75993 +#endif
75994 +
75995 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
75996 {
75997 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
75998 @@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
75999
76000 return 0;
76001 drop:
76002 - atomic_inc(&sk->sk_drops);
76003 + atomic_inc_unchecked(&sk->sk_drops);
76004 drop_no_sk_drops_inc:
76005 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76006 kfree_skb(skb);
76007 @@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76008 continue;
76009 }
76010 drop:
76011 - atomic_inc(&sk->sk_drops);
76012 + atomic_inc_unchecked(&sk->sk_drops);
76013 UDP6_INC_STATS_BH(sock_net(sk),
76014 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76015 UDP6_INC_STATS_BH(sock_net(sk),
76016 @@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76017 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76018 proto == IPPROTO_UDPLITE);
76019
76020 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76021 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
76022 +#endif
76023 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76024
76025 kfree_skb(skb);
76026 @@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76027 if (!sock_owned_by_user(sk))
76028 udpv6_queue_rcv_skb(sk, skb);
76029 else if (sk_add_backlog(sk, skb)) {
76030 - atomic_inc(&sk->sk_drops);
76031 + atomic_inc_unchecked(&sk->sk_drops);
76032 bh_unlock_sock(sk);
76033 sock_put(sk);
76034 goto discard;
76035 @@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76036 0, 0L, 0,
76037 sock_i_uid(sp), 0,
76038 sock_i_ino(sp),
76039 - atomic_read(&sp->sk_refcnt), sp,
76040 - atomic_read(&sp->sk_drops));
76041 + atomic_read(&sp->sk_refcnt),
76042 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76043 + NULL,
76044 +#else
76045 + sp,
76046 +#endif
76047 + atomic_read_unchecked(&sp->sk_drops));
76048 }
76049
76050 int udp6_seq_show(struct seq_file *seq, void *v)
76051 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
76052 index 6b9d5a0..4dffaf1 100644
76053 --- a/net/irda/ircomm/ircomm_tty.c
76054 +++ b/net/irda/ircomm/ircomm_tty.c
76055 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76056 add_wait_queue(&self->open_wait, &wait);
76057
76058 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76059 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76060 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76061
76062 /* As far as I can see, we protect open_count - Jean II */
76063 spin_lock_irqsave(&self->spinlock, flags);
76064 if (!tty_hung_up_p(filp)) {
76065 extra_count = 1;
76066 - self->open_count--;
76067 + local_dec(&self->open_count);
76068 }
76069 spin_unlock_irqrestore(&self->spinlock, flags);
76070 - self->blocked_open++;
76071 + local_inc(&self->blocked_open);
76072
76073 while (1) {
76074 if (tty->termios->c_cflag & CBAUD) {
76075 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76076 }
76077
76078 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76079 - __FILE__,__LINE__, tty->driver->name, self->open_count );
76080 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76081
76082 schedule();
76083 }
76084 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76085 if (extra_count) {
76086 /* ++ is not atomic, so this should be protected - Jean II */
76087 spin_lock_irqsave(&self->spinlock, flags);
76088 - self->open_count++;
76089 + local_inc(&self->open_count);
76090 spin_unlock_irqrestore(&self->spinlock, flags);
76091 }
76092 - self->blocked_open--;
76093 + local_dec(&self->blocked_open);
76094
76095 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76096 - __FILE__,__LINE__, tty->driver->name, self->open_count);
76097 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76098
76099 if (!retval)
76100 self->flags |= ASYNC_NORMAL_ACTIVE;
76101 @@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76102 }
76103 /* ++ is not atomic, so this should be protected - Jean II */
76104 spin_lock_irqsave(&self->spinlock, flags);
76105 - self->open_count++;
76106 + local_inc(&self->open_count);
76107
76108 tty->driver_data = self;
76109 self->tty = tty;
76110 spin_unlock_irqrestore(&self->spinlock, flags);
76111
76112 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76113 - self->line, self->open_count);
76114 + self->line, local_read(&self->open_count));
76115
76116 /* Not really used by us, but lets do it anyway */
76117 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
76118 @@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76119 return;
76120 }
76121
76122 - if ((tty->count == 1) && (self->open_count != 1)) {
76123 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76124 /*
76125 * Uh, oh. tty->count is 1, which means that the tty
76126 * structure will be freed. state->count should always
76127 @@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76128 */
76129 IRDA_DEBUG(0, "%s(), bad serial port count; "
76130 "tty->count is 1, state->count is %d\n", __func__ ,
76131 - self->open_count);
76132 - self->open_count = 1;
76133 + local_read(&self->open_count));
76134 + local_set(&self->open_count, 1);
76135 }
76136
76137 - if (--self->open_count < 0) {
76138 + if (local_dec_return(&self->open_count) < 0) {
76139 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76140 - __func__, self->line, self->open_count);
76141 - self->open_count = 0;
76142 + __func__, self->line, local_read(&self->open_count));
76143 + local_set(&self->open_count, 0);
76144 }
76145 - if (self->open_count) {
76146 + if (local_read(&self->open_count)) {
76147 spin_unlock_irqrestore(&self->spinlock, flags);
76148
76149 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
76150 @@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76151 tty->closing = 0;
76152 self->tty = NULL;
76153
76154 - if (self->blocked_open) {
76155 + if (local_read(&self->blocked_open)) {
76156 if (self->close_delay)
76157 schedule_timeout_interruptible(self->close_delay);
76158 wake_up_interruptible(&self->open_wait);
76159 @@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76160 spin_lock_irqsave(&self->spinlock, flags);
76161 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76162 self->tty = NULL;
76163 - self->open_count = 0;
76164 + local_set(&self->open_count, 0);
76165 spin_unlock_irqrestore(&self->spinlock, flags);
76166
76167 wake_up_interruptible(&self->open_wait);
76168 @@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76169 seq_putc(m, '\n');
76170
76171 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76172 - seq_printf(m, "Open count: %d\n", self->open_count);
76173 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76174 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76175 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76176
76177 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
76178 index cd6f7a9..e63fe89 100644
76179 --- a/net/iucv/af_iucv.c
76180 +++ b/net/iucv/af_iucv.c
76181 @@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
76182
76183 write_lock_bh(&iucv_sk_list.lock);
76184
76185 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76186 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76187 while (__iucv_get_sock_by_name(name)) {
76188 sprintf(name, "%08x",
76189 - atomic_inc_return(&iucv_sk_list.autobind_name));
76190 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76191 }
76192
76193 write_unlock_bh(&iucv_sk_list.lock);
76194 diff --git a/net/key/af_key.c b/net/key/af_key.c
76195 index 7e5d927..cdbb54e 100644
76196 --- a/net/key/af_key.c
76197 +++ b/net/key/af_key.c
76198 @@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76199 static u32 get_acqseq(void)
76200 {
76201 u32 res;
76202 - static atomic_t acqseq;
76203 + static atomic_unchecked_t acqseq;
76204
76205 do {
76206 - res = atomic_inc_return(&acqseq);
76207 + res = atomic_inc_return_unchecked(&acqseq);
76208 } while (!res);
76209 return res;
76210 }
76211 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
76212 index db8fae5..ff070cd 100644
76213 --- a/net/mac80211/ieee80211_i.h
76214 +++ b/net/mac80211/ieee80211_i.h
76215 @@ -28,6 +28,7 @@
76216 #include <net/ieee80211_radiotap.h>
76217 #include <net/cfg80211.h>
76218 #include <net/mac80211.h>
76219 +#include <asm/local.h>
76220 #include "key.h"
76221 #include "sta_info.h"
76222
76223 @@ -842,7 +843,7 @@ struct ieee80211_local {
76224 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76225 spinlock_t queue_stop_reason_lock;
76226
76227 - int open_count;
76228 + local_t open_count;
76229 int monitors, cooked_mntrs;
76230 /* number of interfaces with corresponding FIF_ flags */
76231 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
76232 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
76233 index 48f937e..4ccd7b8 100644
76234 --- a/net/mac80211/iface.c
76235 +++ b/net/mac80211/iface.c
76236 @@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76237 break;
76238 }
76239
76240 - if (local->open_count == 0) {
76241 + if (local_read(&local->open_count) == 0) {
76242 res = drv_start(local);
76243 if (res)
76244 goto err_del_bss;
76245 @@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76246 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76247
76248 if (!is_valid_ether_addr(dev->dev_addr)) {
76249 - if (!local->open_count)
76250 + if (!local_read(&local->open_count))
76251 drv_stop(local);
76252 return -EADDRNOTAVAIL;
76253 }
76254 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76255 mutex_unlock(&local->mtx);
76256
76257 if (coming_up)
76258 - local->open_count++;
76259 + local_inc(&local->open_count);
76260
76261 if (hw_reconf_flags)
76262 ieee80211_hw_config(local, hw_reconf_flags);
76263 @@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76264 err_del_interface:
76265 drv_remove_interface(local, sdata);
76266 err_stop:
76267 - if (!local->open_count)
76268 + if (!local_read(&local->open_count))
76269 drv_stop(local);
76270 err_del_bss:
76271 sdata->bss = NULL;
76272 @@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76273 }
76274
76275 if (going_down)
76276 - local->open_count--;
76277 + local_dec(&local->open_count);
76278
76279 switch (sdata->vif.type) {
76280 case NL80211_IFTYPE_AP_VLAN:
76281 @@ -562,7 +562,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76282
76283 ieee80211_recalc_ps(local, -1);
76284
76285 - if (local->open_count == 0) {
76286 + if (local_read(&local->open_count) == 0) {
76287 if (local->ops->napi_poll)
76288 napi_disable(&local->napi);
76289 ieee80211_clear_tx_pending(local);
76290 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
76291 index 1633648..d45ebfa 100644
76292 --- a/net/mac80211/main.c
76293 +++ b/net/mac80211/main.c
76294 @@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76295 local->hw.conf.power_level = power;
76296 }
76297
76298 - if (changed && local->open_count) {
76299 + if (changed && local_read(&local->open_count)) {
76300 ret = drv_config(local, changed);
76301 /*
76302 * Goal:
76303 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
76304 index ef8eba1..5c63952 100644
76305 --- a/net/mac80211/pm.c
76306 +++ b/net/mac80211/pm.c
76307 @@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76308 struct ieee80211_sub_if_data *sdata;
76309 struct sta_info *sta;
76310
76311 - if (!local->open_count)
76312 + if (!local_read(&local->open_count))
76313 goto suspend;
76314
76315 ieee80211_scan_cancel(local);
76316 @@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76317 cancel_work_sync(&local->dynamic_ps_enable_work);
76318 del_timer_sync(&local->dynamic_ps_timer);
76319
76320 - local->wowlan = wowlan && local->open_count;
76321 + local->wowlan = wowlan && local_read(&local->open_count);
76322 if (local->wowlan) {
76323 int err = drv_suspend(local, wowlan);
76324 if (err < 0) {
76325 @@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76326 }
76327
76328 /* stop hardware - this must stop RX */
76329 - if (local->open_count)
76330 + if (local_read(&local->open_count))
76331 ieee80211_stop_device(local);
76332
76333 suspend:
76334 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
76335 index 3313c11..bec9f17 100644
76336 --- a/net/mac80211/rate.c
76337 +++ b/net/mac80211/rate.c
76338 @@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76339
76340 ASSERT_RTNL();
76341
76342 - if (local->open_count)
76343 + if (local_read(&local->open_count))
76344 return -EBUSY;
76345
76346 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
76347 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
76348 index c97a065..ff61928 100644
76349 --- a/net/mac80211/rc80211_pid_debugfs.c
76350 +++ b/net/mac80211/rc80211_pid_debugfs.c
76351 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
76352
76353 spin_unlock_irqrestore(&events->lock, status);
76354
76355 - if (copy_to_user(buf, pb, p))
76356 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76357 return -EFAULT;
76358
76359 return p;
76360 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
76361 index eb9d7c0..d34b832 100644
76362 --- a/net/mac80211/util.c
76363 +++ b/net/mac80211/util.c
76364 @@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76365 }
76366 #endif
76367 /* everything else happens only if HW was up & running */
76368 - if (!local->open_count)
76369 + if (!local_read(&local->open_count))
76370 goto wake_up;
76371
76372 /*
76373 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
76374 index 0c6f67e..d02cdfc 100644
76375 --- a/net/netfilter/Kconfig
76376 +++ b/net/netfilter/Kconfig
76377 @@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
76378
76379 To compile it as a module, choose M here. If unsure, say N.
76380
76381 +config NETFILTER_XT_MATCH_GRADM
76382 + tristate '"gradm" match support'
76383 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76384 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76385 + ---help---
76386 + The gradm match allows to match on grsecurity RBAC being enabled.
76387 + It is useful when iptables rules are applied early on bootup to
76388 + prevent connections to the machine (except from a trusted host)
76389 + while the RBAC system is disabled.
76390 +
76391 config NETFILTER_XT_MATCH_HASHLIMIT
76392 tristate '"hashlimit" match support'
76393 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76394 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
76395 index ca36765..0882e7c 100644
76396 --- a/net/netfilter/Makefile
76397 +++ b/net/netfilter/Makefile
76398 @@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
76399 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
76400 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
76401 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76402 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76403 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76404 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76405 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76406 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
76407 index 29fa5ba..8debc79 100644
76408 --- a/net/netfilter/ipvs/ip_vs_conn.c
76409 +++ b/net/netfilter/ipvs/ip_vs_conn.c
76410 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76411 /* Increase the refcnt counter of the dest */
76412 atomic_inc(&dest->refcnt);
76413
76414 - conn_flags = atomic_read(&dest->conn_flags);
76415 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
76416 if (cp->protocol != IPPROTO_UDP)
76417 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76418 /* Bind with the destination and its corresponding transmitter */
76419 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76420 atomic_set(&cp->refcnt, 1);
76421
76422 atomic_set(&cp->n_control, 0);
76423 - atomic_set(&cp->in_pkts, 0);
76424 + atomic_set_unchecked(&cp->in_pkts, 0);
76425
76426 atomic_inc(&ipvs->conn_count);
76427 if (flags & IP_VS_CONN_F_NO_CPORT)
76428 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76429
76430 /* Don't drop the entry if its number of incoming packets is not
76431 located in [0, 8] */
76432 - i = atomic_read(&cp->in_pkts);
76433 + i = atomic_read_unchecked(&cp->in_pkts);
76434 if (i > 8 || i < 0) return 0;
76435
76436 if (!todrop_rate[i]) return 0;
76437 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
76438 index 00bdb1d..6725a48 100644
76439 --- a/net/netfilter/ipvs/ip_vs_core.c
76440 +++ b/net/netfilter/ipvs/ip_vs_core.c
76441 @@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
76442 ret = cp->packet_xmit(skb, cp, pd->pp);
76443 /* do not touch skb anymore */
76444
76445 - atomic_inc(&cp->in_pkts);
76446 + atomic_inc_unchecked(&cp->in_pkts);
76447 ip_vs_conn_put(cp);
76448 return ret;
76449 }
76450 @@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76451 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76452 pkts = sysctl_sync_threshold(ipvs);
76453 else
76454 - pkts = atomic_add_return(1, &cp->in_pkts);
76455 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76456
76457 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
76458 cp->protocol == IPPROTO_SCTP) {
76459 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
76460 index f558998..7dfb054 100644
76461 --- a/net/netfilter/ipvs/ip_vs_ctl.c
76462 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
76463 @@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76464 ip_vs_rs_hash(ipvs, dest);
76465 write_unlock_bh(&ipvs->rs_lock);
76466 }
76467 - atomic_set(&dest->conn_flags, conn_flags);
76468 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
76469
76470 /* bind the service */
76471 if (!dest->svc) {
76472 @@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76473 {
76474 struct net_device *dev = ptr;
76475 struct net *net = dev_net(dev);
76476 + struct netns_ipvs *ipvs = net_ipvs(net);
76477 struct ip_vs_service *svc;
76478 struct ip_vs_dest *dest;
76479 unsigned int idx;
76480
76481 - if (event != NETDEV_UNREGISTER)
76482 + if (event != NETDEV_UNREGISTER || !ipvs)
76483 return NOTIFY_DONE;
76484 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
76485 EnterFunction(2);
76486 @@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76487 }
76488 }
76489
76490 - list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
76491 + list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
76492 __ip_vs_dev_reset(dest, dev);
76493 }
76494 mutex_unlock(&__ip_vs_mutex);
76495 @@ -2028,7 +2029,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76496 " %-7s %-6d %-10d %-10d\n",
76497 &dest->addr.in6,
76498 ntohs(dest->port),
76499 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76500 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76501 atomic_read(&dest->weight),
76502 atomic_read(&dest->activeconns),
76503 atomic_read(&dest->inactconns));
76504 @@ -2039,7 +2040,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76505 "%-7s %-6d %-10d %-10d\n",
76506 ntohl(dest->addr.ip),
76507 ntohs(dest->port),
76508 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76509 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76510 atomic_read(&dest->weight),
76511 atomic_read(&dest->activeconns),
76512 atomic_read(&dest->inactconns));
76513 @@ -2509,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76514
76515 entry.addr = dest->addr.ip;
76516 entry.port = dest->port;
76517 - entry.conn_flags = atomic_read(&dest->conn_flags);
76518 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76519 entry.weight = atomic_read(&dest->weight);
76520 entry.u_threshold = dest->u_threshold;
76521 entry.l_threshold = dest->l_threshold;
76522 @@ -3042,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
76523 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76524
76525 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76526 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76527 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76528 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76529 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76530 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
76531 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
76532 index 8a0d6d6..90ec197 100644
76533 --- a/net/netfilter/ipvs/ip_vs_sync.c
76534 +++ b/net/netfilter/ipvs/ip_vs_sync.c
76535 @@ -649,7 +649,7 @@ control:
76536 * i.e only increment in_pkts for Templates.
76537 */
76538 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
76539 - int pkts = atomic_add_return(1, &cp->in_pkts);
76540 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76541
76542 if (pkts % sysctl_sync_period(ipvs) != 1)
76543 return;
76544 @@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
76545
76546 if (opt)
76547 memcpy(&cp->in_seq, opt, sizeof(*opt));
76548 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76549 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
76550 cp->state = state;
76551 cp->old_state = cp->state;
76552 /*
76553 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
76554 index 7fd66de..e6fb361 100644
76555 --- a/net/netfilter/ipvs/ip_vs_xmit.c
76556 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
76557 @@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
76558 else
76559 rc = NF_ACCEPT;
76560 /* do not touch skb anymore */
76561 - atomic_inc(&cp->in_pkts);
76562 + atomic_inc_unchecked(&cp->in_pkts);
76563 goto out;
76564 }
76565
76566 @@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
76567 else
76568 rc = NF_ACCEPT;
76569 /* do not touch skb anymore */
76570 - atomic_inc(&cp->in_pkts);
76571 + atomic_inc_unchecked(&cp->in_pkts);
76572 goto out;
76573 }
76574
76575 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
76576 index 66b2c54..c7884e3 100644
76577 --- a/net/netfilter/nfnetlink_log.c
76578 +++ b/net/netfilter/nfnetlink_log.c
76579 @@ -70,7 +70,7 @@ struct nfulnl_instance {
76580 };
76581
76582 static DEFINE_SPINLOCK(instances_lock);
76583 -static atomic_t global_seq;
76584 +static atomic_unchecked_t global_seq;
76585
76586 #define INSTANCE_BUCKETS 16
76587 static struct hlist_head instance_table[INSTANCE_BUCKETS];
76588 @@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
76589 /* global sequence number */
76590 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
76591 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
76592 - htonl(atomic_inc_return(&global_seq)));
76593 + htonl(atomic_inc_return_unchecked(&global_seq)));
76594
76595 if (data_len) {
76596 struct nlattr *nla;
76597 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
76598 new file mode 100644
76599 index 0000000..6905327
76600 --- /dev/null
76601 +++ b/net/netfilter/xt_gradm.c
76602 @@ -0,0 +1,51 @@
76603 +/*
76604 + * gradm match for netfilter
76605 + * Copyright © Zbigniew Krzystolik, 2010
76606 + *
76607 + * This program is free software; you can redistribute it and/or modify
76608 + * it under the terms of the GNU General Public License; either version
76609 + * 2 or 3 as published by the Free Software Foundation.
76610 + */
76611 +#include <linux/module.h>
76612 +#include <linux/moduleparam.h>
76613 +#include <linux/skbuff.h>
76614 +#include <linux/netfilter/x_tables.h>
76615 +#include <linux/grsecurity.h>
76616 +#include <linux/netfilter/xt_gradm.h>
76617 +
76618 +static bool
76619 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
76620 +{
76621 + const struct xt_gradm_mtinfo *info = par->matchinfo;
76622 + bool retval = false;
76623 + if (gr_acl_is_enabled())
76624 + retval = true;
76625 + return retval ^ info->invflags;
76626 +}
76627 +
76628 +static struct xt_match gradm_mt_reg __read_mostly = {
76629 + .name = "gradm",
76630 + .revision = 0,
76631 + .family = NFPROTO_UNSPEC,
76632 + .match = gradm_mt,
76633 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
76634 + .me = THIS_MODULE,
76635 +};
76636 +
76637 +static int __init gradm_mt_init(void)
76638 +{
76639 + return xt_register_match(&gradm_mt_reg);
76640 +}
76641 +
76642 +static void __exit gradm_mt_exit(void)
76643 +{
76644 + xt_unregister_match(&gradm_mt_reg);
76645 +}
76646 +
76647 +module_init(gradm_mt_init);
76648 +module_exit(gradm_mt_exit);
76649 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
76650 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
76651 +MODULE_LICENSE("GPL");
76652 +MODULE_ALIAS("ipt_gradm");
76653 +MODULE_ALIAS("ip6t_gradm");
76654 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
76655 index 4fe4fb4..87a89e5 100644
76656 --- a/net/netfilter/xt_statistic.c
76657 +++ b/net/netfilter/xt_statistic.c
76658 @@ -19,7 +19,7 @@
76659 #include <linux/module.h>
76660
76661 struct xt_statistic_priv {
76662 - atomic_t count;
76663 + atomic_unchecked_t count;
76664 } ____cacheline_aligned_in_smp;
76665
76666 MODULE_LICENSE("GPL");
76667 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
76668 break;
76669 case XT_STATISTIC_MODE_NTH:
76670 do {
76671 - oval = atomic_read(&info->master->count);
76672 + oval = atomic_read_unchecked(&info->master->count);
76673 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
76674 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
76675 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
76676 if (nval == 0)
76677 ret = !ret;
76678 break;
76679 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
76680 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
76681 if (info->master == NULL)
76682 return -ENOMEM;
76683 - atomic_set(&info->master->count, info->u.nth.count);
76684 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
76685
76686 return 0;
76687 }
76688 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
76689 index faa48f7..65f7f54 100644
76690 --- a/net/netlink/af_netlink.c
76691 +++ b/net/netlink/af_netlink.c
76692 @@ -741,7 +741,7 @@ static void netlink_overrun(struct sock *sk)
76693 sk->sk_error_report(sk);
76694 }
76695 }
76696 - atomic_inc(&sk->sk_drops);
76697 + atomic_inc_unchecked(&sk->sk_drops);
76698 }
76699
76700 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
76701 @@ -2013,7 +2013,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
76702 sk_wmem_alloc_get(s),
76703 nlk->cb,
76704 atomic_read(&s->sk_refcnt),
76705 - atomic_read(&s->sk_drops),
76706 + atomic_read_unchecked(&s->sk_drops),
76707 sock_i_ino(s)
76708 );
76709
76710 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
76711 index 06592d8..64860f6 100644
76712 --- a/net/netrom/af_netrom.c
76713 +++ b/net/netrom/af_netrom.c
76714 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76715 struct sock *sk = sock->sk;
76716 struct nr_sock *nr = nr_sk(sk);
76717
76718 + memset(sax, 0, sizeof(*sax));
76719 lock_sock(sk);
76720 if (peer != 0) {
76721 if (sk->sk_state != TCP_ESTABLISHED) {
76722 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
76723 *uaddr_len = sizeof(struct full_sockaddr_ax25);
76724 } else {
76725 sax->fsa_ax25.sax25_family = AF_NETROM;
76726 - sax->fsa_ax25.sax25_ndigis = 0;
76727 sax->fsa_ax25.sax25_call = nr->source_addr;
76728 *uaddr_len = sizeof(struct sockaddr_ax25);
76729 }
76730 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
76731 index 4f2c0df..f0ff342 100644
76732 --- a/net/packet/af_packet.c
76733 +++ b/net/packet/af_packet.c
76734 @@ -1687,7 +1687,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76735
76736 spin_lock(&sk->sk_receive_queue.lock);
76737 po->stats.tp_packets++;
76738 - skb->dropcount = atomic_read(&sk->sk_drops);
76739 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
76740 __skb_queue_tail(&sk->sk_receive_queue, skb);
76741 spin_unlock(&sk->sk_receive_queue.lock);
76742 sk->sk_data_ready(sk, skb->len);
76743 @@ -1696,7 +1696,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
76744 drop_n_acct:
76745 spin_lock(&sk->sk_receive_queue.lock);
76746 po->stats.tp_drops++;
76747 - atomic_inc(&sk->sk_drops);
76748 + atomic_inc_unchecked(&sk->sk_drops);
76749 spin_unlock(&sk->sk_receive_queue.lock);
76750
76751 drop_n_restore:
76752 @@ -3294,7 +3294,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76753 case PACKET_HDRLEN:
76754 if (len > sizeof(int))
76755 len = sizeof(int);
76756 - if (copy_from_user(&val, optval, len))
76757 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
76758 return -EFAULT;
76759 switch (val) {
76760 case TPACKET_V1:
76761 @@ -3344,7 +3344,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
76762
76763 if (put_user(len, optlen))
76764 return -EFAULT;
76765 - if (copy_to_user(optval, data, len))
76766 + if (len > sizeof(st) || copy_to_user(optval, data, len))
76767 return -EFAULT;
76768 return 0;
76769 }
76770 diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
76771 index d65f699..05aa6ce 100644
76772 --- a/net/phonet/af_phonet.c
76773 +++ b/net/phonet/af_phonet.c
76774 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
76775 {
76776 struct phonet_protocol *pp;
76777
76778 - if (protocol >= PHONET_NPROTO)
76779 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76780 return NULL;
76781
76782 rcu_read_lock();
76783 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_register(unsigned int protocol,
76784 {
76785 int err = 0;
76786
76787 - if (protocol >= PHONET_NPROTO)
76788 + if (protocol < 0 || protocol >= PHONET_NPROTO)
76789 return -EINVAL;
76790
76791 err = proto_register(pp->prot, 1);
76792 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
76793 index 9726fe6..fc4e3a4 100644
76794 --- a/net/phonet/pep.c
76795 +++ b/net/phonet/pep.c
76796 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76797
76798 case PNS_PEP_CTRL_REQ:
76799 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
76800 - atomic_inc(&sk->sk_drops);
76801 + atomic_inc_unchecked(&sk->sk_drops);
76802 break;
76803 }
76804 __skb_pull(skb, 4);
76805 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
76806 }
76807
76808 if (pn->rx_credits == 0) {
76809 - atomic_inc(&sk->sk_drops);
76810 + atomic_inc_unchecked(&sk->sk_drops);
76811 err = -ENOBUFS;
76812 break;
76813 }
76814 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
76815 }
76816
76817 if (pn->rx_credits == 0) {
76818 - atomic_inc(&sk->sk_drops);
76819 + atomic_inc_unchecked(&sk->sk_drops);
76820 err = NET_RX_DROP;
76821 break;
76822 }
76823 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
76824 index 4c7eff3..59c727f 100644
76825 --- a/net/phonet/socket.c
76826 +++ b/net/phonet/socket.c
76827 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
76828 pn->resource, sk->sk_state,
76829 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
76830 sock_i_uid(sk), sock_i_ino(sk),
76831 - atomic_read(&sk->sk_refcnt), sk,
76832 - atomic_read(&sk->sk_drops), &len);
76833 + atomic_read(&sk->sk_refcnt),
76834 +#ifdef CONFIG_GRKERNSEC_HIDESYM
76835 + NULL,
76836 +#else
76837 + sk,
76838 +#endif
76839 + atomic_read_unchecked(&sk->sk_drops), &len);
76840 }
76841 seq_printf(seq, "%*s\n", 127 - len, "");
76842 return 0;
76843 diff --git a/net/rds/cong.c b/net/rds/cong.c
76844 index e5b65ac..f3b6fb7 100644
76845 --- a/net/rds/cong.c
76846 +++ b/net/rds/cong.c
76847 @@ -78,7 +78,7 @@
76848 * finds that the saved generation number is smaller than the global generation
76849 * number, it wakes up the process.
76850 */
76851 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
76852 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
76853
76854 /*
76855 * Congestion monitoring
76856 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
76857 rdsdebug("waking map %p for %pI4\n",
76858 map, &map->m_addr);
76859 rds_stats_inc(s_cong_update_received);
76860 - atomic_inc(&rds_cong_generation);
76861 + atomic_inc_unchecked(&rds_cong_generation);
76862 if (waitqueue_active(&map->m_waitq))
76863 wake_up(&map->m_waitq);
76864 if (waitqueue_active(&rds_poll_waitq))
76865 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
76866
76867 int rds_cong_updated_since(unsigned long *recent)
76868 {
76869 - unsigned long gen = atomic_read(&rds_cong_generation);
76870 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
76871
76872 if (likely(*recent == gen))
76873 return 0;
76874 diff --git a/net/rds/ib.h b/net/rds/ib.h
76875 index edfaaaf..8c89879 100644
76876 --- a/net/rds/ib.h
76877 +++ b/net/rds/ib.h
76878 @@ -128,7 +128,7 @@ struct rds_ib_connection {
76879 /* sending acks */
76880 unsigned long i_ack_flags;
76881 #ifdef KERNEL_HAS_ATOMIC64
76882 - atomic64_t i_ack_next; /* next ACK to send */
76883 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76884 #else
76885 spinlock_t i_ack_lock; /* protect i_ack_next */
76886 u64 i_ack_next; /* next ACK to send */
76887 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
76888 index a1e1162..265e129 100644
76889 --- a/net/rds/ib_cm.c
76890 +++ b/net/rds/ib_cm.c
76891 @@ -718,7 +718,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
76892 /* Clear the ACK state */
76893 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76894 #ifdef KERNEL_HAS_ATOMIC64
76895 - atomic64_set(&ic->i_ack_next, 0);
76896 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76897 #else
76898 ic->i_ack_next = 0;
76899 #endif
76900 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
76901 index 8d19491..05a3e65 100644
76902 --- a/net/rds/ib_recv.c
76903 +++ b/net/rds/ib_recv.c
76904 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76905 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
76906 int ack_required)
76907 {
76908 - atomic64_set(&ic->i_ack_next, seq);
76909 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76910 if (ack_required) {
76911 smp_mb__before_clear_bit();
76912 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76913 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
76914 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76915 smp_mb__after_clear_bit();
76916
76917 - return atomic64_read(&ic->i_ack_next);
76918 + return atomic64_read_unchecked(&ic->i_ack_next);
76919 }
76920 #endif
76921
76922 diff --git a/net/rds/iw.h b/net/rds/iw.h
76923 index 04ce3b1..48119a6 100644
76924 --- a/net/rds/iw.h
76925 +++ b/net/rds/iw.h
76926 @@ -134,7 +134,7 @@ struct rds_iw_connection {
76927 /* sending acks */
76928 unsigned long i_ack_flags;
76929 #ifdef KERNEL_HAS_ATOMIC64
76930 - atomic64_t i_ack_next; /* next ACK to send */
76931 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
76932 #else
76933 spinlock_t i_ack_lock; /* protect i_ack_next */
76934 u64 i_ack_next; /* next ACK to send */
76935 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
76936 index a91e1db..cf3053f 100644
76937 --- a/net/rds/iw_cm.c
76938 +++ b/net/rds/iw_cm.c
76939 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
76940 /* Clear the ACK state */
76941 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
76942 #ifdef KERNEL_HAS_ATOMIC64
76943 - atomic64_set(&ic->i_ack_next, 0);
76944 + atomic64_set_unchecked(&ic->i_ack_next, 0);
76945 #else
76946 ic->i_ack_next = 0;
76947 #endif
76948 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
76949 index 4503335..db566b4 100644
76950 --- a/net/rds/iw_recv.c
76951 +++ b/net/rds/iw_recv.c
76952 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76953 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
76954 int ack_required)
76955 {
76956 - atomic64_set(&ic->i_ack_next, seq);
76957 + atomic64_set_unchecked(&ic->i_ack_next, seq);
76958 if (ack_required) {
76959 smp_mb__before_clear_bit();
76960 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76961 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
76962 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
76963 smp_mb__after_clear_bit();
76964
76965 - return atomic64_read(&ic->i_ack_next);
76966 + return atomic64_read_unchecked(&ic->i_ack_next);
76967 }
76968 #endif
76969
76970 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
76971 index edac9ef..16bcb98 100644
76972 --- a/net/rds/tcp.c
76973 +++ b/net/rds/tcp.c
76974 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
76975 int val = 1;
76976
76977 set_fs(KERNEL_DS);
76978 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
76979 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
76980 sizeof(val));
76981 set_fs(oldfs);
76982 }
76983 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
76984 index 1b4fd68..2234175 100644
76985 --- a/net/rds/tcp_send.c
76986 +++ b/net/rds/tcp_send.c
76987 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
76988
76989 oldfs = get_fs();
76990 set_fs(KERNEL_DS);
76991 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
76992 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
76993 sizeof(val));
76994 set_fs(oldfs);
76995 }
76996 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
76997 index 74c064c..fdec26f 100644
76998 --- a/net/rxrpc/af_rxrpc.c
76999 +++ b/net/rxrpc/af_rxrpc.c
77000 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_ops;
77001 __be32 rxrpc_epoch;
77002
77003 /* current debugging ID */
77004 -atomic_t rxrpc_debug_id;
77005 +atomic_unchecked_t rxrpc_debug_id;
77006
77007 /* count of skbs currently in use */
77008 atomic_t rxrpc_n_skbs;
77009 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
77010 index c3126e8..21facc7 100644
77011 --- a/net/rxrpc/ar-ack.c
77012 +++ b/net/rxrpc/ar-ack.c
77013 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77014
77015 _enter("{%d,%d,%d,%d},",
77016 call->acks_hard, call->acks_unacked,
77017 - atomic_read(&call->sequence),
77018 + atomic_read_unchecked(&call->sequence),
77019 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
77020
77021 stop = 0;
77022 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
77023
77024 /* each Tx packet has a new serial number */
77025 sp->hdr.serial =
77026 - htonl(atomic_inc_return(&call->conn->serial));
77027 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
77028
77029 hdr = (struct rxrpc_header *) txb->head;
77030 hdr->serial = sp->hdr.serial;
77031 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
77032 */
77033 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
77034 {
77035 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
77036 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
77037 }
77038
77039 /*
77040 @@ -629,7 +629,7 @@ process_further:
77041
77042 latest = ntohl(sp->hdr.serial);
77043 hard = ntohl(ack.firstPacket);
77044 - tx = atomic_read(&call->sequence);
77045 + tx = atomic_read_unchecked(&call->sequence);
77046
77047 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77048 latest,
77049 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
77050 goto maybe_reschedule;
77051
77052 send_ACK_with_skew:
77053 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
77054 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
77055 ntohl(ack.serial));
77056 send_ACK:
77057 mtu = call->conn->trans->peer->if_mtu;
77058 @@ -1173,7 +1173,7 @@ send_ACK:
77059 ackinfo.rxMTU = htonl(5692);
77060 ackinfo.jumbo_max = htonl(4);
77061
77062 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77063 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77064 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
77065 ntohl(hdr.serial),
77066 ntohs(ack.maxSkew),
77067 @@ -1191,7 +1191,7 @@ send_ACK:
77068 send_message:
77069 _debug("send message");
77070
77071 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
77072 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
77073 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
77074 send_message_2:
77075
77076 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
77077 index bf656c2..48f9d27 100644
77078 --- a/net/rxrpc/ar-call.c
77079 +++ b/net/rxrpc/ar-call.c
77080 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
77081 spin_lock_init(&call->lock);
77082 rwlock_init(&call->state_lock);
77083 atomic_set(&call->usage, 1);
77084 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
77085 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77086 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
77087
77088 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
77089 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
77090 index 4106ca9..a338d7a 100644
77091 --- a/net/rxrpc/ar-connection.c
77092 +++ b/net/rxrpc/ar-connection.c
77093 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
77094 rwlock_init(&conn->lock);
77095 spin_lock_init(&conn->state_lock);
77096 atomic_set(&conn->usage, 1);
77097 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
77098 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77099 conn->avail_calls = RXRPC_MAXCALLS;
77100 conn->size_align = 4;
77101 conn->header_size = sizeof(struct rxrpc_header);
77102 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
77103 index e7ed43a..6afa140 100644
77104 --- a/net/rxrpc/ar-connevent.c
77105 +++ b/net/rxrpc/ar-connevent.c
77106 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
77107
77108 len = iov[0].iov_len + iov[1].iov_len;
77109
77110 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77111 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77112 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
77113
77114 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77115 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
77116 index 1a2b0633..e8d1382 100644
77117 --- a/net/rxrpc/ar-input.c
77118 +++ b/net/rxrpc/ar-input.c
77119 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
77120 /* track the latest serial number on this connection for ACK packet
77121 * information */
77122 serial = ntohl(sp->hdr.serial);
77123 - hi_serial = atomic_read(&call->conn->hi_serial);
77124 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
77125 while (serial > hi_serial)
77126 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
77127 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
77128 serial);
77129
77130 /* request ACK generation for any ACK or DATA packet that requests
77131 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
77132 index 8e22bd3..f66d1c0 100644
77133 --- a/net/rxrpc/ar-internal.h
77134 +++ b/net/rxrpc/ar-internal.h
77135 @@ -272,8 +272,8 @@ struct rxrpc_connection {
77136 int error; /* error code for local abort */
77137 int debug_id; /* debug ID for printks */
77138 unsigned call_counter; /* call ID counter */
77139 - atomic_t serial; /* packet serial number counter */
77140 - atomic_t hi_serial; /* highest serial number received */
77141 + atomic_unchecked_t serial; /* packet serial number counter */
77142 + atomic_unchecked_t hi_serial; /* highest serial number received */
77143 u8 avail_calls; /* number of calls available */
77144 u8 size_align; /* data size alignment (for security) */
77145 u8 header_size; /* rxrpc + security header size */
77146 @@ -346,7 +346,7 @@ struct rxrpc_call {
77147 spinlock_t lock;
77148 rwlock_t state_lock; /* lock for state transition */
77149 atomic_t usage;
77150 - atomic_t sequence; /* Tx data packet sequence counter */
77151 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
77152 u32 abort_code; /* local/remote abort code */
77153 enum { /* current state of call */
77154 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
77155 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
77156 */
77157 extern atomic_t rxrpc_n_skbs;
77158 extern __be32 rxrpc_epoch;
77159 -extern atomic_t rxrpc_debug_id;
77160 +extern atomic_unchecked_t rxrpc_debug_id;
77161 extern struct workqueue_struct *rxrpc_workqueue;
77162
77163 /*
77164 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
77165 index 87f7135..74d3703 100644
77166 --- a/net/rxrpc/ar-local.c
77167 +++ b/net/rxrpc/ar-local.c
77168 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
77169 spin_lock_init(&local->lock);
77170 rwlock_init(&local->services_lock);
77171 atomic_set(&local->usage, 1);
77172 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
77173 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77174 memcpy(&local->srx, srx, sizeof(*srx));
77175 }
77176
77177 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
77178 index 16ae887..d24f12b 100644
77179 --- a/net/rxrpc/ar-output.c
77180 +++ b/net/rxrpc/ar-output.c
77181 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
77182 sp->hdr.cid = call->cid;
77183 sp->hdr.callNumber = call->call_id;
77184 sp->hdr.seq =
77185 - htonl(atomic_inc_return(&call->sequence));
77186 + htonl(atomic_inc_return_unchecked(&call->sequence));
77187 sp->hdr.serial =
77188 - htonl(atomic_inc_return(&conn->serial));
77189 + htonl(atomic_inc_return_unchecked(&conn->serial));
77190 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
77191 sp->hdr.userStatus = 0;
77192 sp->hdr.securityIndex = conn->security_ix;
77193 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
77194 index 2754f09..b20e38f 100644
77195 --- a/net/rxrpc/ar-peer.c
77196 +++ b/net/rxrpc/ar-peer.c
77197 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
77198 INIT_LIST_HEAD(&peer->error_targets);
77199 spin_lock_init(&peer->lock);
77200 atomic_set(&peer->usage, 1);
77201 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
77202 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77203 memcpy(&peer->srx, srx, sizeof(*srx));
77204
77205 rxrpc_assess_MTU_size(peer);
77206 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
77207 index 38047f7..9f48511 100644
77208 --- a/net/rxrpc/ar-proc.c
77209 +++ b/net/rxrpc/ar-proc.c
77210 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
77211 atomic_read(&conn->usage),
77212 rxrpc_conn_states[conn->state],
77213 key_serial(conn->key),
77214 - atomic_read(&conn->serial),
77215 - atomic_read(&conn->hi_serial));
77216 + atomic_read_unchecked(&conn->serial),
77217 + atomic_read_unchecked(&conn->hi_serial));
77218
77219 return 0;
77220 }
77221 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
77222 index 92df566..87ec1bf 100644
77223 --- a/net/rxrpc/ar-transport.c
77224 +++ b/net/rxrpc/ar-transport.c
77225 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
77226 spin_lock_init(&trans->client_lock);
77227 rwlock_init(&trans->conn_lock);
77228 atomic_set(&trans->usage, 1);
77229 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
77230 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
77231
77232 if (peer->srx.transport.family == AF_INET) {
77233 switch (peer->srx.transport_type) {
77234 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
77235 index 7635107..4670276 100644
77236 --- a/net/rxrpc/rxkad.c
77237 +++ b/net/rxrpc/rxkad.c
77238 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
77239
77240 len = iov[0].iov_len + iov[1].iov_len;
77241
77242 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
77243 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77244 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
77245
77246 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
77247 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
77248
77249 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
77250
77251 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
77252 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
77253 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
77254
77255 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
77256 diff --git a/net/sctp/input.c b/net/sctp/input.c
77257 index 80f71af..be772c0 100644
77258 --- a/net/sctp/input.c
77259 +++ b/net/sctp/input.c
77260 @@ -736,15 +736,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
77261
77262 epb = &ep->base;
77263
77264 - if (hlist_unhashed(&epb->node))
77265 - return;
77266 -
77267 epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
77268
77269 head = &sctp_ep_hashtable[epb->hashent];
77270
77271 sctp_write_lock(&head->lock);
77272 - __hlist_del(&epb->node);
77273 + hlist_del_init(&epb->node);
77274 sctp_write_unlock(&head->lock);
77275 }
77276
77277 @@ -825,7 +822,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
77278 head = &sctp_assoc_hashtable[epb->hashent];
77279
77280 sctp_write_lock(&head->lock);
77281 - __hlist_del(&epb->node);
77282 + hlist_del_init(&epb->node);
77283 sctp_write_unlock(&head->lock);
77284 }
77285
77286 diff --git a/net/sctp/proc.c b/net/sctp/proc.c
77287 index 1e2eee8..ce3967e 100644
77288 --- a/net/sctp/proc.c
77289 +++ b/net/sctp/proc.c
77290 @@ -319,7 +319,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
77291 seq_printf(seq,
77292 "%8pK %8pK %-3d %-3d %-2d %-4d "
77293 "%4d %8d %8d %7d %5lu %-5d %5d ",
77294 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
77295 + assoc, sk,
77296 + sctp_sk(sk)->type, sk->sk_state,
77297 assoc->state, hash,
77298 assoc->assoc_id,
77299 assoc->sndbuf_used,
77300 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
77301 index 92ba71d..9352c05 100644
77302 --- a/net/sctp/socket.c
77303 +++ b/net/sctp/socket.c
77304 @@ -1231,8 +1231,14 @@ out_free:
77305 SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
77306 " kaddrs: %p err: %d\n",
77307 asoc, kaddrs, err);
77308 - if (asoc)
77309 + if (asoc) {
77310 + /* sctp_primitive_ASSOCIATE may have added this association
77311 + * To the hash table, try to unhash it, just in case, its a noop
77312 + * if it wasn't hashed so we're safe
77313 + */
77314 + sctp_unhash_established(asoc);
77315 sctp_association_free(asoc);
77316 + }
77317 return err;
77318 }
77319
77320 @@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
77321 goto out_unlock;
77322
77323 out_free:
77324 - if (new_asoc)
77325 + if (new_asoc) {
77326 + sctp_unhash_established(asoc);
77327 sctp_association_free(asoc);
77328 + }
77329 out_unlock:
77330 sctp_release_sock(sk);
77331
77332 @@ -4569,7 +4577,7 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
77333 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
77334 if (space_left < addrlen)
77335 return -ENOMEM;
77336 - if (copy_to_user(to, &temp, addrlen))
77337 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
77338 return -EFAULT;
77339 to += addrlen;
77340 cnt++;
77341 diff --git a/net/socket.c b/net/socket.c
77342 index 851edcd..b786851 100644
77343 --- a/net/socket.c
77344 +++ b/net/socket.c
77345 @@ -88,6 +88,7 @@
77346 #include <linux/nsproxy.h>
77347 #include <linux/magic.h>
77348 #include <linux/slab.h>
77349 +#include <linux/in.h>
77350
77351 #include <asm/uaccess.h>
77352 #include <asm/unistd.h>
77353 @@ -105,6 +106,8 @@
77354 #include <linux/sockios.h>
77355 #include <linux/atalk.h>
77356
77357 +#include <linux/grsock.h>
77358 +
77359 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
77360 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
77361 unsigned long nr_segs, loff_t pos);
77362 @@ -321,7 +324,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
77363 &sockfs_dentry_operations, SOCKFS_MAGIC);
77364 }
77365
77366 -static struct vfsmount *sock_mnt __read_mostly;
77367 +struct vfsmount *sock_mnt __read_mostly;
77368
77369 static struct file_system_type sock_fs_type = {
77370 .name = "sockfs",
77371 @@ -1207,6 +1210,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
77372 return -EAFNOSUPPORT;
77373 if (type < 0 || type >= SOCK_MAX)
77374 return -EINVAL;
77375 + if (protocol < 0)
77376 + return -EINVAL;
77377
77378 /* Compatibility.
77379
77380 @@ -1339,6 +1344,16 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
77381 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
77382 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
77383
77384 + if(!gr_search_socket(family, type, protocol)) {
77385 + retval = -EACCES;
77386 + goto out;
77387 + }
77388 +
77389 + if (gr_handle_sock_all(family, type, protocol)) {
77390 + retval = -EACCES;
77391 + goto out;
77392 + }
77393 +
77394 retval = sock_create(family, type, protocol, &sock);
77395 if (retval < 0)
77396 goto out;
77397 @@ -1451,6 +1466,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77398 if (sock) {
77399 err = move_addr_to_kernel(umyaddr, addrlen, &address);
77400 if (err >= 0) {
77401 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
77402 + err = -EACCES;
77403 + goto error;
77404 + }
77405 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
77406 + if (err)
77407 + goto error;
77408 +
77409 err = security_socket_bind(sock,
77410 (struct sockaddr *)&address,
77411 addrlen);
77412 @@ -1459,6 +1482,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
77413 (struct sockaddr *)
77414 &address, addrlen);
77415 }
77416 +error:
77417 fput_light(sock->file, fput_needed);
77418 }
77419 return err;
77420 @@ -1482,10 +1506,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
77421 if ((unsigned)backlog > somaxconn)
77422 backlog = somaxconn;
77423
77424 + if (gr_handle_sock_server_other(sock->sk)) {
77425 + err = -EPERM;
77426 + goto error;
77427 + }
77428 +
77429 + err = gr_search_listen(sock);
77430 + if (err)
77431 + goto error;
77432 +
77433 err = security_socket_listen(sock, backlog);
77434 if (!err)
77435 err = sock->ops->listen(sock, backlog);
77436
77437 +error:
77438 fput_light(sock->file, fput_needed);
77439 }
77440 return err;
77441 @@ -1529,6 +1563,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77442 newsock->type = sock->type;
77443 newsock->ops = sock->ops;
77444
77445 + if (gr_handle_sock_server_other(sock->sk)) {
77446 + err = -EPERM;
77447 + sock_release(newsock);
77448 + goto out_put;
77449 + }
77450 +
77451 + err = gr_search_accept(sock);
77452 + if (err) {
77453 + sock_release(newsock);
77454 + goto out_put;
77455 + }
77456 +
77457 /*
77458 * We don't need try_module_get here, as the listening socket (sock)
77459 * has the protocol module (sock->ops->owner) held.
77460 @@ -1567,6 +1613,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
77461 fd_install(newfd, newfile);
77462 err = newfd;
77463
77464 + gr_attach_curr_ip(newsock->sk);
77465 +
77466 out_put:
77467 fput_light(sock->file, fput_needed);
77468 out:
77469 @@ -1599,6 +1647,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77470 int, addrlen)
77471 {
77472 struct socket *sock;
77473 + struct sockaddr *sck;
77474 struct sockaddr_storage address;
77475 int err, fput_needed;
77476
77477 @@ -1609,6 +1658,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
77478 if (err < 0)
77479 goto out_put;
77480
77481 + sck = (struct sockaddr *)&address;
77482 +
77483 + if (gr_handle_sock_client(sck)) {
77484 + err = -EACCES;
77485 + goto out_put;
77486 + }
77487 +
77488 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
77489 + if (err)
77490 + goto out_put;
77491 +
77492 err =
77493 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
77494 if (err)
77495 @@ -1966,7 +2026,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
77496 * checking falls down on this.
77497 */
77498 if (copy_from_user(ctl_buf,
77499 - (void __user __force *)msg_sys->msg_control,
77500 + (void __force_user *)msg_sys->msg_control,
77501 ctl_len))
77502 goto out_freectl;
77503 msg_sys->msg_control = ctl_buf;
77504 @@ -2136,7 +2196,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
77505 * kernel msghdr to use the kernel address space)
77506 */
77507
77508 - uaddr = (__force void __user *)msg_sys->msg_name;
77509 + uaddr = (void __force_user *)msg_sys->msg_name;
77510 uaddr_len = COMPAT_NAMELEN(msg);
77511 if (MSG_CMSG_COMPAT & flags) {
77512 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
77513 @@ -2758,7 +2818,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77514 }
77515
77516 ifr = compat_alloc_user_space(buf_size);
77517 - rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8);
77518 + rxnfc = (void __user *)ifr + ALIGN(sizeof(struct ifreq), 8);
77519
77520 if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ))
77521 return -EFAULT;
77522 @@ -2782,12 +2842,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77523 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
77524
77525 if (copy_in_user(rxnfc, compat_rxnfc,
77526 - (void *)(&rxnfc->fs.m_ext + 1) -
77527 - (void *)rxnfc) ||
77528 + (void __user *)(&rxnfc->fs.m_ext + 1) -
77529 + (void __user *)rxnfc) ||
77530 copy_in_user(&rxnfc->fs.ring_cookie,
77531 &compat_rxnfc->fs.ring_cookie,
77532 - (void *)(&rxnfc->fs.location + 1) -
77533 - (void *)&rxnfc->fs.ring_cookie) ||
77534 + (void __user *)(&rxnfc->fs.location + 1) -
77535 + (void __user *)&rxnfc->fs.ring_cookie) ||
77536 copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
77537 sizeof(rxnfc->rule_cnt)))
77538 return -EFAULT;
77539 @@ -2799,12 +2859,12 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
77540
77541 if (convert_out) {
77542 if (copy_in_user(compat_rxnfc, rxnfc,
77543 - (const void *)(&rxnfc->fs.m_ext + 1) -
77544 - (const void *)rxnfc) ||
77545 + (const void __user *)(&rxnfc->fs.m_ext + 1) -
77546 + (const void __user *)rxnfc) ||
77547 copy_in_user(&compat_rxnfc->fs.ring_cookie,
77548 &rxnfc->fs.ring_cookie,
77549 - (const void *)(&rxnfc->fs.location + 1) -
77550 - (const void *)&rxnfc->fs.ring_cookie) ||
77551 + (const void __user *)(&rxnfc->fs.location + 1) -
77552 + (const void __user *)&rxnfc->fs.ring_cookie) ||
77553 copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt,
77554 sizeof(rxnfc->rule_cnt)))
77555 return -EFAULT;
77556 @@ -2874,7 +2934,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
77557 old_fs = get_fs();
77558 set_fs(KERNEL_DS);
77559 err = dev_ioctl(net, cmd,
77560 - (struct ifreq __user __force *) &kifr);
77561 + (struct ifreq __force_user *) &kifr);
77562 set_fs(old_fs);
77563
77564 return err;
77565 @@ -2983,7 +3043,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
77566
77567 old_fs = get_fs();
77568 set_fs(KERNEL_DS);
77569 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
77570 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
77571 set_fs(old_fs);
77572
77573 if (cmd == SIOCGIFMAP && !err) {
77574 @@ -3088,7 +3148,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
77575 ret |= __get_user(rtdev, &(ur4->rt_dev));
77576 if (rtdev) {
77577 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
77578 - r4.rt_dev = (char __user __force *)devname;
77579 + r4.rt_dev = (char __force_user *)devname;
77580 devname[15] = 0;
77581 } else
77582 r4.rt_dev = NULL;
77583 @@ -3314,8 +3374,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
77584 int __user *uoptlen;
77585 int err;
77586
77587 - uoptval = (char __user __force *) optval;
77588 - uoptlen = (int __user __force *) optlen;
77589 + uoptval = (char __force_user *) optval;
77590 + uoptlen = (int __force_user *) optlen;
77591
77592 set_fs(KERNEL_DS);
77593 if (level == SOL_SOCKET)
77594 @@ -3335,7 +3395,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
77595 char __user *uoptval;
77596 int err;
77597
77598 - uoptval = (char __user __force *) optval;
77599 + uoptval = (char __force_user *) optval;
77600
77601 set_fs(KERNEL_DS);
77602 if (level == SOL_SOCKET)
77603 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
77604 index 994cfea..5343b6b 100644
77605 --- a/net/sunrpc/sched.c
77606 +++ b/net/sunrpc/sched.c
77607 @@ -240,9 +240,9 @@ static int rpc_wait_bit_killable(void *word)
77608 #ifdef RPC_DEBUG
77609 static void rpc_task_set_debuginfo(struct rpc_task *task)
77610 {
77611 - static atomic_t rpc_pid;
77612 + static atomic_unchecked_t rpc_pid;
77613
77614 - task->tk_pid = atomic_inc_return(&rpc_pid);
77615 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
77616 }
77617 #else
77618 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
77619 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
77620 index 8343737..677025e 100644
77621 --- a/net/sunrpc/xprtrdma/svc_rdma.c
77622 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
77623 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
77624 static unsigned int min_max_inline = 4096;
77625 static unsigned int max_max_inline = 65536;
77626
77627 -atomic_t rdma_stat_recv;
77628 -atomic_t rdma_stat_read;
77629 -atomic_t rdma_stat_write;
77630 -atomic_t rdma_stat_sq_starve;
77631 -atomic_t rdma_stat_rq_starve;
77632 -atomic_t rdma_stat_rq_poll;
77633 -atomic_t rdma_stat_rq_prod;
77634 -atomic_t rdma_stat_sq_poll;
77635 -atomic_t rdma_stat_sq_prod;
77636 +atomic_unchecked_t rdma_stat_recv;
77637 +atomic_unchecked_t rdma_stat_read;
77638 +atomic_unchecked_t rdma_stat_write;
77639 +atomic_unchecked_t rdma_stat_sq_starve;
77640 +atomic_unchecked_t rdma_stat_rq_starve;
77641 +atomic_unchecked_t rdma_stat_rq_poll;
77642 +atomic_unchecked_t rdma_stat_rq_prod;
77643 +atomic_unchecked_t rdma_stat_sq_poll;
77644 +atomic_unchecked_t rdma_stat_sq_prod;
77645
77646 /* Temporary NFS request map and context caches */
77647 struct kmem_cache *svc_rdma_map_cachep;
77648 @@ -110,7 +110,7 @@ static int read_reset_stat(ctl_table *table, int write,
77649 len -= *ppos;
77650 if (len > *lenp)
77651 len = *lenp;
77652 - if (len && copy_to_user(buffer, str_buf, len))
77653 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
77654 return -EFAULT;
77655 *lenp = len;
77656 *ppos += len;
77657 @@ -151,63 +151,63 @@ static ctl_table svcrdma_parm_table[] = {
77658 {
77659 .procname = "rdma_stat_read",
77660 .data = &rdma_stat_read,
77661 - .maxlen = sizeof(atomic_t),
77662 + .maxlen = sizeof(atomic_unchecked_t),
77663 .mode = 0644,
77664 .proc_handler = read_reset_stat,
77665 },
77666 {
77667 .procname = "rdma_stat_recv",
77668 .data = &rdma_stat_recv,
77669 - .maxlen = sizeof(atomic_t),
77670 + .maxlen = sizeof(atomic_unchecked_t),
77671 .mode = 0644,
77672 .proc_handler = read_reset_stat,
77673 },
77674 {
77675 .procname = "rdma_stat_write",
77676 .data = &rdma_stat_write,
77677 - .maxlen = sizeof(atomic_t),
77678 + .maxlen = sizeof(atomic_unchecked_t),
77679 .mode = 0644,
77680 .proc_handler = read_reset_stat,
77681 },
77682 {
77683 .procname = "rdma_stat_sq_starve",
77684 .data = &rdma_stat_sq_starve,
77685 - .maxlen = sizeof(atomic_t),
77686 + .maxlen = sizeof(atomic_unchecked_t),
77687 .mode = 0644,
77688 .proc_handler = read_reset_stat,
77689 },
77690 {
77691 .procname = "rdma_stat_rq_starve",
77692 .data = &rdma_stat_rq_starve,
77693 - .maxlen = sizeof(atomic_t),
77694 + .maxlen = sizeof(atomic_unchecked_t),
77695 .mode = 0644,
77696 .proc_handler = read_reset_stat,
77697 },
77698 {
77699 .procname = "rdma_stat_rq_poll",
77700 .data = &rdma_stat_rq_poll,
77701 - .maxlen = sizeof(atomic_t),
77702 + .maxlen = sizeof(atomic_unchecked_t),
77703 .mode = 0644,
77704 .proc_handler = read_reset_stat,
77705 },
77706 {
77707 .procname = "rdma_stat_rq_prod",
77708 .data = &rdma_stat_rq_prod,
77709 - .maxlen = sizeof(atomic_t),
77710 + .maxlen = sizeof(atomic_unchecked_t),
77711 .mode = 0644,
77712 .proc_handler = read_reset_stat,
77713 },
77714 {
77715 .procname = "rdma_stat_sq_poll",
77716 .data = &rdma_stat_sq_poll,
77717 - .maxlen = sizeof(atomic_t),
77718 + .maxlen = sizeof(atomic_unchecked_t),
77719 .mode = 0644,
77720 .proc_handler = read_reset_stat,
77721 },
77722 {
77723 .procname = "rdma_stat_sq_prod",
77724 .data = &rdma_stat_sq_prod,
77725 - .maxlen = sizeof(atomic_t),
77726 + .maxlen = sizeof(atomic_unchecked_t),
77727 .mode = 0644,
77728 .proc_handler = read_reset_stat,
77729 },
77730 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77731 index 41cb63b..c4a1489 100644
77732 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77733 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
77734 @@ -501,7 +501,7 @@ next_sge:
77735 svc_rdma_put_context(ctxt, 0);
77736 goto out;
77737 }
77738 - atomic_inc(&rdma_stat_read);
77739 + atomic_inc_unchecked(&rdma_stat_read);
77740
77741 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
77742 chl_map->ch[ch_no].count -= read_wr.num_sge;
77743 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77744 dto_q);
77745 list_del_init(&ctxt->dto_q);
77746 } else {
77747 - atomic_inc(&rdma_stat_rq_starve);
77748 + atomic_inc_unchecked(&rdma_stat_rq_starve);
77749 clear_bit(XPT_DATA, &xprt->xpt_flags);
77750 ctxt = NULL;
77751 }
77752 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
77753 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
77754 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
77755 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
77756 - atomic_inc(&rdma_stat_recv);
77757 + atomic_inc_unchecked(&rdma_stat_recv);
77758
77759 /* Build up the XDR from the receive buffers. */
77760 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
77761 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77762 index 42eb7ba..c887c45 100644
77763 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77764 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
77765 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
77766 write_wr.wr.rdma.remote_addr = to;
77767
77768 /* Post It */
77769 - atomic_inc(&rdma_stat_write);
77770 + atomic_inc_unchecked(&rdma_stat_write);
77771 if (svc_rdma_send(xprt, &write_wr))
77772 goto err;
77773 return 0;
77774 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77775 index 73b428b..5f3f8f3 100644
77776 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
77777 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
77778 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77779 return;
77780
77781 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
77782 - atomic_inc(&rdma_stat_rq_poll);
77783 + atomic_inc_unchecked(&rdma_stat_rq_poll);
77784
77785 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
77786 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
77787 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
77788 }
77789
77790 if (ctxt)
77791 - atomic_inc(&rdma_stat_rq_prod);
77792 + atomic_inc_unchecked(&rdma_stat_rq_prod);
77793
77794 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
77795 /*
77796 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77797 return;
77798
77799 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
77800 - atomic_inc(&rdma_stat_sq_poll);
77801 + atomic_inc_unchecked(&rdma_stat_sq_poll);
77802 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
77803 if (wc.status != IB_WC_SUCCESS)
77804 /* Close the transport */
77805 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
77806 }
77807
77808 if (ctxt)
77809 - atomic_inc(&rdma_stat_sq_prod);
77810 + atomic_inc_unchecked(&rdma_stat_sq_prod);
77811 }
77812
77813 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
77814 @@ -1266,7 +1266,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
77815 spin_lock_bh(&xprt->sc_lock);
77816 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
77817 spin_unlock_bh(&xprt->sc_lock);
77818 - atomic_inc(&rdma_stat_sq_starve);
77819 + atomic_inc_unchecked(&rdma_stat_sq_starve);
77820
77821 /* See if we can opportunistically reap SQ WR to make room */
77822 sq_cq_reap(xprt);
77823 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
77824 index c3e65ae..f512a2b 100644
77825 --- a/net/sysctl_net.c
77826 +++ b/net/sysctl_net.c
77827 @@ -47,7 +47,7 @@ static int net_ctl_permissions(struct ctl_table_root *root,
77828 struct ctl_table *table)
77829 {
77830 /* Allow network administrator to have same access as root. */
77831 - if (capable(CAP_NET_ADMIN)) {
77832 + if (capable_nolog(CAP_NET_ADMIN)) {
77833 int mode = (table->mode >> 6) & 7;
77834 return (mode << 6) | (mode << 3) | mode;
77835 }
77836 diff --git a/net/tipc/link.c b/net/tipc/link.c
77837 index b4b9b30..5b62131 100644
77838 --- a/net/tipc/link.c
77839 +++ b/net/tipc/link.c
77840 @@ -1203,7 +1203,7 @@ static int link_send_sections_long(struct tipc_port *sender,
77841 struct tipc_msg fragm_hdr;
77842 struct sk_buff *buf, *buf_chain, *prev;
77843 u32 fragm_crs, fragm_rest, hsz, sect_rest;
77844 - const unchar *sect_crs;
77845 + const unchar __user *sect_crs;
77846 int curr_sect;
77847 u32 fragm_no;
77848
77849 @@ -1247,7 +1247,7 @@ again:
77850
77851 if (!sect_rest) {
77852 sect_rest = msg_sect[++curr_sect].iov_len;
77853 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
77854 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
77855 }
77856
77857 if (sect_rest < fragm_rest)
77858 @@ -1266,7 +1266,7 @@ error:
77859 }
77860 } else
77861 skb_copy_to_linear_data_offset(buf, fragm_crs,
77862 - sect_crs, sz);
77863 + (const void __force_kernel *)sect_crs, sz);
77864 sect_crs += sz;
77865 sect_rest -= sz;
77866 fragm_crs += sz;
77867 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
77868 index e3afe16..333ea83 100644
77869 --- a/net/tipc/msg.c
77870 +++ b/net/tipc/msg.c
77871 @@ -99,7 +99,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77872 msg_sect[cnt].iov_len);
77873 else
77874 skb_copy_to_linear_data_offset(*buf, pos,
77875 - msg_sect[cnt].iov_base,
77876 + (const void __force_kernel *)msg_sect[cnt].iov_base,
77877 msg_sect[cnt].iov_len);
77878 pos += msg_sect[cnt].iov_len;
77879 }
77880 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
77881 index b2964e9..fdf2e27 100644
77882 --- a/net/tipc/subscr.c
77883 +++ b/net/tipc/subscr.c
77884 @@ -101,7 +101,7 @@ static void subscr_send_event(struct tipc_subscription *sub,
77885 {
77886 struct iovec msg_sect;
77887
77888 - msg_sect.iov_base = (void *)&sub->evt;
77889 + msg_sect.iov_base = (void __force_user *)&sub->evt;
77890 msg_sect.iov_len = sizeof(struct tipc_event);
77891
77892 sub->evt.event = htohl(event, sub->swap);
77893 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
77894 index d510353..26c8a32 100644
77895 --- a/net/unix/af_unix.c
77896 +++ b/net/unix/af_unix.c
77897 @@ -779,6 +779,12 @@ static struct sock *unix_find_other(struct net *net,
77898 err = -ECONNREFUSED;
77899 if (!S_ISSOCK(inode->i_mode))
77900 goto put_fail;
77901 +
77902 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
77903 + err = -EACCES;
77904 + goto put_fail;
77905 + }
77906 +
77907 u = unix_find_socket_byinode(inode);
77908 if (!u)
77909 goto put_fail;
77910 @@ -799,6 +805,13 @@ static struct sock *unix_find_other(struct net *net,
77911 if (u) {
77912 struct dentry *dentry;
77913 dentry = unix_sk(u)->path.dentry;
77914 +
77915 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
77916 + err = -EPERM;
77917 + sock_put(u);
77918 + goto fail;
77919 + }
77920 +
77921 if (dentry)
77922 touch_atime(&unix_sk(u)->path);
77923 } else
77924 @@ -881,11 +894,18 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
77925 err = security_path_mknod(&path, dentry, mode, 0);
77926 if (err)
77927 goto out_mknod_drop_write;
77928 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
77929 + err = -EACCES;
77930 + goto out_mknod_drop_write;
77931 + }
77932 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
77933 out_mknod_drop_write:
77934 mnt_drop_write(path.mnt);
77935 if (err)
77936 goto out_mknod_dput;
77937 +
77938 + gr_handle_create(dentry, path.mnt);
77939 +
77940 mutex_unlock(&path.dentry->d_inode->i_mutex);
77941 dput(path.dentry);
77942 path.dentry = dentry;
77943 diff --git a/net/wireless/core.h b/net/wireless/core.h
77944 index 3ac2dd0..fbe533e 100644
77945 --- a/net/wireless/core.h
77946 +++ b/net/wireless/core.h
77947 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
77948 struct mutex mtx;
77949
77950 /* rfkill support */
77951 - struct rfkill_ops rfkill_ops;
77952 + rfkill_ops_no_const rfkill_ops;
77953 struct rfkill *rfkill;
77954 struct work_struct rfkill_sync;
77955
77956 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
77957 index af648e0..6185d3a 100644
77958 --- a/net/wireless/wext-core.c
77959 +++ b/net/wireless/wext-core.c
77960 @@ -747,8 +747,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77961 */
77962
77963 /* Support for very large requests */
77964 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
77965 - (user_length > descr->max_tokens)) {
77966 + if (user_length > descr->max_tokens) {
77967 /* Allow userspace to GET more than max so
77968 * we can support any size GET requests.
77969 * There is still a limit : -ENOMEM.
77970 @@ -787,22 +786,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
77971 }
77972 }
77973
77974 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
77975 - /*
77976 - * If this is a GET, but not NOMAX, it means that the extra
77977 - * data is not bounded by userspace, but by max_tokens. Thus
77978 - * set the length to max_tokens. This matches the extra data
77979 - * allocation.
77980 - * The driver should fill it with the number of tokens it
77981 - * provided, and it may check iwp->length rather than having
77982 - * knowledge of max_tokens. If the driver doesn't change the
77983 - * iwp->length, this ioctl just copies back max_token tokens
77984 - * filled with zeroes. Hopefully the driver isn't claiming
77985 - * them to be valid data.
77986 - */
77987 - iwp->length = descr->max_tokens;
77988 - }
77989 -
77990 err = handler(dev, info, (union iwreq_data *) iwp, extra);
77991
77992 iwp->length += essid_compat;
77993 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
77994 index a15d2a0..12142af 100644
77995 --- a/net/xfrm/xfrm_policy.c
77996 +++ b/net/xfrm/xfrm_policy.c
77997 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
77998 {
77999 policy->walk.dead = 1;
78000
78001 - atomic_inc(&policy->genid);
78002 + atomic_inc_unchecked(&policy->genid);
78003
78004 if (del_timer(&policy->timer))
78005 xfrm_pol_put(policy);
78006 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
78007 hlist_add_head(&policy->bydst, chain);
78008 xfrm_pol_hold(policy);
78009 net->xfrm.policy_count[dir]++;
78010 - atomic_inc(&flow_cache_genid);
78011 + atomic_inc_unchecked(&flow_cache_genid);
78012 if (delpol)
78013 __xfrm_policy_unlink(delpol, dir);
78014 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
78015 @@ -1530,7 +1530,7 @@ free_dst:
78016 goto out;
78017 }
78018
78019 -static int inline
78020 +static inline int
78021 xfrm_dst_alloc_copy(void **target, const void *src, int size)
78022 {
78023 if (!*target) {
78024 @@ -1542,7 +1542,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
78025 return 0;
78026 }
78027
78028 -static int inline
78029 +static inline int
78030 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78031 {
78032 #ifdef CONFIG_XFRM_SUB_POLICY
78033 @@ -1554,7 +1554,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
78034 #endif
78035 }
78036
78037 -static int inline
78038 +static inline int
78039 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
78040 {
78041 #ifdef CONFIG_XFRM_SUB_POLICY
78042 @@ -1648,7 +1648,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
78043
78044 xdst->num_pols = num_pols;
78045 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
78046 - xdst->policy_genid = atomic_read(&pols[0]->genid);
78047 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
78048
78049 return xdst;
78050 }
78051 @@ -2348,7 +2348,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
78052 if (xdst->xfrm_genid != dst->xfrm->genid)
78053 return 0;
78054 if (xdst->num_pols > 0 &&
78055 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
78056 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
78057 return 0;
78058
78059 mtu = dst_mtu(dst->child);
78060 @@ -2885,7 +2885,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
78061 sizeof(pol->xfrm_vec[i].saddr));
78062 pol->xfrm_vec[i].encap_family = mp->new_family;
78063 /* flush bundles */
78064 - atomic_inc(&pol->genid);
78065 + atomic_inc_unchecked(&pol->genid);
78066 }
78067 }
78068
78069 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
78070 index ff1720d..ed8475e 100644
78071 --- a/scripts/Makefile.build
78072 +++ b/scripts/Makefile.build
78073 @@ -111,7 +111,7 @@ endif
78074 endif
78075
78076 # Do not include host rules unless needed
78077 -ifneq ($(hostprogs-y)$(hostprogs-m),)
78078 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
78079 include scripts/Makefile.host
78080 endif
78081
78082 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
78083 index 686cb0d..9d653bf 100644
78084 --- a/scripts/Makefile.clean
78085 +++ b/scripts/Makefile.clean
78086 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
78087 __clean-files := $(extra-y) $(always) \
78088 $(targets) $(clean-files) \
78089 $(host-progs) \
78090 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
78091 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
78092 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
78093
78094 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
78095
78096 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
78097 index 1ac414f..38575f7 100644
78098 --- a/scripts/Makefile.host
78099 +++ b/scripts/Makefile.host
78100 @@ -31,6 +31,8 @@
78101 # Note: Shared libraries consisting of C++ files are not supported
78102
78103 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
78104 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
78105 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
78106
78107 # C code
78108 # Executables compiled from a single .c file
78109 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
78110 # Shared libaries (only .c supported)
78111 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
78112 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
78113 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
78114 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
78115 # Remove .so files from "xxx-objs"
78116 host-cobjs := $(filter-out %.so,$(host-cobjs))
78117 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
78118
78119 -#Object (.o) files used by the shared libaries
78120 +# Object (.o) files used by the shared libaries
78121 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
78122 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
78123
78124 # output directory for programs/.o files
78125 # hostprogs-y := tools/build may have been specified. Retrieve directory
78126 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
78127 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
78128 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
78129 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
78130 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
78131 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
78132 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
78133 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
78134
78135 obj-dirs += $(host-objdirs)
78136 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
78137 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
78138 $(call if_changed_dep,host-cshobjs)
78139
78140 +# Compile .c file, create position independent .o file
78141 +# host-cxxshobjs -> .o
78142 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
78143 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
78144 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
78145 + $(call if_changed_dep,host-cxxshobjs)
78146 +
78147 # Link a shared library, based on position independent .o files
78148 # *.o -> .so shared library (host-cshlib)
78149 quiet_cmd_host-cshlib = HOSTLLD -shared $@
78150 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
78151 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
78152 $(call if_changed,host-cshlib)
78153
78154 +# Link a shared library, based on position independent .o files
78155 +# *.o -> .so shared library (host-cxxshlib)
78156 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
78157 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
78158 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
78159 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
78160 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
78161 + $(call if_changed,host-cxxshlib)
78162 +
78163 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
78164 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
78165 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
78166
78167 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
78168 index cb1f50c..cef2a7c 100644
78169 --- a/scripts/basic/fixdep.c
78170 +++ b/scripts/basic/fixdep.c
78171 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
78172 /*
78173 * Lookup a value in the configuration string.
78174 */
78175 -static int is_defined_config(const char *name, int len, unsigned int hash)
78176 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
78177 {
78178 struct item *aux;
78179
78180 @@ -211,10 +211,10 @@ static void clear_config(void)
78181 /*
78182 * Record the use of a CONFIG_* word.
78183 */
78184 -static void use_config(const char *m, int slen)
78185 +static void use_config(const char *m, unsigned int slen)
78186 {
78187 unsigned int hash = strhash(m, slen);
78188 - int c, i;
78189 + unsigned int c, i;
78190
78191 if (is_defined_config(m, slen, hash))
78192 return;
78193 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
78194
78195 static void parse_config_file(const char *map, size_t len)
78196 {
78197 - const int *end = (const int *) (map + len);
78198 + const unsigned int *end = (const unsigned int *) (map + len);
78199 /* start at +1, so that p can never be < map */
78200 - const int *m = (const int *) map + 1;
78201 + const unsigned int *m = (const unsigned int *) map + 1;
78202 const char *p, *q;
78203
78204 for (; m < end; m++) {
78205 @@ -406,7 +406,7 @@ static void print_deps(void)
78206 static void traps(void)
78207 {
78208 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
78209 - int *p = (int *)test;
78210 + unsigned int *p = (unsigned int *)test;
78211
78212 if (*p != INT_CONF) {
78213 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
78214 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
78215 new file mode 100644
78216 index 0000000..008ac1a
78217 --- /dev/null
78218 +++ b/scripts/gcc-plugin.sh
78219 @@ -0,0 +1,17 @@
78220 +#!/bin/bash
78221 +plugincc=`$1 -x c -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
78222 +#include "gcc-plugin.h"
78223 +#include "tree.h"
78224 +#include "tm.h"
78225 +#include "rtl.h"
78226 +#ifdef ENABLE_BUILD_WITH_CXX
78227 +#warning $2
78228 +#else
78229 +#warning $1
78230 +#endif
78231 +EOF`
78232 +if [ $? -eq 0 ]
78233 +then
78234 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
78235 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
78236 +fi
78237 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
78238 index 44ddaa5..a3119bd 100644
78239 --- a/scripts/mod/file2alias.c
78240 +++ b/scripts/mod/file2alias.c
78241 @@ -128,7 +128,7 @@ static void device_id_check(const char *modname, const char *device_id,
78242 unsigned long size, unsigned long id_size,
78243 void *symval)
78244 {
78245 - int i;
78246 + unsigned int i;
78247
78248 if (size % id_size || size < id_size) {
78249 if (cross_build != 0)
78250 @@ -158,7 +158,7 @@ static void device_id_check(const char *modname, const char *device_id,
78251 /* USB is special because the bcdDevice can be matched against a numeric range */
78252 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
78253 static void do_usb_entry(struct usb_device_id *id,
78254 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
78255 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
78256 unsigned char range_lo, unsigned char range_hi,
78257 unsigned char max, struct module *mod)
78258 {
78259 @@ -259,7 +259,7 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
78260 {
78261 unsigned int devlo, devhi;
78262 unsigned char chi, clo, max;
78263 - int ndigits;
78264 + unsigned int ndigits;
78265
78266 id->match_flags = TO_NATIVE(id->match_flags);
78267 id->idVendor = TO_NATIVE(id->idVendor);
78268 @@ -501,7 +501,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
78269 for (i = 0; i < count; i++) {
78270 const char *id = (char *)devs[i].id;
78271 char acpi_id[sizeof(devs[0].id)];
78272 - int j;
78273 + unsigned int j;
78274
78275 buf_printf(&mod->dev_table_buf,
78276 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78277 @@ -531,7 +531,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78278
78279 for (j = 0; j < PNP_MAX_DEVICES; j++) {
78280 const char *id = (char *)card->devs[j].id;
78281 - int i2, j2;
78282 + unsigned int i2, j2;
78283 int dup = 0;
78284
78285 if (!id[0])
78286 @@ -557,7 +557,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
78287 /* add an individual alias for every device entry */
78288 if (!dup) {
78289 char acpi_id[sizeof(card->devs[0].id)];
78290 - int k;
78291 + unsigned int k;
78292
78293 buf_printf(&mod->dev_table_buf,
78294 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
78295 @@ -882,7 +882,7 @@ static void dmi_ascii_filter(char *d, const char *s)
78296 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
78297 char *alias)
78298 {
78299 - int i, j;
78300 + unsigned int i, j;
78301
78302 sprintf(alias, "dmi*");
78303
78304 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
78305 index c4e7d15..dad16c1 100644
78306 --- a/scripts/mod/modpost.c
78307 +++ b/scripts/mod/modpost.c
78308 @@ -922,6 +922,7 @@ enum mismatch {
78309 ANY_INIT_TO_ANY_EXIT,
78310 ANY_EXIT_TO_ANY_INIT,
78311 EXPORT_TO_INIT_EXIT,
78312 + DATA_TO_TEXT
78313 };
78314
78315 struct sectioncheck {
78316 @@ -1030,6 +1031,12 @@ const struct sectioncheck sectioncheck[] = {
78317 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
78318 .mismatch = EXPORT_TO_INIT_EXIT,
78319 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
78320 +},
78321 +/* Do not reference code from writable data */
78322 +{
78323 + .fromsec = { DATA_SECTIONS, NULL },
78324 + .tosec = { TEXT_SECTIONS, NULL },
78325 + .mismatch = DATA_TO_TEXT
78326 }
78327 };
78328
78329 @@ -1152,10 +1159,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
78330 continue;
78331 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
78332 continue;
78333 - if (sym->st_value == addr)
78334 - return sym;
78335 /* Find a symbol nearby - addr are maybe negative */
78336 d = sym->st_value - addr;
78337 + if (d == 0)
78338 + return sym;
78339 if (d < 0)
78340 d = addr - sym->st_value;
78341 if (d < distance) {
78342 @@ -1434,6 +1441,14 @@ static void report_sec_mismatch(const char *modname,
78343 tosym, prl_to, prl_to, tosym);
78344 free(prl_to);
78345 break;
78346 + case DATA_TO_TEXT:
78347 +#if 0
78348 + fprintf(stderr,
78349 + "The %s %s:%s references\n"
78350 + "the %s %s:%s%s\n",
78351 + from, fromsec, fromsym, to, tosec, tosym, to_p);
78352 +#endif
78353 + break;
78354 }
78355 fprintf(stderr, "\n");
78356 }
78357 @@ -1668,7 +1683,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
78358 static void check_sec_ref(struct module *mod, const char *modname,
78359 struct elf_info *elf)
78360 {
78361 - int i;
78362 + unsigned int i;
78363 Elf_Shdr *sechdrs = elf->sechdrs;
78364
78365 /* Walk through all sections */
78366 @@ -1766,7 +1781,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
78367 va_end(ap);
78368 }
78369
78370 -void buf_write(struct buffer *buf, const char *s, int len)
78371 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
78372 {
78373 if (buf->size - buf->pos < len) {
78374 buf->size += len + SZ;
78375 @@ -1984,7 +1999,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
78376 if (fstat(fileno(file), &st) < 0)
78377 goto close_write;
78378
78379 - if (st.st_size != b->pos)
78380 + if (st.st_size != (off_t)b->pos)
78381 goto close_write;
78382
78383 tmp = NOFAIL(malloc(b->pos));
78384 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
78385 index 51207e4..f7d603d 100644
78386 --- a/scripts/mod/modpost.h
78387 +++ b/scripts/mod/modpost.h
78388 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
78389
78390 struct buffer {
78391 char *p;
78392 - int pos;
78393 - int size;
78394 + unsigned int pos;
78395 + unsigned int size;
78396 };
78397
78398 void __attribute__((format(printf, 2, 3)))
78399 buf_printf(struct buffer *buf, const char *fmt, ...);
78400
78401 void
78402 -buf_write(struct buffer *buf, const char *s, int len);
78403 +buf_write(struct buffer *buf, const char *s, unsigned int len);
78404
78405 struct module {
78406 struct module *next;
78407 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
78408 index 9dfcd6d..099068e 100644
78409 --- a/scripts/mod/sumversion.c
78410 +++ b/scripts/mod/sumversion.c
78411 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
78412 goto out;
78413 }
78414
78415 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
78416 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
78417 warn("writing sum in %s failed: %s\n",
78418 filename, strerror(errno));
78419 goto out;
78420 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
78421 index 5c11312..72742b5 100644
78422 --- a/scripts/pnmtologo.c
78423 +++ b/scripts/pnmtologo.c
78424 @@ -237,14 +237,14 @@ static void write_header(void)
78425 fprintf(out, " * Linux logo %s\n", logoname);
78426 fputs(" */\n\n", out);
78427 fputs("#include <linux/linux_logo.h>\n\n", out);
78428 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
78429 + fprintf(out, "static unsigned char %s_data[] = {\n",
78430 logoname);
78431 }
78432
78433 static void write_footer(void)
78434 {
78435 fputs("\n};\n\n", out);
78436 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
78437 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
78438 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
78439 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
78440 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
78441 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
78442 fputs("\n};\n\n", out);
78443
78444 /* write logo clut */
78445 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
78446 + fprintf(out, "static unsigned char %s_clut[] = {\n",
78447 logoname);
78448 write_hex_cnt = 0;
78449 for (i = 0; i < logo_clutsize; i++) {
78450 diff --git a/security/Kconfig b/security/Kconfig
78451 index ccc61f8..00dd2a2 100644
78452 --- a/security/Kconfig
78453 +++ b/security/Kconfig
78454 @@ -4,6 +4,870 @@
78455
78456 menu "Security options"
78457
78458 +menu "Grsecurity"
78459 +
78460 + config ARCH_TRACK_EXEC_LIMIT
78461 + bool
78462 +
78463 + config PAX_KERNEXEC_PLUGIN
78464 + bool
78465 +
78466 + config PAX_PER_CPU_PGD
78467 + bool
78468 +
78469 + config TASK_SIZE_MAX_SHIFT
78470 + int
78471 + depends on X86_64
78472 + default 47 if !PAX_PER_CPU_PGD
78473 + default 42 if PAX_PER_CPU_PGD
78474 +
78475 + config PAX_ENABLE_PAE
78476 + bool
78477 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
78478 +
78479 +config GRKERNSEC
78480 + bool "Grsecurity"
78481 + select CRYPTO
78482 + select CRYPTO_SHA256
78483 + select STOP_MACHINE
78484 + help
78485 + If you say Y here, you will be able to configure many features
78486 + that will enhance the security of your system. It is highly
78487 + recommended that you say Y here and read through the help
78488 + for each option so that you fully understand the features and
78489 + can evaluate their usefulness for your machine.
78490 +
78491 +choice
78492 + prompt "Configuration Method"
78493 + depends on GRKERNSEC
78494 + default GRKERNSEC_CONFIG_CUSTOM
78495 + help
78496 +
78497 +config GRKERNSEC_CONFIG_AUTO
78498 + bool "Automatic"
78499 + help
78500 + If you choose this configuration method, you'll be able to answer a small
78501 + number of simple questions about how you plan to use this kernel.
78502 + The settings of grsecurity and PaX will be automatically configured for
78503 + the highest commonly-used settings within the provided constraints.
78504 +
78505 + If you require additional configuration, custom changes can still be made
78506 + from the "custom configuration" menu.
78507 +
78508 +config GRKERNSEC_CONFIG_CUSTOM
78509 + bool "Custom"
78510 + help
78511 + If you choose this configuration method, you'll be able to configure all
78512 + grsecurity and PaX settings manually. Via this method, no options are
78513 + automatically enabled.
78514 +
78515 +endchoice
78516 +
78517 +choice
78518 + prompt "Usage Type"
78519 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
78520 + default GRKERNSEC_CONFIG_SERVER
78521 + help
78522 +
78523 +config GRKERNSEC_CONFIG_SERVER
78524 + bool "Server"
78525 + help
78526 + Choose this option if you plan to use this kernel on a server.
78527 +
78528 +config GRKERNSEC_CONFIG_DESKTOP
78529 + bool "Desktop"
78530 + help
78531 + Choose this option if you plan to use this kernel on a desktop.
78532 +
78533 +endchoice
78534 +
78535 +choice
78536 + prompt "Virtualization Type"
78537 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
78538 + default GRKERNSEC_CONFIG_VIRT_NONE
78539 + help
78540 +
78541 +config GRKERNSEC_CONFIG_VIRT_NONE
78542 + bool "None"
78543 + help
78544 + Choose this option if this kernel will be run on bare metal.
78545 +
78546 +config GRKERNSEC_CONFIG_VIRT_GUEST
78547 + bool "Guest"
78548 + help
78549 + Choose this option if this kernel will be run as a VM guest.
78550 +
78551 +config GRKERNSEC_CONFIG_VIRT_HOST
78552 + bool "Host"
78553 + help
78554 + Choose this option if this kernel will be run as a VM host.
78555 +
78556 +endchoice
78557 +
78558 +choice
78559 + prompt "Virtualization Hardware"
78560 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
78561 + help
78562 +
78563 +config GRKERNSEC_CONFIG_VIRT_EPT
78564 + bool "EPT/RVI Processor Support"
78565 + depends on X86
78566 + help
78567 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
78568 + hardware virtualization. This allows for additional kernel hardening protections
78569 + to operate without additional performance impact.
78570 +
78571 + To see if your Intel processor supports EPT, see:
78572 + http://ark.intel.com/Products/VirtualizationTechnology
78573 + (Most Core i3/5/7 support EPT)
78574 +
78575 + To see if your AMD processor supports RVI, see:
78576 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
78577 +
78578 +config GRKERNSEC_CONFIG_VIRT_SOFT
78579 + bool "First-gen/No Hardware Virtualization"
78580 + help
78581 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
78582 + support hardware virtualization or doesn't support the EPT/RVI extensions.
78583 +
78584 +endchoice
78585 +
78586 +choice
78587 + prompt "Virtualization Software"
78588 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
78589 + help
78590 +
78591 +config GRKERNSEC_CONFIG_VIRT_XEN
78592 + bool "Xen"
78593 + help
78594 + Choose this option if this kernel is running as a Xen guest or host.
78595 +
78596 +config GRKERNSEC_CONFIG_VIRT_VMWARE
78597 + bool "VMWare"
78598 + help
78599 + Choose this option if this kernel is running as a VMWare guest or host.
78600 +
78601 +config GRKERNSEC_CONFIG_VIRT_KVM
78602 + bool "KVM"
78603 + help
78604 + Choose this option if this kernel is running as a KVM guest or host.
78605 +
78606 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
78607 + bool "VirtualBox"
78608 + help
78609 + Choose this option if this kernel is running as a VirtualBox guest or host.
78610 +
78611 +endchoice
78612 +
78613 +choice
78614 + prompt "Required Priorities"
78615 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
78616 + default GRKERNSEC_CONFIG_PRIORITY_PERF
78617 + help
78618 +
78619 +config GRKERNSEC_CONFIG_PRIORITY_PERF
78620 + bool "Performance"
78621 + help
78622 + Choose this option if performance is of highest priority for this deployment
78623 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
78624 + and freed memory sanitizing will be disabled.
78625 +
78626 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
78627 + bool "Security"
78628 + help
78629 + Choose this option if security is of highest priority for this deployment of
78630 + grsecurity. UDEREF, kernel stack clearing, and freed memory sanitizing will
78631 + be enabled for this kernel. In a worst-case scenario, these features can
78632 + introduce a 20% performance hit (UDEREF on x64 contributing half of this hit).
78633 +
78634 +endchoice
78635 +
78636 +menu "Default Special Groups"
78637 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
78638 +
78639 +config GRKERNSEC_PROC_GID
78640 + int "GID exempted from /proc restrictions"
78641 + default 1001
78642 + help
78643 + Setting this GID determines which group will be exempted from
78644 + grsecurity's /proc restrictions, allowing users of the specified
78645 + group to view network statistics and the existence of other users'
78646 + processes on the system.
78647 +
78648 +config GRKERNSEC_TPE_GID
78649 + int "GID for untrusted users"
78650 + depends on GRKERNSEC_CONFIG_SERVER
78651 + default 1005
78652 + help
78653 + Setting this GID determines which group untrusted users should
78654 + be added to. These users will be placed under grsecurity's Trusted Path
78655 + Execution mechanism, preventing them from executing their own binaries.
78656 + The users will only be able to execute binaries in directories owned and
78657 + writable only by the root user.
78658 +
78659 +config GRKERNSEC_SYMLINKOWN_GID
78660 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
78661 + depends on GRKERNSEC_CONFIG_SERVER
78662 + default 1006
78663 + help
78664 + Setting this GID determines what group kernel-enforced
78665 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
78666 + is enabled, a sysctl option with name "symlinkown_gid" is created.
78667 +
78668 +
78669 +endmenu
78670 +
78671 +menu "Customize Configuration"
78672 +depends on GRKERNSEC
78673 +
78674 +menu "PaX"
78675 +
78676 +config PAX
78677 + bool "Enable various PaX features"
78678 + default y if GRKERNSEC_CONFIG_AUTO
78679 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
78680 + help
78681 + This allows you to enable various PaX features. PaX adds
78682 + intrusion prevention mechanisms to the kernel that reduce
78683 + the risks posed by exploitable memory corruption bugs.
78684 +
78685 +menu "PaX Control"
78686 + depends on PAX
78687 +
78688 +config PAX_SOFTMODE
78689 + bool 'Support soft mode'
78690 + help
78691 + Enabling this option will allow you to run PaX in soft mode, that
78692 + is, PaX features will not be enforced by default, only on executables
78693 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
78694 + support as they are the only way to mark executables for soft mode use.
78695 +
78696 + Soft mode can be activated by using the "pax_softmode=1" kernel command
78697 + line option on boot. Furthermore you can control various PaX features
78698 + at runtime via the entries in /proc/sys/kernel/pax.
78699 +
78700 +config PAX_EI_PAX
78701 + bool 'Use legacy ELF header marking'
78702 + default y if GRKERNSEC_CONFIG_AUTO
78703 + help
78704 + Enabling this option will allow you to control PaX features on
78705 + a per executable basis via the 'chpax' utility available at
78706 + http://pax.grsecurity.net/. The control flags will be read from
78707 + an otherwise reserved part of the ELF header. This marking has
78708 + numerous drawbacks (no support for soft-mode, toolchain does not
78709 + know about the non-standard use of the ELF header) therefore it
78710 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
78711 + support.
78712 +
78713 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
78714 + support as well, they will override the legacy EI_PAX marks.
78715 +
78716 + If you enable none of the marking options then all applications
78717 + will run with PaX enabled on them by default.
78718 +
78719 +config PAX_PT_PAX_FLAGS
78720 + bool 'Use ELF program header marking'
78721 + default y if GRKERNSEC_CONFIG_AUTO
78722 + help
78723 + Enabling this option will allow you to control PaX features on
78724 + a per executable basis via the 'paxctl' utility available at
78725 + http://pax.grsecurity.net/. The control flags will be read from
78726 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
78727 + has the benefits of supporting both soft mode and being fully
78728 + integrated into the toolchain (the binutils patch is available
78729 + from http://pax.grsecurity.net).
78730 +
78731 + Note that if you enable the legacy EI_PAX marking support as well,
78732 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
78733 +
78734 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78735 + must make sure that the marks are the same if a binary has both marks.
78736 +
78737 + If you enable none of the marking options then all applications
78738 + will run with PaX enabled on them by default.
78739 +
78740 +config PAX_XATTR_PAX_FLAGS
78741 + bool 'Use filesystem extended attributes marking'
78742 + default y if GRKERNSEC_CONFIG_AUTO
78743 + select CIFS_XATTR if CIFS
78744 + select EXT2_FS_XATTR if EXT2_FS
78745 + select EXT3_FS_XATTR if EXT3_FS
78746 + select EXT4_FS_XATTR if EXT4_FS
78747 + select JFFS2_FS_XATTR if JFFS2_FS
78748 + select REISERFS_FS_XATTR if REISERFS_FS
78749 + select SQUASHFS_XATTR if SQUASHFS
78750 + select TMPFS_XATTR if TMPFS
78751 + select UBIFS_FS_XATTR if UBIFS_FS
78752 + help
78753 + Enabling this option will allow you to control PaX features on
78754 + a per executable basis via the 'setfattr' utility. The control
78755 + flags will be read from the user.pax.flags extended attribute of
78756 + the file. This marking has the benefit of supporting binary-only
78757 + applications that self-check themselves (e.g., skype) and would
78758 + not tolerate chpax/paxctl changes. The main drawback is that
78759 + extended attributes are not supported by some filesystems (e.g.,
78760 + isofs, udf, vfat) so copying files through such filesystems will
78761 + lose the extended attributes and these PaX markings.
78762 +
78763 + Note that if you enable the legacy EI_PAX marking support as well,
78764 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
78765 +
78766 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
78767 + must make sure that the marks are the same if a binary has both marks.
78768 +
78769 + If you enable none of the marking options then all applications
78770 + will run with PaX enabled on them by default.
78771 +
78772 +choice
78773 + prompt 'MAC system integration'
78774 + default PAX_HAVE_ACL_FLAGS
78775 + help
78776 + Mandatory Access Control systems have the option of controlling
78777 + PaX flags on a per executable basis, choose the method supported
78778 + by your particular system.
78779 +
78780 + - "none": if your MAC system does not interact with PaX,
78781 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
78782 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
78783 +
78784 + NOTE: this option is for developers/integrators only.
78785 +
78786 + config PAX_NO_ACL_FLAGS
78787 + bool 'none'
78788 +
78789 + config PAX_HAVE_ACL_FLAGS
78790 + bool 'direct'
78791 +
78792 + config PAX_HOOK_ACL_FLAGS
78793 + bool 'hook'
78794 +endchoice
78795 +
78796 +endmenu
78797 +
78798 +menu "Non-executable pages"
78799 + depends on PAX
78800 +
78801 +config PAX_NOEXEC
78802 + bool "Enforce non-executable pages"
78803 + default y if GRKERNSEC_CONFIG_AUTO
78804 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
78805 + help
78806 + By design some architectures do not allow for protecting memory
78807 + pages against execution or even if they do, Linux does not make
78808 + use of this feature. In practice this means that if a page is
78809 + readable (such as the stack or heap) it is also executable.
78810 +
78811 + There is a well known exploit technique that makes use of this
78812 + fact and a common programming mistake where an attacker can
78813 + introduce code of his choice somewhere in the attacked program's
78814 + memory (typically the stack or the heap) and then execute it.
78815 +
78816 + If the attacked program was running with different (typically
78817 + higher) privileges than that of the attacker, then he can elevate
78818 + his own privilege level (e.g. get a root shell, write to files for
78819 + which he does not have write access to, etc).
78820 +
78821 + Enabling this option will let you choose from various features
78822 + that prevent the injection and execution of 'foreign' code in
78823 + a program.
78824 +
78825 + This will also break programs that rely on the old behaviour and
78826 + expect that dynamically allocated memory via the malloc() family
78827 + of functions is executable (which it is not). Notable examples
78828 + are the XFree86 4.x server, the java runtime and wine.
78829 +
78830 +config PAX_PAGEEXEC
78831 + bool "Paging based non-executable pages"
78832 + default y if GRKERNSEC_CONFIG_AUTO
78833 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
78834 + select S390_SWITCH_AMODE if S390
78835 + select S390_EXEC_PROTECT if S390
78836 + select ARCH_TRACK_EXEC_LIMIT if X86_32
78837 + help
78838 + This implementation is based on the paging feature of the CPU.
78839 + On i386 without hardware non-executable bit support there is a
78840 + variable but usually low performance impact, however on Intel's
78841 + P4 core based CPUs it is very high so you should not enable this
78842 + for kernels meant to be used on such CPUs.
78843 +
78844 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
78845 + with hardware non-executable bit support there is no performance
78846 + impact, on ppc the impact is negligible.
78847 +
78848 + Note that several architectures require various emulations due to
78849 + badly designed userland ABIs, this will cause a performance impact
78850 + but will disappear as soon as userland is fixed. For example, ppc
78851 + userland MUST have been built with secure-plt by a recent toolchain.
78852 +
78853 +config PAX_SEGMEXEC
78854 + bool "Segmentation based non-executable pages"
78855 + default y if GRKERNSEC_CONFIG_AUTO
78856 + depends on PAX_NOEXEC && X86_32
78857 + help
78858 + This implementation is based on the segmentation feature of the
78859 + CPU and has a very small performance impact, however applications
78860 + will be limited to a 1.5 GB address space instead of the normal
78861 + 3 GB.
78862 +
78863 +config PAX_EMUTRAMP
78864 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
78865 + default y if PARISC
78866 + help
78867 + There are some programs and libraries that for one reason or
78868 + another attempt to execute special small code snippets from
78869 + non-executable memory pages. Most notable examples are the
78870 + signal handler return code generated by the kernel itself and
78871 + the GCC trampolines.
78872 +
78873 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
78874 + such programs will no longer work under your kernel.
78875 +
78876 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
78877 + utilities to enable trampoline emulation for the affected programs
78878 + yet still have the protection provided by the non-executable pages.
78879 +
78880 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
78881 + your system will not even boot.
78882 +
78883 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
78884 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
78885 + for the affected files.
78886 +
78887 + NOTE: enabling this feature *may* open up a loophole in the
78888 + protection provided by non-executable pages that an attacker
78889 + could abuse. Therefore the best solution is to not have any
78890 + files on your system that would require this option. This can
78891 + be achieved by not using libc5 (which relies on the kernel
78892 + signal handler return code) and not using or rewriting programs
78893 + that make use of the nested function implementation of GCC.
78894 + Skilled users can just fix GCC itself so that it implements
78895 + nested function calls in a way that does not interfere with PaX.
78896 +
78897 +config PAX_EMUSIGRT
78898 + bool "Automatically emulate sigreturn trampolines"
78899 + depends on PAX_EMUTRAMP && PARISC
78900 + default y
78901 + help
78902 + Enabling this option will have the kernel automatically detect
78903 + and emulate signal return trampolines executing on the stack
78904 + that would otherwise lead to task termination.
78905 +
78906 + This solution is intended as a temporary one for users with
78907 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
78908 + Modula-3 runtime, etc) or executables linked to such, basically
78909 + everything that does not specify its own SA_RESTORER function in
78910 + normal executable memory like glibc 2.1+ does.
78911 +
78912 + On parisc you MUST enable this option, otherwise your system will
78913 + not even boot.
78914 +
78915 + NOTE: this feature cannot be disabled on a per executable basis
78916 + and since it *does* open up a loophole in the protection provided
78917 + by non-executable pages, the best solution is to not have any
78918 + files on your system that would require this option.
78919 +
78920 +config PAX_MPROTECT
78921 + bool "Restrict mprotect()"
78922 + default y if GRKERNSEC_CONFIG_AUTO
78923 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
78924 + help
78925 + Enabling this option will prevent programs from
78926 + - changing the executable status of memory pages that were
78927 + not originally created as executable,
78928 + - making read-only executable pages writable again,
78929 + - creating executable pages from anonymous memory,
78930 + - making read-only-after-relocations (RELRO) data pages writable again.
78931 +
78932 + You should say Y here to complete the protection provided by
78933 + the enforcement of non-executable pages.
78934 +
78935 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
78936 + this feature on a per file basis.
78937 +
78938 +config PAX_MPROTECT_COMPAT
78939 + bool "Use legacy/compat protection demoting (read help)"
78940 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
78941 + depends on PAX_MPROTECT
78942 + help
78943 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
78944 + by sending the proper error code to the application. For some broken
78945 + userland, this can cause problems with Python or other applications. The
78946 + current implementation however allows for applications like clamav to
78947 + detect if JIT compilation/execution is allowed and to fall back gracefully
78948 + to an interpreter-based mode if it does not. While we encourage everyone
78949 + to use the current implementation as-is and push upstream to fix broken
78950 + userland (note that the RWX logging option can assist with this), in some
78951 + environments this may not be possible. Having to disable MPROTECT
78952 + completely on certain binaries reduces the security benefit of PaX,
78953 + so this option is provided for those environments to revert to the old
78954 + behavior.
78955 +
78956 +config PAX_ELFRELOCS
78957 + bool "Allow ELF text relocations (read help)"
78958 + depends on PAX_MPROTECT
78959 + default n
78960 + help
78961 + Non-executable pages and mprotect() restrictions are effective
78962 + in preventing the introduction of new executable code into an
78963 + attacked task's address space. There remain only two venues
78964 + for this kind of attack: if the attacker can execute already
78965 + existing code in the attacked task then he can either have it
78966 + create and mmap() a file containing his code or have it mmap()
78967 + an already existing ELF library that does not have position
78968 + independent code in it and use mprotect() on it to make it
78969 + writable and copy his code there. While protecting against
78970 + the former approach is beyond PaX, the latter can be prevented
78971 + by having only PIC ELF libraries on one's system (which do not
78972 + need to relocate their code). If you are sure this is your case,
78973 + as is the case with all modern Linux distributions, then leave
78974 + this option disabled. You should say 'n' here.
78975 +
78976 +config PAX_ETEXECRELOCS
78977 + bool "Allow ELF ET_EXEC text relocations"
78978 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
78979 + select PAX_ELFRELOCS
78980 + default y
78981 + help
78982 + On some architectures there are incorrectly created applications
78983 + that require text relocations and would not work without enabling
78984 + this option. If you are an alpha, ia64 or parisc user, you should
78985 + enable this option and disable it once you have made sure that
78986 + none of your applications need it.
78987 +
78988 +config PAX_EMUPLT
78989 + bool "Automatically emulate ELF PLT"
78990 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
78991 + default y
78992 + help
78993 + Enabling this option will have the kernel automatically detect
78994 + and emulate the Procedure Linkage Table entries in ELF files.
78995 + On some architectures such entries are in writable memory, and
78996 + become non-executable leading to task termination. Therefore
78997 + it is mandatory that you enable this option on alpha, parisc,
78998 + sparc and sparc64, otherwise your system would not even boot.
78999 +
79000 + NOTE: this feature *does* open up a loophole in the protection
79001 + provided by the non-executable pages, therefore the proper
79002 + solution is to modify the toolchain to produce a PLT that does
79003 + not need to be writable.
79004 +
79005 +config PAX_DLRESOLVE
79006 + bool 'Emulate old glibc resolver stub'
79007 + depends on PAX_EMUPLT && SPARC
79008 + default n
79009 + help
79010 + This option is needed if userland has an old glibc (before 2.4)
79011 + that puts a 'save' instruction into the runtime generated resolver
79012 + stub that needs special emulation.
79013 +
79014 +config PAX_KERNEXEC
79015 + bool "Enforce non-executable kernel pages"
79016 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
79017 + depends on (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
79018 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
79019 + select PAX_KERNEXEC_PLUGIN if X86_64
79020 + help
79021 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
79022 + that is, enabling this option will make it harder to inject
79023 + and execute 'foreign' code in kernel memory itself.
79024 +
79025 + Note that on x86_64 kernels there is a known regression when
79026 + this feature and KVM/VMX are both enabled in the host kernel.
79027 +
79028 +choice
79029 + prompt "Return Address Instrumentation Method"
79030 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
79031 + depends on PAX_KERNEXEC_PLUGIN
79032 + help
79033 + Select the method used to instrument function pointer dereferences.
79034 + Note that binary modules cannot be instrumented by this approach.
79035 +
79036 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
79037 + bool "bts"
79038 + help
79039 + This method is compatible with binary only modules but has
79040 + a higher runtime overhead.
79041 +
79042 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
79043 + bool "or"
79044 + depends on !PARAVIRT
79045 + help
79046 + This method is incompatible with binary only modules but has
79047 + a lower runtime overhead.
79048 +endchoice
79049 +
79050 +config PAX_KERNEXEC_PLUGIN_METHOD
79051 + string
79052 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
79053 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
79054 + default ""
79055 +
79056 +config PAX_KERNEXEC_MODULE_TEXT
79057 + int "Minimum amount of memory reserved for module code"
79058 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
79059 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
79060 + depends on PAX_KERNEXEC && X86_32 && MODULES
79061 + help
79062 + Due to implementation details the kernel must reserve a fixed
79063 + amount of memory for module code at compile time that cannot be
79064 + changed at runtime. Here you can specify the minimum amount
79065 + in MB that will be reserved. Due to the same implementation
79066 + details this size will always be rounded up to the next 2/4 MB
79067 + boundary (depends on PAE) so the actually available memory for
79068 + module code will usually be more than this minimum.
79069 +
79070 + The default 4 MB should be enough for most users but if you have
79071 + an excessive number of modules (e.g., most distribution configs
79072 + compile many drivers as modules) or use huge modules such as
79073 + nvidia's kernel driver, you will need to adjust this amount.
79074 + A good rule of thumb is to look at your currently loaded kernel
79075 + modules and add up their sizes.
79076 +
79077 +endmenu
79078 +
79079 +menu "Address Space Layout Randomization"
79080 + depends on PAX
79081 +
79082 +config PAX_ASLR
79083 + bool "Address Space Layout Randomization"
79084 + default y if GRKERNSEC_CONFIG_AUTO
79085 + help
79086 + Many if not most exploit techniques rely on the knowledge of
79087 + certain addresses in the attacked program. The following options
79088 + will allow the kernel to apply a certain amount of randomization
79089 + to specific parts of the program thereby forcing an attacker to
79090 + guess them in most cases. Any failed guess will most likely crash
79091 + the attacked program which allows the kernel to detect such attempts
79092 + and react on them. PaX itself provides no reaction mechanisms,
79093 + instead it is strongly encouraged that you make use of Nergal's
79094 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
79095 + (http://www.grsecurity.net/) built-in crash detection features or
79096 + develop one yourself.
79097 +
79098 + By saying Y here you can choose to randomize the following areas:
79099 + - top of the task's kernel stack
79100 + - top of the task's userland stack
79101 + - base address for mmap() requests that do not specify one
79102 + (this includes all libraries)
79103 + - base address of the main executable
79104 +
79105 + It is strongly recommended to say Y here as address space layout
79106 + randomization has negligible impact on performance yet it provides
79107 + a very effective protection.
79108 +
79109 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
79110 + this feature on a per file basis.
79111 +
79112 +config PAX_RANDKSTACK
79113 + bool "Randomize kernel stack base"
79114 + default y if GRKERNSEC_CONFIG_AUTO
79115 + depends on X86_TSC && X86
79116 + help
79117 + By saying Y here the kernel will randomize every task's kernel
79118 + stack on every system call. This will not only force an attacker
79119 + to guess it but also prevent him from making use of possible
79120 + leaked information about it.
79121 +
79122 + Since the kernel stack is a rather scarce resource, randomization
79123 + may cause unexpected stack overflows, therefore you should very
79124 + carefully test your system. Note that once enabled in the kernel
79125 + configuration, this feature cannot be disabled on a per file basis.
79126 +
79127 +config PAX_RANDUSTACK
79128 + bool "Randomize user stack base"
79129 + default y if GRKERNSEC_CONFIG_AUTO
79130 + depends on PAX_ASLR
79131 + help
79132 + By saying Y here the kernel will randomize every task's userland
79133 + stack. The randomization is done in two steps where the second
79134 + one may apply a big amount of shift to the top of the stack and
79135 + cause problems for programs that want to use lots of memory (more
79136 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
79137 + For this reason the second step can be controlled by 'chpax' or
79138 + 'paxctl' on a per file basis.
79139 +
79140 +config PAX_RANDMMAP
79141 + bool "Randomize mmap() base"
79142 + default y if GRKERNSEC_CONFIG_AUTO
79143 + depends on PAX_ASLR
79144 + help
79145 + By saying Y here the kernel will use a randomized base address for
79146 + mmap() requests that do not specify one themselves. As a result
79147 + all dynamically loaded libraries will appear at random addresses
79148 + and therefore be harder to exploit by a technique where an attacker
79149 + attempts to execute library code for his purposes (e.g. spawn a
79150 + shell from an exploited program that is running at an elevated
79151 + privilege level).
79152 +
79153 + Furthermore, if a program is relinked as a dynamic ELF file, its
79154 + base address will be randomized as well, completing the full
79155 + randomization of the address space layout. Attacking such programs
79156 + becomes a guess game. You can find an example of doing this at
79157 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
79158 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
79159 +
79160 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
79161 + feature on a per file basis.
79162 +
79163 +endmenu
79164 +
79165 +menu "Miscellaneous hardening features"
79166 +
79167 +config PAX_MEMORY_SANITIZE
79168 + bool "Sanitize all freed memory"
79169 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
79170 + depends on !HIBERNATION
79171 + help
79172 + By saying Y here the kernel will erase memory pages as soon as they
79173 + are freed. This in turn reduces the lifetime of data stored in the
79174 + pages, making it less likely that sensitive information such as
79175 + passwords, cryptographic secrets, etc stay in memory for too long.
79176 +
79177 + This is especially useful for programs whose runtime is short, long
79178 + lived processes and the kernel itself benefit from this as long as
79179 + they operate on whole memory pages and ensure timely freeing of pages
79180 + that may hold sensitive information.
79181 +
79182 + The tradeoff is performance impact, on a single CPU system kernel
79183 + compilation sees a 3% slowdown, other systems and workloads may vary
79184 + and you are advised to test this feature on your expected workload
79185 + before deploying it.
79186 +
79187 + Note that this feature does not protect data stored in live pages,
79188 + e.g., process memory swapped to disk may stay there for a long time.
79189 +
79190 +config PAX_MEMORY_STACKLEAK
79191 + bool "Sanitize kernel stack"
79192 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
79193 + depends on X86
79194 + help
79195 + By saying Y here the kernel will erase the kernel stack before it
79196 + returns from a system call. This in turn reduces the information
79197 + that a kernel stack leak bug can reveal.
79198 +
79199 + Note that such a bug can still leak information that was put on
79200 + the stack by the current system call (the one eventually triggering
79201 + the bug) but traces of earlier system calls on the kernel stack
79202 + cannot leak anymore.
79203 +
79204 + The tradeoff is performance impact: on a single CPU system kernel
79205 + compilation sees a 1% slowdown, other systems and workloads may vary
79206 + and you are advised to test this feature on your expected workload
79207 + before deploying it.
79208 +
79209 + Note: full support for this feature requires gcc with plugin support
79210 + so make sure your compiler is at least gcc 4.5.0. Using older gcc
79211 + versions means that functions with large enough stack frames may
79212 + leave uninitialized memory behind that may be exposed to a later
79213 + syscall leaking the stack.
79214 +
79215 +config PAX_MEMORY_UDEREF
79216 + bool "Prevent invalid userland pointer dereference"
79217 + default y if GRKERNSEC_CONFIG_AUTO && (X86_32 || (X86_64 && GRKERNSEC_CONFIG_PRIORITY_SECURITY)) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
79218 + depends on X86 && !UML_X86 && !XEN
79219 + select PAX_PER_CPU_PGD if X86_64
79220 + help
79221 + By saying Y here the kernel will be prevented from dereferencing
79222 + userland pointers in contexts where the kernel expects only kernel
79223 + pointers. This is both a useful runtime debugging feature and a
79224 + security measure that prevents exploiting a class of kernel bugs.
79225 +
79226 + The tradeoff is that some virtualization solutions may experience
79227 + a huge slowdown and therefore you should not enable this feature
79228 + for kernels meant to run in such environments. Whether a given VM
79229 + solution is affected or not is best determined by simply trying it
79230 + out, the performance impact will be obvious right on boot as this
79231 + mechanism engages from very early on. A good rule of thumb is that
79232 + VMs running on CPUs without hardware virtualization support (i.e.,
79233 + the majority of IA-32 CPUs) will likely experience the slowdown.
79234 +
79235 +config PAX_REFCOUNT
79236 + bool "Prevent various kernel object reference counter overflows"
79237 + default y if GRKERNSEC_CONFIG_AUTO
79238 + depends on GRKERNSEC && ((ARM && (CPU_32v6 || CPU_32v6K || CPU_32v7)) || SPARC64 || X86)
79239 + help
79240 + By saying Y here the kernel will detect and prevent overflowing
79241 + various (but not all) kinds of object reference counters. Such
79242 + overflows can normally occur due to bugs only and are often, if
79243 + not always, exploitable.
79244 +
79245 + The tradeoff is that data structures protected by an overflowed
79246 + refcount will never be freed and therefore will leak memory. Note
79247 + that this leak also happens even without this protection but in
79248 + that case the overflow can eventually trigger the freeing of the
79249 + data structure while it is still being used elsewhere, resulting
79250 + in the exploitable situation that this feature prevents.
79251 +
79252 + Since this has a negligible performance impact, you should enable
79253 + this feature.
79254 +
79255 +config PAX_USERCOPY
79256 + bool "Harden heap object copies between kernel and userland"
79257 + default y if GRKERNSEC_CONFIG_AUTO
79258 + depends on X86 || PPC || SPARC || ARM
79259 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
79260 + help
79261 + By saying Y here the kernel will enforce the size of heap objects
79262 + when they are copied in either direction between the kernel and
79263 + userland, even if only a part of the heap object is copied.
79264 +
79265 + Specifically, this checking prevents information leaking from the
79266 + kernel heap during kernel to userland copies (if the kernel heap
79267 + object is otherwise fully initialized) and prevents kernel heap
79268 + overflows during userland to kernel copies.
79269 +
79270 + Note that the current implementation provides the strictest bounds
79271 + checks for the SLUB allocator.
79272 +
79273 + Enabling this option also enables per-slab cache protection against
79274 + data in a given cache being copied into/out of via userland
79275 + accessors. Though the whitelist of regions will be reduced over
79276 + time, it notably protects important data structures like task structs.
79277 +
79278 + If frame pointers are enabled on x86, this option will also restrict
79279 + copies into and out of the kernel stack to local variables within a
79280 + single frame.
79281 +
79282 + Since this has a negligible performance impact, you should enable
79283 + this feature.
79284 +
79285 +config PAX_SIZE_OVERFLOW
79286 + bool "Prevent various integer overflows in function size parameters"
79287 + default y if GRKERNSEC_CONFIG_AUTO
79288 + depends on X86
79289 + help
79290 + By saying Y here the kernel recomputes expressions of function
79291 + arguments marked by a size_overflow attribute with double integer
79292 + precision (DImode/TImode for 32/64 bit integer types).
79293 +
79294 + The recomputed argument is checked against INT_MAX and an event
79295 + is logged on overflow and the triggering process is killed.
79296 +
79297 + Homepage:
79298 + http://www.grsecurity.net/~ephox/overflow_plugin/
79299 +
79300 +config PAX_LATENT_ENTROPY
79301 + bool "Generate some entropy during boot"
79302 + help
79303 + By saying Y here the kernel will instrument early boot code to
79304 + extract some entropy from both original and artificially created
79305 + program state. This will help especially embedded systems where
79306 + there is little 'natural' source of entropy normally. The cost
79307 + is some slowdown of the boot process.
79308 +
79309 + Note that entropy extracted this way is not cryptographically
79310 + secure!
79311 +
79312 +endmenu
79313 +
79314 +endmenu
79315 +
79316 +source grsecurity/Kconfig
79317 +
79318 +endmenu
79319 +
79320 +endmenu
79321 +
79322 config KEYS
79323 bool "Enable access key retention support"
79324 help
79325 @@ -169,7 +1033,7 @@ config INTEL_TXT
79326 config LSM_MMAP_MIN_ADDR
79327 int "Low address space for LSM to protect from user allocation"
79328 depends on SECURITY && SECURITY_SELINUX
79329 - default 32768 if ARM
79330 + default 32768 if ALPHA || ARM || PARISC || SPARC32
79331 default 65536
79332 help
79333 This is the portion of low virtual memory which should be protected
79334 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
79335 index ad05d39..afffccb 100644
79336 --- a/security/apparmor/lsm.c
79337 +++ b/security/apparmor/lsm.c
79338 @@ -622,7 +622,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
79339 return error;
79340 }
79341
79342 -static struct security_operations apparmor_ops = {
79343 +static struct security_operations apparmor_ops __read_only = {
79344 .name = "apparmor",
79345
79346 .ptrace_access_check = apparmor_ptrace_access_check,
79347 diff --git a/security/commoncap.c b/security/commoncap.c
79348 index 71a166a..851bb3e 100644
79349 --- a/security/commoncap.c
79350 +++ b/security/commoncap.c
79351 @@ -576,6 +576,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
79352 {
79353 const struct cred *cred = current_cred();
79354
79355 + if (gr_acl_enable_at_secure())
79356 + return 1;
79357 +
79358 if (cred->uid != 0) {
79359 if (bprm->cap_effective)
79360 return 1;
79361 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
79362 index 3ccf7ac..d73ad64 100644
79363 --- a/security/integrity/ima/ima.h
79364 +++ b/security/integrity/ima/ima.h
79365 @@ -86,8 +86,8 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79366 extern spinlock_t ima_queue_lock;
79367
79368 struct ima_h_table {
79369 - atomic_long_t len; /* number of stored measurements in the list */
79370 - atomic_long_t violations;
79371 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
79372 + atomic_long_unchecked_t violations;
79373 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
79374 };
79375 extern struct ima_h_table ima_htable;
79376 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
79377 index 88a2788..581ab92 100644
79378 --- a/security/integrity/ima/ima_api.c
79379 +++ b/security/integrity/ima/ima_api.c
79380 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
79381 int result;
79382
79383 /* can overflow, only indicator */
79384 - atomic_long_inc(&ima_htable.violations);
79385 + atomic_long_inc_unchecked(&ima_htable.violations);
79386
79387 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
79388 if (!entry) {
79389 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
79390 index e1aa2b4..52027bf 100644
79391 --- a/security/integrity/ima/ima_fs.c
79392 +++ b/security/integrity/ima/ima_fs.c
79393 @@ -28,12 +28,12 @@
79394 static int valid_policy = 1;
79395 #define TMPBUFLEN 12
79396 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
79397 - loff_t *ppos, atomic_long_t *val)
79398 + loff_t *ppos, atomic_long_unchecked_t *val)
79399 {
79400 char tmpbuf[TMPBUFLEN];
79401 ssize_t len;
79402
79403 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
79404 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
79405 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
79406 }
79407
79408 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
79409 index 55a6271..ad829c3 100644
79410 --- a/security/integrity/ima/ima_queue.c
79411 +++ b/security/integrity/ima/ima_queue.c
79412 @@ -81,7 +81,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
79413 INIT_LIST_HEAD(&qe->later);
79414 list_add_tail_rcu(&qe->later, &ima_measurements);
79415
79416 - atomic_long_inc(&ima_htable.len);
79417 + atomic_long_inc_unchecked(&ima_htable.len);
79418 key = ima_hash_key(entry->digest);
79419 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
79420 return 0;
79421 diff --git a/security/keys/compat.c b/security/keys/compat.c
79422 index 4c48e13..7abdac9 100644
79423 --- a/security/keys/compat.c
79424 +++ b/security/keys/compat.c
79425 @@ -44,7 +44,7 @@ long compat_keyctl_instantiate_key_iov(
79426 if (ret == 0)
79427 goto no_payload_free;
79428
79429 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79430 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79431
79432 if (iov != iovstack)
79433 kfree(iov);
79434 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
79435 index fb767c6..b9c49c0 100644
79436 --- a/security/keys/keyctl.c
79437 +++ b/security/keys/keyctl.c
79438 @@ -935,7 +935,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
79439 /*
79440 * Copy the iovec data from userspace
79441 */
79442 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79443 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
79444 unsigned ioc)
79445 {
79446 for (; ioc > 0; ioc--) {
79447 @@ -957,7 +957,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
79448 * If successful, 0 will be returned.
79449 */
79450 long keyctl_instantiate_key_common(key_serial_t id,
79451 - const struct iovec *payload_iov,
79452 + const struct iovec __user *payload_iov,
79453 unsigned ioc,
79454 size_t plen,
79455 key_serial_t ringid)
79456 @@ -1052,7 +1052,7 @@ long keyctl_instantiate_key(key_serial_t id,
79457 [0].iov_len = plen
79458 };
79459
79460 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
79461 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
79462 }
79463
79464 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
79465 @@ -1085,7 +1085,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
79466 if (ret == 0)
79467 goto no_payload_free;
79468
79469 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
79470 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
79471
79472 if (iov != iovstack)
79473 kfree(iov);
79474 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
79475 index d605f75..2bc6be9 100644
79476 --- a/security/keys/keyring.c
79477 +++ b/security/keys/keyring.c
79478 @@ -214,15 +214,15 @@ static long keyring_read(const struct key *keyring,
79479 ret = -EFAULT;
79480
79481 for (loop = 0; loop < klist->nkeys; loop++) {
79482 + key_serial_t serial;
79483 key = klist->keys[loop];
79484 + serial = key->serial;
79485
79486 tmp = sizeof(key_serial_t);
79487 if (tmp > buflen)
79488 tmp = buflen;
79489
79490 - if (copy_to_user(buffer,
79491 - &key->serial,
79492 - tmp) != 0)
79493 + if (copy_to_user(buffer, &serial, tmp))
79494 goto error;
79495
79496 buflen -= tmp;
79497 diff --git a/security/min_addr.c b/security/min_addr.c
79498 index f728728..6457a0c 100644
79499 --- a/security/min_addr.c
79500 +++ b/security/min_addr.c
79501 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
79502 */
79503 static void update_mmap_min_addr(void)
79504 {
79505 +#ifndef SPARC
79506 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
79507 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
79508 mmap_min_addr = dac_mmap_min_addr;
79509 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
79510 #else
79511 mmap_min_addr = dac_mmap_min_addr;
79512 #endif
79513 +#endif
79514 }
79515
79516 /*
79517 diff --git a/security/security.c b/security/security.c
79518 index bf619ff..8179030 100644
79519 --- a/security/security.c
79520 +++ b/security/security.c
79521 @@ -20,6 +20,7 @@
79522 #include <linux/ima.h>
79523 #include <linux/evm.h>
79524 #include <linux/fsnotify.h>
79525 +#include <linux/mm.h>
79526 #include <net/flow.h>
79527
79528 #define MAX_LSM_EVM_XATTR 2
79529 @@ -28,8 +29,8 @@
79530 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
79531 CONFIG_DEFAULT_SECURITY;
79532
79533 -static struct security_operations *security_ops;
79534 -static struct security_operations default_security_ops = {
79535 +static struct security_operations *security_ops __read_only;
79536 +static struct security_operations default_security_ops __read_only = {
79537 .name = "default",
79538 };
79539
79540 @@ -70,7 +71,9 @@ int __init security_init(void)
79541
79542 void reset_security_ops(void)
79543 {
79544 + pax_open_kernel();
79545 security_ops = &default_security_ops;
79546 + pax_close_kernel();
79547 }
79548
79549 /* Save user chosen LSM */
79550 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
79551 index d85b793..a164832 100644
79552 --- a/security/selinux/hooks.c
79553 +++ b/security/selinux/hooks.c
79554 @@ -95,8 +95,6 @@
79555
79556 #define NUM_SEL_MNT_OPTS 5
79557
79558 -extern struct security_operations *security_ops;
79559 -
79560 /* SECMARK reference count */
79561 static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
79562
79563 @@ -5520,7 +5518,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
79564
79565 #endif
79566
79567 -static struct security_operations selinux_ops = {
79568 +static struct security_operations selinux_ops __read_only = {
79569 .name = "selinux",
79570
79571 .ptrace_access_check = selinux_ptrace_access_check,
79572 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
79573 index c220f31..89fab3f 100644
79574 --- a/security/selinux/include/xfrm.h
79575 +++ b/security/selinux/include/xfrm.h
79576 @@ -50,7 +50,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
79577
79578 static inline void selinux_xfrm_notify_policyload(void)
79579 {
79580 - atomic_inc(&flow_cache_genid);
79581 + atomic_inc_unchecked(&flow_cache_genid);
79582 }
79583 #else
79584 static inline int selinux_xfrm_enabled(void)
79585 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
79586 index 45c32f0..0038be2 100644
79587 --- a/security/smack/smack_lsm.c
79588 +++ b/security/smack/smack_lsm.c
79589 @@ -3500,7 +3500,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
79590 return 0;
79591 }
79592
79593 -struct security_operations smack_ops = {
79594 +struct security_operations smack_ops __read_only = {
79595 .name = "smack",
79596
79597 .ptrace_access_check = smack_ptrace_access_check,
79598 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
79599 index 620d37c..e2ad89b 100644
79600 --- a/security/tomoyo/tomoyo.c
79601 +++ b/security/tomoyo/tomoyo.c
79602 @@ -501,7 +501,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
79603 * tomoyo_security_ops is a "struct security_operations" which is used for
79604 * registering TOMOYO.
79605 */
79606 -static struct security_operations tomoyo_security_ops = {
79607 +static struct security_operations tomoyo_security_ops __read_only = {
79608 .name = "tomoyo",
79609 .cred_alloc_blank = tomoyo_cred_alloc_blank,
79610 .cred_prepare = tomoyo_cred_prepare,
79611 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
79612 index 51d6709..1f3dbe2 100644
79613 --- a/security/yama/Kconfig
79614 +++ b/security/yama/Kconfig
79615 @@ -1,6 +1,6 @@
79616 config SECURITY_YAMA
79617 bool "Yama support"
79618 - depends on SECURITY
79619 + depends on SECURITY && !GRKERNSEC
79620 select SECURITYFS
79621 select SECURITY_PATH
79622 default n
79623 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
79624 index 270790d..c67dfcb 100644
79625 --- a/sound/aoa/codecs/onyx.c
79626 +++ b/sound/aoa/codecs/onyx.c
79627 @@ -54,7 +54,7 @@ struct onyx {
79628 spdif_locked:1,
79629 analog_locked:1,
79630 original_mute:2;
79631 - int open_count;
79632 + local_t open_count;
79633 struct codec_info *codec_info;
79634
79635 /* mutex serializes concurrent access to the device
79636 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
79637 struct onyx *onyx = cii->codec_data;
79638
79639 mutex_lock(&onyx->mutex);
79640 - onyx->open_count++;
79641 + local_inc(&onyx->open_count);
79642 mutex_unlock(&onyx->mutex);
79643
79644 return 0;
79645 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
79646 struct onyx *onyx = cii->codec_data;
79647
79648 mutex_lock(&onyx->mutex);
79649 - onyx->open_count--;
79650 - if (!onyx->open_count)
79651 + if (local_dec_and_test(&onyx->open_count))
79652 onyx->spdif_locked = onyx->analog_locked = 0;
79653 mutex_unlock(&onyx->mutex);
79654
79655 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
79656 index ffd2025..df062c9 100644
79657 --- a/sound/aoa/codecs/onyx.h
79658 +++ b/sound/aoa/codecs/onyx.h
79659 @@ -11,6 +11,7 @@
79660 #include <linux/i2c.h>
79661 #include <asm/pmac_low_i2c.h>
79662 #include <asm/prom.h>
79663 +#include <asm/local.h>
79664
79665 /* PCM3052 register definitions */
79666
79667 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
79668 index 08fde00..0bf641a 100644
79669 --- a/sound/core/oss/pcm_oss.c
79670 +++ b/sound/core/oss/pcm_oss.c
79671 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
79672 if (in_kernel) {
79673 mm_segment_t fs;
79674 fs = snd_enter_user();
79675 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79676 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79677 snd_leave_user(fs);
79678 } else {
79679 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
79680 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
79681 }
79682 if (ret != -EPIPE && ret != -ESTRPIPE)
79683 break;
79684 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
79685 if (in_kernel) {
79686 mm_segment_t fs;
79687 fs = snd_enter_user();
79688 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79689 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79690 snd_leave_user(fs);
79691 } else {
79692 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
79693 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
79694 }
79695 if (ret == -EPIPE) {
79696 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
79697 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
79698 struct snd_pcm_plugin_channel *channels;
79699 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
79700 if (!in_kernel) {
79701 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
79702 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
79703 return -EFAULT;
79704 buf = runtime->oss.buffer;
79705 }
79706 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
79707 }
79708 } else {
79709 tmp = snd_pcm_oss_write2(substream,
79710 - (const char __force *)buf,
79711 + (const char __force_kernel *)buf,
79712 runtime->oss.period_bytes, 0);
79713 if (tmp <= 0)
79714 goto err;
79715 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
79716 struct snd_pcm_runtime *runtime = substream->runtime;
79717 snd_pcm_sframes_t frames, frames1;
79718 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
79719 - char __user *final_dst = (char __force __user *)buf;
79720 + char __user *final_dst = (char __force_user *)buf;
79721 if (runtime->oss.plugin_first) {
79722 struct snd_pcm_plugin_channel *channels;
79723 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
79724 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
79725 xfer += tmp;
79726 runtime->oss.buffer_used -= tmp;
79727 } else {
79728 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
79729 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
79730 runtime->oss.period_bytes, 0);
79731 if (tmp <= 0)
79732 goto err;
79733 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
79734 size1);
79735 size1 /= runtime->channels; /* frames */
79736 fs = snd_enter_user();
79737 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
79738 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
79739 snd_leave_user(fs);
79740 }
79741 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
79742 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
79743 index 91cdf94..4085161 100644
79744 --- a/sound/core/pcm_compat.c
79745 +++ b/sound/core/pcm_compat.c
79746 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
79747 int err;
79748
79749 fs = snd_enter_user();
79750 - err = snd_pcm_delay(substream, &delay);
79751 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
79752 snd_leave_user(fs);
79753 if (err < 0)
79754 return err;
79755 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
79756 index 3fe99e6..26952e4 100644
79757 --- a/sound/core/pcm_native.c
79758 +++ b/sound/core/pcm_native.c
79759 @@ -2770,11 +2770,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
79760 switch (substream->stream) {
79761 case SNDRV_PCM_STREAM_PLAYBACK:
79762 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
79763 - (void __user *)arg);
79764 + (void __force_user *)arg);
79765 break;
79766 case SNDRV_PCM_STREAM_CAPTURE:
79767 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
79768 - (void __user *)arg);
79769 + (void __force_user *)arg);
79770 break;
79771 default:
79772 result = -EINVAL;
79773 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
79774 index 5cf8d65..912a79c 100644
79775 --- a/sound/core/seq/seq_device.c
79776 +++ b/sound/core/seq/seq_device.c
79777 @@ -64,7 +64,7 @@ struct ops_list {
79778 int argsize; /* argument size */
79779
79780 /* operators */
79781 - struct snd_seq_dev_ops ops;
79782 + struct snd_seq_dev_ops *ops;
79783
79784 /* registred devices */
79785 struct list_head dev_list; /* list of devices */
79786 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
79787
79788 mutex_lock(&ops->reg_mutex);
79789 /* copy driver operators */
79790 - ops->ops = *entry;
79791 + ops->ops = entry;
79792 ops->driver |= DRIVER_LOADED;
79793 ops->argsize = argsize;
79794
79795 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
79796 dev->name, ops->id, ops->argsize, dev->argsize);
79797 return -EINVAL;
79798 }
79799 - if (ops->ops.init_device(dev) >= 0) {
79800 + if (ops->ops->init_device(dev) >= 0) {
79801 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
79802 ops->num_init_devices++;
79803 } else {
79804 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
79805 dev->name, ops->id, ops->argsize, dev->argsize);
79806 return -EINVAL;
79807 }
79808 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
79809 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
79810 dev->status = SNDRV_SEQ_DEVICE_FREE;
79811 dev->driver_data = NULL;
79812 ops->num_init_devices--;
79813 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
79814 index 621e60e..f4543f5 100644
79815 --- a/sound/drivers/mts64.c
79816 +++ b/sound/drivers/mts64.c
79817 @@ -29,6 +29,7 @@
79818 #include <sound/initval.h>
79819 #include <sound/rawmidi.h>
79820 #include <sound/control.h>
79821 +#include <asm/local.h>
79822
79823 #define CARD_NAME "Miditerminal 4140"
79824 #define DRIVER_NAME "MTS64"
79825 @@ -67,7 +68,7 @@ struct mts64 {
79826 struct pardevice *pardev;
79827 int pardev_claimed;
79828
79829 - int open_count;
79830 + local_t open_count;
79831 int current_midi_output_port;
79832 int current_midi_input_port;
79833 u8 mode[MTS64_NUM_INPUT_PORTS];
79834 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79835 {
79836 struct mts64 *mts = substream->rmidi->private_data;
79837
79838 - if (mts->open_count == 0) {
79839 + if (local_read(&mts->open_count) == 0) {
79840 /* We don't need a spinlock here, because this is just called
79841 if the device has not been opened before.
79842 So there aren't any IRQs from the device */
79843 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
79844
79845 msleep(50);
79846 }
79847 - ++(mts->open_count);
79848 + local_inc(&mts->open_count);
79849
79850 return 0;
79851 }
79852 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79853 struct mts64 *mts = substream->rmidi->private_data;
79854 unsigned long flags;
79855
79856 - --(mts->open_count);
79857 - if (mts->open_count == 0) {
79858 + if (local_dec_return(&mts->open_count) == 0) {
79859 /* We need the spinlock_irqsave here because we can still
79860 have IRQs at this point */
79861 spin_lock_irqsave(&mts->lock, flags);
79862 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
79863
79864 msleep(500);
79865
79866 - } else if (mts->open_count < 0)
79867 - mts->open_count = 0;
79868 + } else if (local_read(&mts->open_count) < 0)
79869 + local_set(&mts->open_count, 0);
79870
79871 return 0;
79872 }
79873 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
79874 index b953fb4..1999c01 100644
79875 --- a/sound/drivers/opl4/opl4_lib.c
79876 +++ b/sound/drivers/opl4/opl4_lib.c
79877 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
79878 MODULE_DESCRIPTION("OPL4 driver");
79879 MODULE_LICENSE("GPL");
79880
79881 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
79882 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
79883 {
79884 int timeout = 10;
79885 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
79886 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
79887 index 3e32bd3..46fc152 100644
79888 --- a/sound/drivers/portman2x4.c
79889 +++ b/sound/drivers/portman2x4.c
79890 @@ -48,6 +48,7 @@
79891 #include <sound/initval.h>
79892 #include <sound/rawmidi.h>
79893 #include <sound/control.h>
79894 +#include <asm/local.h>
79895
79896 #define CARD_NAME "Portman 2x4"
79897 #define DRIVER_NAME "portman"
79898 @@ -85,7 +86,7 @@ struct portman {
79899 struct pardevice *pardev;
79900 int pardev_claimed;
79901
79902 - int open_count;
79903 + local_t open_count;
79904 int mode[PORTMAN_NUM_INPUT_PORTS];
79905 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
79906 };
79907 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
79908 index 87657dd..a8268d4 100644
79909 --- a/sound/firewire/amdtp.c
79910 +++ b/sound/firewire/amdtp.c
79911 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
79912 ptr = s->pcm_buffer_pointer + data_blocks;
79913 if (ptr >= pcm->runtime->buffer_size)
79914 ptr -= pcm->runtime->buffer_size;
79915 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
79916 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
79917
79918 s->pcm_period_pointer += data_blocks;
79919 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
79920 @@ -511,7 +511,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
79921 */
79922 void amdtp_out_stream_update(struct amdtp_out_stream *s)
79923 {
79924 - ACCESS_ONCE(s->source_node_id_field) =
79925 + ACCESS_ONCE_RW(s->source_node_id_field) =
79926 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
79927 }
79928 EXPORT_SYMBOL(amdtp_out_stream_update);
79929 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
79930 index 537a9cb..8e8c8e9 100644
79931 --- a/sound/firewire/amdtp.h
79932 +++ b/sound/firewire/amdtp.h
79933 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s)
79934 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
79935 struct snd_pcm_substream *pcm)
79936 {
79937 - ACCESS_ONCE(s->pcm) = pcm;
79938 + ACCESS_ONCE_RW(s->pcm) = pcm;
79939 }
79940
79941 /**
79942 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
79943 index d428ffe..751ef78 100644
79944 --- a/sound/firewire/isight.c
79945 +++ b/sound/firewire/isight.c
79946 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
79947 ptr += count;
79948 if (ptr >= runtime->buffer_size)
79949 ptr -= runtime->buffer_size;
79950 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
79951 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
79952
79953 isight->period_counter += count;
79954 if (isight->period_counter >= runtime->period_size) {
79955 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
79956 if (err < 0)
79957 return err;
79958
79959 - ACCESS_ONCE(isight->pcm_active) = true;
79960 + ACCESS_ONCE_RW(isight->pcm_active) = true;
79961
79962 return 0;
79963 }
79964 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
79965 {
79966 struct isight *isight = substream->private_data;
79967
79968 - ACCESS_ONCE(isight->pcm_active) = false;
79969 + ACCESS_ONCE_RW(isight->pcm_active) = false;
79970
79971 mutex_lock(&isight->mutex);
79972 isight_stop_streaming(isight);
79973 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
79974
79975 switch (cmd) {
79976 case SNDRV_PCM_TRIGGER_START:
79977 - ACCESS_ONCE(isight->pcm_running) = true;
79978 + ACCESS_ONCE_RW(isight->pcm_running) = true;
79979 break;
79980 case SNDRV_PCM_TRIGGER_STOP:
79981 - ACCESS_ONCE(isight->pcm_running) = false;
79982 + ACCESS_ONCE_RW(isight->pcm_running) = false;
79983 break;
79984 default:
79985 return -EINVAL;
79986 diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
79987 index 7bd5e33..1fcab12 100644
79988 --- a/sound/isa/cmi8330.c
79989 +++ b/sound/isa/cmi8330.c
79990 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
79991
79992 struct snd_pcm *pcm;
79993 struct snd_cmi8330_stream {
79994 - struct snd_pcm_ops ops;
79995 + snd_pcm_ops_no_const ops;
79996 snd_pcm_open_callback_t open;
79997 void *private_data; /* sb or wss */
79998 } streams[2];
79999 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
80000 index 733b014..56ce96f 100644
80001 --- a/sound/oss/sb_audio.c
80002 +++ b/sound/oss/sb_audio.c
80003 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
80004 buf16 = (signed short *)(localbuf + localoffs);
80005 while (c)
80006 {
80007 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80008 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
80009 if (copy_from_user(lbuf8,
80010 userbuf+useroffs + p,
80011 locallen))
80012 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
80013 index 09d4648..cf234c7 100644
80014 --- a/sound/oss/swarm_cs4297a.c
80015 +++ b/sound/oss/swarm_cs4297a.c
80016 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
80017 {
80018 struct cs4297a_state *s;
80019 u32 pwr, id;
80020 - mm_segment_t fs;
80021 int rval;
80022 #ifndef CONFIG_BCM_CS4297A_CSWARM
80023 u64 cfg;
80024 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
80025 if (!rval) {
80026 char *sb1250_duart_present;
80027
80028 +#if 0
80029 + mm_segment_t fs;
80030 fs = get_fs();
80031 set_fs(KERNEL_DS);
80032 -#if 0
80033 val = SOUND_MASK_LINE;
80034 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
80035 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
80036 val = initvol[i].vol;
80037 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
80038 }
80039 + set_fs(fs);
80040 // cs4297a_write_ac97(s, 0x18, 0x0808);
80041 #else
80042 // cs4297a_write_ac97(s, 0x5e, 0x180);
80043 cs4297a_write_ac97(s, 0x02, 0x0808);
80044 cs4297a_write_ac97(s, 0x18, 0x0808);
80045 #endif
80046 - set_fs(fs);
80047
80048 list_add(&s->list, &cs4297a_devs);
80049
80050 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
80051 index 56b4f74..7cfd41a 100644
80052 --- a/sound/pci/hda/hda_codec.h
80053 +++ b/sound/pci/hda/hda_codec.h
80054 @@ -611,7 +611,7 @@ struct hda_bus_ops {
80055 /* notify power-up/down from codec to controller */
80056 void (*pm_notify)(struct hda_bus *bus);
80057 #endif
80058 -};
80059 +} __no_const;
80060
80061 /* template to pass to the bus constructor */
80062 struct hda_bus_template {
80063 @@ -713,6 +713,7 @@ struct hda_codec_ops {
80064 #endif
80065 void (*reboot_notify)(struct hda_codec *codec);
80066 };
80067 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
80068
80069 /* record for amp information cache */
80070 struct hda_cache_head {
80071 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
80072 struct snd_pcm_substream *substream);
80073 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
80074 struct snd_pcm_substream *substream);
80075 -};
80076 +} __no_const;
80077
80078 /* PCM information for each substream */
80079 struct hda_pcm_stream {
80080 @@ -801,7 +802,7 @@ struct hda_codec {
80081 const char *modelname; /* model name for preset */
80082
80083 /* set by patch */
80084 - struct hda_codec_ops patch_ops;
80085 + hda_codec_ops_no_const patch_ops;
80086
80087 /* PCM to create, set by patch_ops.build_pcms callback */
80088 unsigned int num_pcms;
80089 diff --git a/sound/pci/ice1712/ice1712.h b/sound/pci/ice1712/ice1712.h
80090 index 0da778a..bc38b84 100644
80091 --- a/sound/pci/ice1712/ice1712.h
80092 +++ b/sound/pci/ice1712/ice1712.h
80093 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
80094 unsigned int mask_flags; /* total mask bits */
80095 struct snd_akm4xxx_ops {
80096 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
80097 - } ops;
80098 + } __no_const ops;
80099 };
80100
80101 struct snd_ice1712_spdif {
80102 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
80103 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80104 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80105 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
80106 - } ops;
80107 + } __no_const ops;
80108 };
80109
80110
80111 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
80112 index a8159b81..5f006a5 100644
80113 --- a/sound/pci/ymfpci/ymfpci_main.c
80114 +++ b/sound/pci/ymfpci/ymfpci_main.c
80115 @@ -203,8 +203,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
80116 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
80117 break;
80118 }
80119 - if (atomic_read(&chip->interrupt_sleep_count)) {
80120 - atomic_set(&chip->interrupt_sleep_count, 0);
80121 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80122 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80123 wake_up(&chip->interrupt_sleep);
80124 }
80125 __end:
80126 @@ -788,7 +788,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
80127 continue;
80128 init_waitqueue_entry(&wait, current);
80129 add_wait_queue(&chip->interrupt_sleep, &wait);
80130 - atomic_inc(&chip->interrupt_sleep_count);
80131 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
80132 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
80133 remove_wait_queue(&chip->interrupt_sleep, &wait);
80134 }
80135 @@ -826,8 +826,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
80136 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
80137 spin_unlock(&chip->reg_lock);
80138
80139 - if (atomic_read(&chip->interrupt_sleep_count)) {
80140 - atomic_set(&chip->interrupt_sleep_count, 0);
80141 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
80142 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80143 wake_up(&chip->interrupt_sleep);
80144 }
80145 }
80146 @@ -2398,7 +2398,7 @@ int __devinit snd_ymfpci_create(struct snd_card *card,
80147 spin_lock_init(&chip->reg_lock);
80148 spin_lock_init(&chip->voice_lock);
80149 init_waitqueue_head(&chip->interrupt_sleep);
80150 - atomic_set(&chip->interrupt_sleep_count, 0);
80151 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
80152 chip->card = card;
80153 chip->pci = pci;
80154 chip->irq = -1;
80155 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
80156 index 0ad8dca..7186339 100644
80157 --- a/sound/soc/soc-pcm.c
80158 +++ b/sound/soc/soc-pcm.c
80159 @@ -641,7 +641,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
80160 struct snd_soc_platform *platform = rtd->platform;
80161 struct snd_soc_dai *codec_dai = rtd->codec_dai;
80162 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
80163 - struct snd_pcm_ops *soc_pcm_ops = &rtd->ops;
80164 + snd_pcm_ops_no_const *soc_pcm_ops = &rtd->ops;
80165 struct snd_pcm *pcm;
80166 char new_name[64];
80167 int ret = 0, playback = 0, capture = 0;
80168 diff --git a/sound/usb/card.h b/sound/usb/card.h
80169 index da5fa1a..113cd02 100644
80170 --- a/sound/usb/card.h
80171 +++ b/sound/usb/card.h
80172 @@ -45,6 +45,7 @@ struct snd_urb_ops {
80173 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80174 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
80175 };
80176 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
80177
80178 struct snd_usb_substream {
80179 struct snd_usb_stream *stream;
80180 @@ -94,7 +95,7 @@ struct snd_usb_substream {
80181 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
80182 spinlock_t lock;
80183
80184 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
80185 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
80186 int last_frame_number; /* stored frame number */
80187 int last_delay; /* stored delay */
80188 };
80189 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
80190 new file mode 100644
80191 index 0000000..50f2f2f
80192 --- /dev/null
80193 +++ b/tools/gcc/.gitignore
80194 @@ -0,0 +1 @@
80195 +size_overflow_hash.h
80196 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
80197 new file mode 100644
80198 index 0000000..e9d4079
80199 --- /dev/null
80200 +++ b/tools/gcc/Makefile
80201 @@ -0,0 +1,43 @@
80202 +#CC := gcc
80203 +#PLUGIN_SOURCE_FILES := pax_plugin.c
80204 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
80205 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
80206 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
80207 +
80208 +ifeq ($(PLUGINCC),$(HOSTCC))
80209 +HOSTLIBS := hostlibs
80210 +HOST_EXTRACFLAGS += -Iinclude -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu99 -ggdb
80211 +else
80212 +HOSTLIBS := hostcxxlibs
80213 +HOST_EXTRACXXFLAGS += -Iinclude -I$(GCCPLUGINS_DIR)/include -I$(GCCPLUGINS_DIR)/include/c-family -std=gnu++98 -ggdb -Wno-unused-parameter
80214 +endif
80215 +
80216 +$(HOSTLIBS)-y := constify_plugin.so
80217 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
80218 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
80219 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
80220 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
80221 +$(HOSTLIBS)-y += colorize_plugin.so
80222 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
80223 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
80224 +
80225 +always := $($(HOSTLIBS)-y)
80226 +
80227 +constify_plugin-objs := constify_plugin.o
80228 +stackleak_plugin-objs := stackleak_plugin.o
80229 +kallocstat_plugin-objs := kallocstat_plugin.o
80230 +kernexec_plugin-objs := kernexec_plugin.o
80231 +checker_plugin-objs := checker_plugin.o
80232 +colorize_plugin-objs := colorize_plugin.o
80233 +size_overflow_plugin-objs := size_overflow_plugin.o
80234 +latent_entropy_plugin-objs := latent_entropy_plugin.o
80235 +
80236 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
80237 +
80238 +quiet_cmd_build_size_overflow_hash = GENHASH $@
80239 + cmd_build_size_overflow_hash = \
80240 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
80241 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
80242 + $(call if_changed,build_size_overflow_hash)
80243 +
80244 +targets += size_overflow_hash.h
80245 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
80246 new file mode 100644
80247 index 0000000..d41b5af
80248 --- /dev/null
80249 +++ b/tools/gcc/checker_plugin.c
80250 @@ -0,0 +1,171 @@
80251 +/*
80252 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
80253 + * Licensed under the GPL v2
80254 + *
80255 + * Note: the choice of the license means that the compilation process is
80256 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80257 + * but for the kernel it doesn't matter since it doesn't link against
80258 + * any of the gcc libraries
80259 + *
80260 + * gcc plugin to implement various sparse (source code checker) features
80261 + *
80262 + * TODO:
80263 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
80264 + *
80265 + * BUGS:
80266 + * - none known
80267 + */
80268 +#include "gcc-plugin.h"
80269 +#include "config.h"
80270 +#include "system.h"
80271 +#include "coretypes.h"
80272 +#include "tree.h"
80273 +#include "tree-pass.h"
80274 +#include "flags.h"
80275 +#include "intl.h"
80276 +#include "toplev.h"
80277 +#include "plugin.h"
80278 +//#include "expr.h" where are you...
80279 +#include "diagnostic.h"
80280 +#include "plugin-version.h"
80281 +#include "tm.h"
80282 +#include "function.h"
80283 +#include "basic-block.h"
80284 +#include "gimple.h"
80285 +#include "rtl.h"
80286 +#include "emit-rtl.h"
80287 +#include "tree-flow.h"
80288 +#include "target.h"
80289 +
80290 +extern void c_register_addr_space (const char *str, addr_space_t as);
80291 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
80292 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
80293 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
80294 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
80295 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
80296 +
80297 +extern void print_gimple_stmt(FILE *, gimple, int, int);
80298 +extern rtx emit_move_insn(rtx x, rtx y);
80299 +
80300 +int plugin_is_GPL_compatible;
80301 +
80302 +static struct plugin_info checker_plugin_info = {
80303 + .version = "201111150100",
80304 +};
80305 +
80306 +#define ADDR_SPACE_KERNEL 0
80307 +#define ADDR_SPACE_FORCE_KERNEL 1
80308 +#define ADDR_SPACE_USER 2
80309 +#define ADDR_SPACE_FORCE_USER 3
80310 +#define ADDR_SPACE_IOMEM 0
80311 +#define ADDR_SPACE_FORCE_IOMEM 0
80312 +#define ADDR_SPACE_PERCPU 0
80313 +#define ADDR_SPACE_FORCE_PERCPU 0
80314 +#define ADDR_SPACE_RCU 0
80315 +#define ADDR_SPACE_FORCE_RCU 0
80316 +
80317 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
80318 +{
80319 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
80320 +}
80321 +
80322 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
80323 +{
80324 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
80325 +}
80326 +
80327 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
80328 +{
80329 + return default_addr_space_valid_pointer_mode(mode, as);
80330 +}
80331 +
80332 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
80333 +{
80334 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
80335 +}
80336 +
80337 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
80338 +{
80339 + return default_addr_space_legitimize_address(x, oldx, mode, as);
80340 +}
80341 +
80342 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
80343 +{
80344 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
80345 + return true;
80346 +
80347 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
80348 + return true;
80349 +
80350 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
80351 + return true;
80352 +
80353 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
80354 + return true;
80355 +
80356 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
80357 + return true;
80358 +
80359 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
80360 + return true;
80361 +
80362 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
80363 + return true;
80364 +
80365 + return subset == superset;
80366 +}
80367 +
80368 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
80369 +{
80370 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
80371 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
80372 +
80373 + return op;
80374 +}
80375 +
80376 +static void register_checker_address_spaces(void *event_data, void *data)
80377 +{
80378 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
80379 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
80380 + c_register_addr_space("__user", ADDR_SPACE_USER);
80381 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
80382 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
80383 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
80384 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
80385 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
80386 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
80387 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
80388 +
80389 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
80390 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
80391 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
80392 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
80393 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
80394 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
80395 + targetm.addr_space.convert = checker_addr_space_convert;
80396 +}
80397 +
80398 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80399 +{
80400 + const char * const plugin_name = plugin_info->base_name;
80401 + const int argc = plugin_info->argc;
80402 + const struct plugin_argument * const argv = plugin_info->argv;
80403 + int i;
80404 +
80405 + if (!plugin_default_version_check(version, &gcc_version)) {
80406 + error(G_("incompatible gcc/plugin versions"));
80407 + return 1;
80408 + }
80409 +
80410 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
80411 +
80412 + for (i = 0; i < argc; ++i)
80413 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80414 +
80415 + if (TARGET_64BIT == 0)
80416 + return 0;
80417 +
80418 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
80419 +
80420 + return 0;
80421 +}
80422 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
80423 new file mode 100644
80424 index 0000000..846aeb0
80425 --- /dev/null
80426 +++ b/tools/gcc/colorize_plugin.c
80427 @@ -0,0 +1,148 @@
80428 +/*
80429 + * Copyright 2012 by PaX Team <pageexec@freemail.hu>
80430 + * Licensed under the GPL v2
80431 + *
80432 + * Note: the choice of the license means that the compilation process is
80433 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
80434 + * but for the kernel it doesn't matter since it doesn't link against
80435 + * any of the gcc libraries
80436 + *
80437 + * gcc plugin to colorize diagnostic output
80438 + *
80439 + */
80440 +
80441 +#include "gcc-plugin.h"
80442 +#include "config.h"
80443 +#include "system.h"
80444 +#include "coretypes.h"
80445 +#include "tree.h"
80446 +#include "tree-pass.h"
80447 +#include "flags.h"
80448 +#include "intl.h"
80449 +#include "toplev.h"
80450 +#include "plugin.h"
80451 +#include "diagnostic.h"
80452 +#include "plugin-version.h"
80453 +#include "tm.h"
80454 +
80455 +int plugin_is_GPL_compatible;
80456 +
80457 +static struct plugin_info colorize_plugin_info = {
80458 + .version = "201203092200",
80459 + .help = NULL,
80460 +};
80461 +
80462 +#define GREEN "\033[32m\033[2m"
80463 +#define LIGHTGREEN "\033[32m\033[1m"
80464 +#define YELLOW "\033[33m\033[2m"
80465 +#define LIGHTYELLOW "\033[33m\033[1m"
80466 +#define RED "\033[31m\033[2m"
80467 +#define LIGHTRED "\033[31m\033[1m"
80468 +#define BLUE "\033[34m\033[2m"
80469 +#define LIGHTBLUE "\033[34m\033[1m"
80470 +#define BRIGHT "\033[m\033[1m"
80471 +#define NORMAL "\033[m"
80472 +
80473 +static diagnostic_starter_fn old_starter;
80474 +static diagnostic_finalizer_fn old_finalizer;
80475 +
80476 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80477 +{
80478 + const char *color;
80479 + char *newprefix;
80480 +
80481 + switch (diagnostic->kind) {
80482 + case DK_NOTE:
80483 + color = LIGHTBLUE;
80484 + break;
80485 +
80486 + case DK_PEDWARN:
80487 + case DK_WARNING:
80488 + color = LIGHTYELLOW;
80489 + break;
80490 +
80491 + case DK_ERROR:
80492 + case DK_FATAL:
80493 + case DK_ICE:
80494 + case DK_PERMERROR:
80495 + case DK_SORRY:
80496 + color = LIGHTRED;
80497 + break;
80498 +
80499 + default:
80500 + color = NORMAL;
80501 + }
80502 +
80503 + old_starter(context, diagnostic);
80504 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
80505 + return;
80506 + pp_destroy_prefix(context->printer);
80507 + pp_set_prefix(context->printer, newprefix);
80508 +}
80509 +
80510 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
80511 +{
80512 + old_finalizer(context, diagnostic);
80513 +}
80514 +
80515 +static void colorize_arm(void)
80516 +{
80517 + old_starter = diagnostic_starter(global_dc);
80518 + old_finalizer = diagnostic_finalizer(global_dc);
80519 +
80520 + diagnostic_starter(global_dc) = start_colorize;
80521 + diagnostic_finalizer(global_dc) = finalize_colorize;
80522 +}
80523 +
80524 +static unsigned int execute_colorize_rearm(void)
80525 +{
80526 + if (diagnostic_starter(global_dc) == start_colorize)
80527 + return 0;
80528 +
80529 + colorize_arm();
80530 + return 0;
80531 +}
80532 +
80533 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
80534 + .pass = {
80535 + .type = SIMPLE_IPA_PASS,
80536 + .name = "colorize_rearm",
80537 + .gate = NULL,
80538 + .execute = execute_colorize_rearm,
80539 + .sub = NULL,
80540 + .next = NULL,
80541 + .static_pass_number = 0,
80542 + .tv_id = TV_NONE,
80543 + .properties_required = 0,
80544 + .properties_provided = 0,
80545 + .properties_destroyed = 0,
80546 + .todo_flags_start = 0,
80547 + .todo_flags_finish = 0
80548 + }
80549 +};
80550 +
80551 +static void colorize_start_unit(void *gcc_data, void *user_data)
80552 +{
80553 + colorize_arm();
80554 +}
80555 +
80556 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80557 +{
80558 + const char * const plugin_name = plugin_info->base_name;
80559 + struct register_pass_info colorize_rearm_pass_info = {
80560 + .pass = &pass_ipa_colorize_rearm.pass,
80561 + .reference_pass_name = "*free_lang_data",
80562 + .ref_pass_instance_number = 1,
80563 + .pos_op = PASS_POS_INSERT_AFTER
80564 + };
80565 +
80566 + if (!plugin_default_version_check(version, &gcc_version)) {
80567 + error(G_("incompatible gcc/plugin versions"));
80568 + return 1;
80569 + }
80570 +
80571 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
80572 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
80573 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
80574 + return 0;
80575 +}
80576 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
80577 new file mode 100644
80578 index 0000000..048d4ff
80579 --- /dev/null
80580 +++ b/tools/gcc/constify_plugin.c
80581 @@ -0,0 +1,328 @@
80582 +/*
80583 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
80584 + * Copyright 2011 by PaX Team <pageexec@freemail.hu>
80585 + * Licensed under the GPL v2, or (at your option) v3
80586 + *
80587 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
80588 + *
80589 + * Homepage:
80590 + * http://www.grsecurity.net/~ephox/const_plugin/
80591 + *
80592 + * Usage:
80593 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
80594 + * $ gcc -fplugin=constify_plugin.so test.c -O2
80595 + */
80596 +
80597 +#include "gcc-plugin.h"
80598 +#include "config.h"
80599 +#include "system.h"
80600 +#include "coretypes.h"
80601 +#include "tree.h"
80602 +#include "tree-pass.h"
80603 +#include "flags.h"
80604 +#include "intl.h"
80605 +#include "toplev.h"
80606 +#include "plugin.h"
80607 +#include "diagnostic.h"
80608 +#include "plugin-version.h"
80609 +#include "tm.h"
80610 +#include "function.h"
80611 +#include "basic-block.h"
80612 +#include "gimple.h"
80613 +#include "rtl.h"
80614 +#include "emit-rtl.h"
80615 +#include "tree-flow.h"
80616 +
80617 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
80618 +
80619 +int plugin_is_GPL_compatible;
80620 +
80621 +static struct plugin_info const_plugin_info = {
80622 + .version = "201205300030",
80623 + .help = "no-constify\tturn off constification\n",
80624 +};
80625 +
80626 +static void deconstify_tree(tree node);
80627 +
80628 +static void deconstify_type(tree type)
80629 +{
80630 + tree field;
80631 +
80632 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
80633 + tree type = TREE_TYPE(field);
80634 +
80635 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80636 + continue;
80637 + if (!TYPE_READONLY(type))
80638 + continue;
80639 +
80640 + deconstify_tree(field);
80641 + }
80642 + TYPE_READONLY(type) = 0;
80643 + C_TYPE_FIELDS_READONLY(type) = 0;
80644 +}
80645 +
80646 +static void deconstify_tree(tree node)
80647 +{
80648 + tree old_type, new_type, field;
80649 +
80650 + old_type = TREE_TYPE(node);
80651 +
80652 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
80653 +
80654 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
80655 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
80656 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
80657 + DECL_FIELD_CONTEXT(field) = new_type;
80658 +
80659 + deconstify_type(new_type);
80660 +
80661 + TREE_READONLY(node) = 0;
80662 + TREE_TYPE(node) = new_type;
80663 +}
80664 +
80665 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80666 +{
80667 + tree type;
80668 +
80669 + *no_add_attrs = true;
80670 + if (TREE_CODE(*node) == FUNCTION_DECL) {
80671 + error("%qE attribute does not apply to functions", name);
80672 + return NULL_TREE;
80673 + }
80674 +
80675 + if (TREE_CODE(*node) == VAR_DECL) {
80676 + error("%qE attribute does not apply to variables", name);
80677 + return NULL_TREE;
80678 + }
80679 +
80680 + if (TYPE_P(*node)) {
80681 + if (TREE_CODE(*node) == RECORD_TYPE || TREE_CODE(*node) == UNION_TYPE)
80682 + *no_add_attrs = false;
80683 + else
80684 + error("%qE attribute applies to struct and union types only", name);
80685 + return NULL_TREE;
80686 + }
80687 +
80688 + type = TREE_TYPE(*node);
80689 +
80690 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
80691 + error("%qE attribute applies to struct and union types only", name);
80692 + return NULL_TREE;
80693 + }
80694 +
80695 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
80696 + error("%qE attribute is already applied to the type", name);
80697 + return NULL_TREE;
80698 + }
80699 +
80700 + if (TREE_CODE(*node) == TYPE_DECL && !TYPE_READONLY(type)) {
80701 + error("%qE attribute used on type that is not constified", name);
80702 + return NULL_TREE;
80703 + }
80704 +
80705 + if (TREE_CODE(*node) == TYPE_DECL) {
80706 + deconstify_tree(*node);
80707 + return NULL_TREE;
80708 + }
80709 +
80710 + return NULL_TREE;
80711 +}
80712 +
80713 +static void constify_type(tree type)
80714 +{
80715 + TYPE_READONLY(type) = 1;
80716 + C_TYPE_FIELDS_READONLY(type) = 1;
80717 +}
80718 +
80719 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
80720 +{
80721 + *no_add_attrs = true;
80722 + if (!TYPE_P(*node)) {
80723 + error("%qE attribute applies to types only", name);
80724 + return NULL_TREE;
80725 + }
80726 +
80727 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
80728 + error("%qE attribute applies to struct and union types only", name);
80729 + return NULL_TREE;
80730 + }
80731 +
80732 + *no_add_attrs = false;
80733 + constify_type(*node);
80734 + return NULL_TREE;
80735 +}
80736 +
80737 +static struct attribute_spec no_const_attr = {
80738 + .name = "no_const",
80739 + .min_length = 0,
80740 + .max_length = 0,
80741 + .decl_required = false,
80742 + .type_required = false,
80743 + .function_type_required = false,
80744 + .handler = handle_no_const_attribute,
80745 +#if BUILDING_GCC_VERSION >= 4007
80746 + .affects_type_identity = true
80747 +#endif
80748 +};
80749 +
80750 +static struct attribute_spec do_const_attr = {
80751 + .name = "do_const",
80752 + .min_length = 0,
80753 + .max_length = 0,
80754 + .decl_required = false,
80755 + .type_required = false,
80756 + .function_type_required = false,
80757 + .handler = handle_do_const_attribute,
80758 +#if BUILDING_GCC_VERSION >= 4007
80759 + .affects_type_identity = true
80760 +#endif
80761 +};
80762 +
80763 +static void register_attributes(void *event_data, void *data)
80764 +{
80765 + register_attribute(&no_const_attr);
80766 + register_attribute(&do_const_attr);
80767 +}
80768 +
80769 +static bool is_fptr(tree field)
80770 +{
80771 + tree ptr = TREE_TYPE(field);
80772 +
80773 + if (TREE_CODE(ptr) != POINTER_TYPE)
80774 + return false;
80775 +
80776 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
80777 +}
80778 +
80779 +static bool walk_struct(tree node)
80780 +{
80781 + tree field;
80782 +
80783 + if (TYPE_FIELDS(node) == NULL_TREE)
80784 + return false;
80785 +
80786 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
80787 + gcc_assert(!TYPE_READONLY(node));
80788 + deconstify_type(node);
80789 + return false;
80790 + }
80791 +
80792 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
80793 + tree type = TREE_TYPE(field);
80794 + enum tree_code code = TREE_CODE(type);
80795 + if (code == RECORD_TYPE || code == UNION_TYPE) {
80796 + if (!(walk_struct(type)))
80797 + return false;
80798 + } else if (!is_fptr(field) && !TREE_READONLY(field))
80799 + return false;
80800 + }
80801 + return true;
80802 +}
80803 +
80804 +static void finish_type(void *event_data, void *data)
80805 +{
80806 + tree type = (tree)event_data;
80807 +
80808 + if (type == NULL_TREE)
80809 + return;
80810 +
80811 + if (TYPE_READONLY(type))
80812 + return;
80813 +
80814 + if (walk_struct(type))
80815 + constify_type(type);
80816 +}
80817 +
80818 +static unsigned int check_local_variables(void);
80819 +
80820 +struct gimple_opt_pass pass_local_variable = {
80821 + {
80822 + .type = GIMPLE_PASS,
80823 + .name = "check_local_variables",
80824 + .gate = NULL,
80825 + .execute = check_local_variables,
80826 + .sub = NULL,
80827 + .next = NULL,
80828 + .static_pass_number = 0,
80829 + .tv_id = TV_NONE,
80830 + .properties_required = 0,
80831 + .properties_provided = 0,
80832 + .properties_destroyed = 0,
80833 + .todo_flags_start = 0,
80834 + .todo_flags_finish = 0
80835 + }
80836 +};
80837 +
80838 +static unsigned int check_local_variables(void)
80839 +{
80840 + tree var;
80841 + referenced_var_iterator rvi;
80842 +
80843 +#if BUILDING_GCC_VERSION == 4005
80844 + FOR_EACH_REFERENCED_VAR(var, rvi) {
80845 +#else
80846 + FOR_EACH_REFERENCED_VAR(cfun, var, rvi) {
80847 +#endif
80848 + tree type = TREE_TYPE(var);
80849 +
80850 + if (!DECL_P(var) || TREE_STATIC(var) || DECL_EXTERNAL(var))
80851 + continue;
80852 +
80853 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
80854 + continue;
80855 +
80856 + if (!TYPE_READONLY(type))
80857 + continue;
80858 +
80859 +// if (lookup_attribute("no_const", DECL_ATTRIBUTES(var)))
80860 +// continue;
80861 +
80862 +// if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
80863 +// continue;
80864 +
80865 + if (walk_struct(type)) {
80866 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
80867 + return 1;
80868 + }
80869 + }
80870 + return 0;
80871 +}
80872 +
80873 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
80874 +{
80875 + const char * const plugin_name = plugin_info->base_name;
80876 + const int argc = plugin_info->argc;
80877 + const struct plugin_argument * const argv = plugin_info->argv;
80878 + int i;
80879 + bool constify = true;
80880 +
80881 + struct register_pass_info local_variable_pass_info = {
80882 + .pass = &pass_local_variable.pass,
80883 + .reference_pass_name = "*referenced_vars",
80884 + .ref_pass_instance_number = 1,
80885 + .pos_op = PASS_POS_INSERT_AFTER
80886 + };
80887 +
80888 + if (!plugin_default_version_check(version, &gcc_version)) {
80889 + error(G_("incompatible gcc/plugin versions"));
80890 + return 1;
80891 + }
80892 +
80893 + for (i = 0; i < argc; ++i) {
80894 + if (!(strcmp(argv[i].key, "no-constify"))) {
80895 + constify = false;
80896 + continue;
80897 + }
80898 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
80899 + }
80900 +
80901 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
80902 + if (constify) {
80903 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
80904 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
80905 + }
80906 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
80907 +
80908 + return 0;
80909 +}
80910 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
80911 new file mode 100644
80912 index 0000000..a0fe8b2
80913 --- /dev/null
80914 +++ b/tools/gcc/generate_size_overflow_hash.sh
80915 @@ -0,0 +1,94 @@
80916 +#!/bin/bash
80917 +
80918 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
80919 +
80920 +header1="size_overflow_hash.h"
80921 +database="size_overflow_hash.data"
80922 +n=65536
80923 +
80924 +usage() {
80925 +cat <<EOF
80926 +usage: $0 options
80927 +OPTIONS:
80928 + -h|--help help
80929 + -o header file
80930 + -d database file
80931 + -n hash array size
80932 +EOF
80933 + return 0
80934 +}
80935 +
80936 +while true
80937 +do
80938 + case "$1" in
80939 + -h|--help) usage && exit 0;;
80940 + -n) n=$2; shift 2;;
80941 + -o) header1="$2"; shift 2;;
80942 + -d) database="$2"; shift 2;;
80943 + --) shift 1; break ;;
80944 + *) break ;;
80945 + esac
80946 +done
80947 +
80948 +create_defines() {
80949 + for i in `seq 1 10`
80950 + do
80951 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
80952 + done
80953 + echo >> "$header1"
80954 +}
80955 +
80956 +create_structs () {
80957 + rm -f "$header1"
80958 +
80959 + create_defines
80960 +
80961 + cat "$database" | while read data
80962 + do
80963 + data_array=($data)
80964 + struct_hash_name="${data_array[0]}"
80965 + funcn="${data_array[1]}"
80966 + params="${data_array[2]}"
80967 + next="${data_array[5]}"
80968 +
80969 + echo "struct size_overflow_hash $struct_hash_name = {" >> "$header1"
80970 +
80971 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
80972 + echo -en "\t.param\t= " >> "$header1"
80973 + line=
80974 + for param_num in ${params//-/ };
80975 + do
80976 + line="${line}PARAM"$param_num"|"
80977 + done
80978 +
80979 + echo -e "${line%?},\n};\n" >> "$header1"
80980 + done
80981 +}
80982 +
80983 +create_headers () {
80984 + echo "struct size_overflow_hash *size_overflow_hash[$n] = {" >> "$header1"
80985 +}
80986 +
80987 +create_array_elements () {
80988 + index=0
80989 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
80990 + do
80991 + data_array=($data)
80992 + i="${data_array[3]}"
80993 + hash="${data_array[4]}"
80994 + while [[ $index -lt $i ]]
80995 + do
80996 + echo -e "\t["$index"]\t= NULL," >> "$header1"
80997 + index=$(($index + 1))
80998 + done
80999 + index=$(($index + 1))
81000 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
81001 + done
81002 + echo '};' >> $header1
81003 +}
81004 +
81005 +create_structs
81006 +create_headers
81007 +create_array_elements
81008 +
81009 +exit 0
81010 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
81011 new file mode 100644
81012 index 0000000..a86e422
81013 --- /dev/null
81014 +++ b/tools/gcc/kallocstat_plugin.c
81015 @@ -0,0 +1,167 @@
81016 +/*
81017 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81018 + * Licensed under the GPL v2
81019 + *
81020 + * Note: the choice of the license means that the compilation process is
81021 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81022 + * but for the kernel it doesn't matter since it doesn't link against
81023 + * any of the gcc libraries
81024 + *
81025 + * gcc plugin to find the distribution of k*alloc sizes
81026 + *
81027 + * TODO:
81028 + *
81029 + * BUGS:
81030 + * - none known
81031 + */
81032 +#include "gcc-plugin.h"
81033 +#include "config.h"
81034 +#include "system.h"
81035 +#include "coretypes.h"
81036 +#include "tree.h"
81037 +#include "tree-pass.h"
81038 +#include "flags.h"
81039 +#include "intl.h"
81040 +#include "toplev.h"
81041 +#include "plugin.h"
81042 +//#include "expr.h" where are you...
81043 +#include "diagnostic.h"
81044 +#include "plugin-version.h"
81045 +#include "tm.h"
81046 +#include "function.h"
81047 +#include "basic-block.h"
81048 +#include "gimple.h"
81049 +#include "rtl.h"
81050 +#include "emit-rtl.h"
81051 +
81052 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81053 +
81054 +int plugin_is_GPL_compatible;
81055 +
81056 +static const char * const kalloc_functions[] = {
81057 + "__kmalloc",
81058 + "kmalloc",
81059 + "kmalloc_large",
81060 + "kmalloc_node",
81061 + "kmalloc_order",
81062 + "kmalloc_order_trace",
81063 + "kmalloc_slab",
81064 + "kzalloc",
81065 + "kzalloc_node",
81066 +};
81067 +
81068 +static struct plugin_info kallocstat_plugin_info = {
81069 + .version = "201111150100",
81070 +};
81071 +
81072 +static unsigned int execute_kallocstat(void);
81073 +
81074 +static struct gimple_opt_pass kallocstat_pass = {
81075 + .pass = {
81076 + .type = GIMPLE_PASS,
81077 + .name = "kallocstat",
81078 + .gate = NULL,
81079 + .execute = execute_kallocstat,
81080 + .sub = NULL,
81081 + .next = NULL,
81082 + .static_pass_number = 0,
81083 + .tv_id = TV_NONE,
81084 + .properties_required = 0,
81085 + .properties_provided = 0,
81086 + .properties_destroyed = 0,
81087 + .todo_flags_start = 0,
81088 + .todo_flags_finish = 0
81089 + }
81090 +};
81091 +
81092 +static bool is_kalloc(const char *fnname)
81093 +{
81094 + size_t i;
81095 +
81096 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
81097 + if (!strcmp(fnname, kalloc_functions[i]))
81098 + return true;
81099 + return false;
81100 +}
81101 +
81102 +static unsigned int execute_kallocstat(void)
81103 +{
81104 + basic_block bb;
81105 +
81106 + // 1. loop through BBs and GIMPLE statements
81107 + FOR_EACH_BB(bb) {
81108 + gimple_stmt_iterator gsi;
81109 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81110 + // gimple match:
81111 + tree fndecl, size;
81112 + gimple call_stmt;
81113 + const char *fnname;
81114 +
81115 + // is it a call
81116 + call_stmt = gsi_stmt(gsi);
81117 + if (!is_gimple_call(call_stmt))
81118 + continue;
81119 + fndecl = gimple_call_fndecl(call_stmt);
81120 + if (fndecl == NULL_TREE)
81121 + continue;
81122 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
81123 + continue;
81124 +
81125 + // is it a call to k*alloc
81126 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
81127 + if (!is_kalloc(fnname))
81128 + continue;
81129 +
81130 + // is the size arg the result of a simple const assignment
81131 + size = gimple_call_arg(call_stmt, 0);
81132 + while (true) {
81133 + gimple def_stmt;
81134 + expanded_location xloc;
81135 + size_t size_val;
81136 +
81137 + if (TREE_CODE(size) != SSA_NAME)
81138 + break;
81139 + def_stmt = SSA_NAME_DEF_STMT(size);
81140 + if (!def_stmt || !is_gimple_assign(def_stmt))
81141 + break;
81142 + if (gimple_num_ops(def_stmt) != 2)
81143 + break;
81144 + size = gimple_assign_rhs1(def_stmt);
81145 + if (!TREE_CONSTANT(size))
81146 + continue;
81147 + xloc = expand_location(gimple_location(def_stmt));
81148 + if (!xloc.file)
81149 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
81150 + size_val = TREE_INT_CST_LOW(size);
81151 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
81152 + break;
81153 + }
81154 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81155 +//debug_tree(gimple_call_fn(call_stmt));
81156 +//print_node(stderr, "pax", fndecl, 4);
81157 + }
81158 + }
81159 +
81160 + return 0;
81161 +}
81162 +
81163 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81164 +{
81165 + const char * const plugin_name = plugin_info->base_name;
81166 + struct register_pass_info kallocstat_pass_info = {
81167 + .pass = &kallocstat_pass.pass,
81168 + .reference_pass_name = "ssa",
81169 + .ref_pass_instance_number = 1,
81170 + .pos_op = PASS_POS_INSERT_AFTER
81171 + };
81172 +
81173 + if (!plugin_default_version_check(version, &gcc_version)) {
81174 + error(G_("incompatible gcc/plugin versions"));
81175 + return 1;
81176 + }
81177 +
81178 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
81179 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
81180 +
81181 + return 0;
81182 +}
81183 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
81184 new file mode 100644
81185 index 0000000..98011fa
81186 --- /dev/null
81187 +++ b/tools/gcc/kernexec_plugin.c
81188 @@ -0,0 +1,427 @@
81189 +/*
81190 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
81191 + * Licensed under the GPL v2
81192 + *
81193 + * Note: the choice of the license means that the compilation process is
81194 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81195 + * but for the kernel it doesn't matter since it doesn't link against
81196 + * any of the gcc libraries
81197 + *
81198 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
81199 + *
81200 + * TODO:
81201 + *
81202 + * BUGS:
81203 + * - none known
81204 + */
81205 +#include "gcc-plugin.h"
81206 +#include "config.h"
81207 +#include "system.h"
81208 +#include "coretypes.h"
81209 +#include "tree.h"
81210 +#include "tree-pass.h"
81211 +#include "flags.h"
81212 +#include "intl.h"
81213 +#include "toplev.h"
81214 +#include "plugin.h"
81215 +//#include "expr.h" where are you...
81216 +#include "diagnostic.h"
81217 +#include "plugin-version.h"
81218 +#include "tm.h"
81219 +#include "function.h"
81220 +#include "basic-block.h"
81221 +#include "gimple.h"
81222 +#include "rtl.h"
81223 +#include "emit-rtl.h"
81224 +#include "tree-flow.h"
81225 +
81226 +extern void print_gimple_stmt(FILE *, gimple, int, int);
81227 +extern rtx emit_move_insn(rtx x, rtx y);
81228 +
81229 +int plugin_is_GPL_compatible;
81230 +
81231 +static struct plugin_info kernexec_plugin_info = {
81232 + .version = "201111291120",
81233 + .help = "method=[bts|or]\tinstrumentation method\n"
81234 +};
81235 +
81236 +static unsigned int execute_kernexec_reload(void);
81237 +static unsigned int execute_kernexec_fptr(void);
81238 +static unsigned int execute_kernexec_retaddr(void);
81239 +static bool kernexec_cmodel_check(void);
81240 +
81241 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
81242 +static void (*kernexec_instrument_retaddr)(rtx);
81243 +
81244 +static struct gimple_opt_pass kernexec_reload_pass = {
81245 + .pass = {
81246 + .type = GIMPLE_PASS,
81247 + .name = "kernexec_reload",
81248 + .gate = kernexec_cmodel_check,
81249 + .execute = execute_kernexec_reload,
81250 + .sub = NULL,
81251 + .next = NULL,
81252 + .static_pass_number = 0,
81253 + .tv_id = TV_NONE,
81254 + .properties_required = 0,
81255 + .properties_provided = 0,
81256 + .properties_destroyed = 0,
81257 + .todo_flags_start = 0,
81258 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81259 + }
81260 +};
81261 +
81262 +static struct gimple_opt_pass kernexec_fptr_pass = {
81263 + .pass = {
81264 + .type = GIMPLE_PASS,
81265 + .name = "kernexec_fptr",
81266 + .gate = kernexec_cmodel_check,
81267 + .execute = execute_kernexec_fptr,
81268 + .sub = NULL,
81269 + .next = NULL,
81270 + .static_pass_number = 0,
81271 + .tv_id = TV_NONE,
81272 + .properties_required = 0,
81273 + .properties_provided = 0,
81274 + .properties_destroyed = 0,
81275 + .todo_flags_start = 0,
81276 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
81277 + }
81278 +};
81279 +
81280 +static struct rtl_opt_pass kernexec_retaddr_pass = {
81281 + .pass = {
81282 + .type = RTL_PASS,
81283 + .name = "kernexec_retaddr",
81284 + .gate = kernexec_cmodel_check,
81285 + .execute = execute_kernexec_retaddr,
81286 + .sub = NULL,
81287 + .next = NULL,
81288 + .static_pass_number = 0,
81289 + .tv_id = TV_NONE,
81290 + .properties_required = 0,
81291 + .properties_provided = 0,
81292 + .properties_destroyed = 0,
81293 + .todo_flags_start = 0,
81294 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
81295 + }
81296 +};
81297 +
81298 +static bool kernexec_cmodel_check(void)
81299 +{
81300 + tree section;
81301 +
81302 + if (ix86_cmodel != CM_KERNEL)
81303 + return false;
81304 +
81305 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
81306 + if (!section || !TREE_VALUE(section))
81307 + return true;
81308 +
81309 + section = TREE_VALUE(TREE_VALUE(section));
81310 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
81311 + return true;
81312 +
81313 + return false;
81314 +}
81315 +
81316 +/*
81317 + * add special KERNEXEC instrumentation: reload %r10 after it has been clobbered
81318 + */
81319 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
81320 +{
81321 + gimple asm_movabs_stmt;
81322 +
81323 + // build asm volatile("movabs $0x8000000000000000, %%r10\n\t" : : : );
81324 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r10\n\t", NULL, NULL, NULL, NULL);
81325 + gimple_asm_set_volatile(asm_movabs_stmt, true);
81326 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
81327 + update_stmt(asm_movabs_stmt);
81328 +}
81329 +
81330 +/*
81331 + * find all asm() stmts that clobber r10 and add a reload of r10
81332 + */
81333 +static unsigned int execute_kernexec_reload(void)
81334 +{
81335 + basic_block bb;
81336 +
81337 + // 1. loop through BBs and GIMPLE statements
81338 + FOR_EACH_BB(bb) {
81339 + gimple_stmt_iterator gsi;
81340 +
81341 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81342 + // gimple match: __asm__ ("" : : : "r10");
81343 + gimple asm_stmt;
81344 + size_t nclobbers;
81345 +
81346 + // is it an asm ...
81347 + asm_stmt = gsi_stmt(gsi);
81348 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
81349 + continue;
81350 +
81351 + // ... clobbering r10
81352 + nclobbers = gimple_asm_nclobbers(asm_stmt);
81353 + while (nclobbers--) {
81354 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
81355 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r10"))
81356 + continue;
81357 + kernexec_reload_fptr_mask(&gsi);
81358 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
81359 + break;
81360 + }
81361 + }
81362 + }
81363 +
81364 + return 0;
81365 +}
81366 +
81367 +/*
81368 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
81369 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
81370 + */
81371 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
81372 +{
81373 + gimple assign_intptr, assign_new_fptr, call_stmt;
81374 + tree intptr, old_fptr, new_fptr, kernexec_mask;
81375 +
81376 + call_stmt = gsi_stmt(*gsi);
81377 + old_fptr = gimple_call_fn(call_stmt);
81378 +
81379 + // create temporary unsigned long variable used for bitops and cast fptr to it
81380 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
81381 + add_referenced_var(intptr);
81382 + mark_sym_for_renaming(intptr);
81383 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
81384 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81385 + update_stmt(assign_intptr);
81386 +
81387 + // apply logical or to temporary unsigned long and bitmask
81388 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
81389 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
81390 + assign_intptr = gimple_build_assign(intptr, fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask));
81391 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
81392 + update_stmt(assign_intptr);
81393 +
81394 + // cast temporary unsigned long back to a temporary fptr variable
81395 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
81396 + add_referenced_var(new_fptr);
81397 + mark_sym_for_renaming(new_fptr);
81398 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
81399 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
81400 + update_stmt(assign_new_fptr);
81401 +
81402 + // replace call stmt fn with the new fptr
81403 + gimple_call_set_fn(call_stmt, new_fptr);
81404 + update_stmt(call_stmt);
81405 +}
81406 +
81407 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
81408 +{
81409 + gimple asm_or_stmt, call_stmt;
81410 + tree old_fptr, new_fptr, input, output;
81411 + VEC(tree, gc) *inputs = NULL;
81412 + VEC(tree, gc) *outputs = NULL;
81413 +
81414 + call_stmt = gsi_stmt(*gsi);
81415 + old_fptr = gimple_call_fn(call_stmt);
81416 +
81417 + // create temporary fptr variable
81418 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
81419 + add_referenced_var(new_fptr);
81420 + mark_sym_for_renaming(new_fptr);
81421 +
81422 + // build asm volatile("orq %%r10, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
81423 + input = build_tree_list(NULL_TREE, build_string(2, "0"));
81424 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
81425 + output = build_tree_list(NULL_TREE, build_string(3, "=r"));
81426 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
81427 + VEC_safe_push(tree, gc, inputs, input);
81428 + VEC_safe_push(tree, gc, outputs, output);
81429 + asm_or_stmt = gimple_build_asm_vec("orq %%r10, %0\n\t", inputs, outputs, NULL, NULL);
81430 + gimple_asm_set_volatile(asm_or_stmt, true);
81431 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
81432 + update_stmt(asm_or_stmt);
81433 +
81434 + // replace call stmt fn with the new fptr
81435 + gimple_call_set_fn(call_stmt, new_fptr);
81436 + update_stmt(call_stmt);
81437 +}
81438 +
81439 +/*
81440 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
81441 + */
81442 +static unsigned int execute_kernexec_fptr(void)
81443 +{
81444 + basic_block bb;
81445 +
81446 + // 1. loop through BBs and GIMPLE statements
81447 + FOR_EACH_BB(bb) {
81448 + gimple_stmt_iterator gsi;
81449 +
81450 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
81451 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
81452 + tree fn;
81453 + gimple call_stmt;
81454 +
81455 + // is it a call ...
81456 + call_stmt = gsi_stmt(gsi);
81457 + if (!is_gimple_call(call_stmt))
81458 + continue;
81459 + fn = gimple_call_fn(call_stmt);
81460 + if (TREE_CODE(fn) == ADDR_EXPR)
81461 + continue;
81462 + if (TREE_CODE(fn) != SSA_NAME)
81463 + gcc_unreachable();
81464 +
81465 + // ... through a function pointer
81466 + fn = SSA_NAME_VAR(fn);
81467 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL)
81468 + continue;
81469 + fn = TREE_TYPE(fn);
81470 + if (TREE_CODE(fn) != POINTER_TYPE)
81471 + continue;
81472 + fn = TREE_TYPE(fn);
81473 + if (TREE_CODE(fn) != FUNCTION_TYPE)
81474 + continue;
81475 +
81476 + kernexec_instrument_fptr(&gsi);
81477 +
81478 +//debug_tree(gimple_call_fn(call_stmt));
81479 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
81480 + }
81481 + }
81482 +
81483 + return 0;
81484 +}
81485 +
81486 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
81487 +static void kernexec_instrument_retaddr_bts(rtx insn)
81488 +{
81489 + rtx btsq;
81490 + rtvec argvec, constraintvec, labelvec;
81491 + int line;
81492 +
81493 + // create asm volatile("btsq $63,(%%rsp)":::)
81494 + argvec = rtvec_alloc(0);
81495 + constraintvec = rtvec_alloc(0);
81496 + labelvec = rtvec_alloc(0);
81497 + line = expand_location(RTL_LOCATION(insn)).line;
81498 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81499 + MEM_VOLATILE_P(btsq) = 1;
81500 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
81501 + emit_insn_before(btsq, insn);
81502 +}
81503 +
81504 +// add special KERNEXEC instrumentation: orq %r10,(%rsp) just before retn
81505 +static void kernexec_instrument_retaddr_or(rtx insn)
81506 +{
81507 + rtx orq;
81508 + rtvec argvec, constraintvec, labelvec;
81509 + int line;
81510 +
81511 + // create asm volatile("orq %%r10,(%%rsp)":::)
81512 + argvec = rtvec_alloc(0);
81513 + constraintvec = rtvec_alloc(0);
81514 + labelvec = rtvec_alloc(0);
81515 + line = expand_location(RTL_LOCATION(insn)).line;
81516 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r10,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
81517 + MEM_VOLATILE_P(orq) = 1;
81518 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
81519 + emit_insn_before(orq, insn);
81520 +}
81521 +
81522 +/*
81523 + * find all asm level function returns and forcibly set the highest bit of the return address
81524 + */
81525 +static unsigned int execute_kernexec_retaddr(void)
81526 +{
81527 + rtx insn;
81528 +
81529 + // 1. find function returns
81530 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
81531 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
81532 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
81533 + rtx body;
81534 +
81535 + // is it a retn
81536 + if (!JUMP_P(insn))
81537 + continue;
81538 + body = PATTERN(insn);
81539 + if (GET_CODE(body) == PARALLEL)
81540 + body = XVECEXP(body, 0, 0);
81541 + if (GET_CODE(body) != RETURN)
81542 + continue;
81543 + kernexec_instrument_retaddr(insn);
81544 + }
81545 +
81546 +// print_simple_rtl(stderr, get_insns());
81547 +// print_rtl(stderr, get_insns());
81548 +
81549 + return 0;
81550 +}
81551 +
81552 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81553 +{
81554 + const char * const plugin_name = plugin_info->base_name;
81555 + const int argc = plugin_info->argc;
81556 + const struct plugin_argument * const argv = plugin_info->argv;
81557 + int i;
81558 + struct register_pass_info kernexec_reload_pass_info = {
81559 + .pass = &kernexec_reload_pass.pass,
81560 + .reference_pass_name = "ssa",
81561 + .ref_pass_instance_number = 1,
81562 + .pos_op = PASS_POS_INSERT_AFTER
81563 + };
81564 + struct register_pass_info kernexec_fptr_pass_info = {
81565 + .pass = &kernexec_fptr_pass.pass,
81566 + .reference_pass_name = "ssa",
81567 + .ref_pass_instance_number = 1,
81568 + .pos_op = PASS_POS_INSERT_AFTER
81569 + };
81570 + struct register_pass_info kernexec_retaddr_pass_info = {
81571 + .pass = &kernexec_retaddr_pass.pass,
81572 + .reference_pass_name = "pro_and_epilogue",
81573 + .ref_pass_instance_number = 1,
81574 + .pos_op = PASS_POS_INSERT_AFTER
81575 + };
81576 +
81577 + if (!plugin_default_version_check(version, &gcc_version)) {
81578 + error(G_("incompatible gcc/plugin versions"));
81579 + return 1;
81580 + }
81581 +
81582 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
81583 +
81584 + if (TARGET_64BIT == 0)
81585 + return 0;
81586 +
81587 + for (i = 0; i < argc; ++i) {
81588 + if (!strcmp(argv[i].key, "method")) {
81589 + if (!argv[i].value) {
81590 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81591 + continue;
81592 + }
81593 + if (!strcmp(argv[i].value, "bts")) {
81594 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
81595 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
81596 + } else if (!strcmp(argv[i].value, "or")) {
81597 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
81598 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
81599 + fix_register("r10", 1, 1);
81600 + } else
81601 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
81602 + continue;
81603 + }
81604 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
81605 + }
81606 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
81607 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
81608 +
81609 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
81610 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
81611 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
81612 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
81613 +
81614 + return 0;
81615 +}
81616 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
81617 new file mode 100644
81618 index 0000000..9788bfe
81619 --- /dev/null
81620 +++ b/tools/gcc/latent_entropy_plugin.c
81621 @@ -0,0 +1,291 @@
81622 +/*
81623 + * Copyright 2012 by the PaX Team <pageexec@freemail.hu>
81624 + * Licensed under the GPL v2
81625 + *
81626 + * Note: the choice of the license means that the compilation process is
81627 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
81628 + * but for the kernel it doesn't matter since it doesn't link against
81629 + * any of the gcc libraries
81630 + *
81631 + * gcc plugin to help generate a little bit of entropy from program state,
81632 + * used during boot in the kernel
81633 + *
81634 + * TODO:
81635 + * - quite a few, see the comments :)
81636 + *
81637 + * BUGS:
81638 + * - none known
81639 + */
81640 +#include "gcc-plugin.h"
81641 +#include "config.h"
81642 +#include "system.h"
81643 +#include "coretypes.h"
81644 +#include "tree.h"
81645 +#include "tree-pass.h"
81646 +#include "flags.h"
81647 +#include "intl.h"
81648 +#include "toplev.h"
81649 +#include "plugin.h"
81650 +//#include "expr.h" where are you...
81651 +#include "diagnostic.h"
81652 +#include "plugin-version.h"
81653 +#include "tm.h"
81654 +#include "function.h"
81655 +#include "basic-block.h"
81656 +#include "gimple.h"
81657 +#include "rtl.h"
81658 +#include "emit-rtl.h"
81659 +#include "tree-flow.h"
81660 +#include "cpplib.h"
81661 +#include "c-pragma.h"
81662 +
81663 +#include "linux/kconfig.h"
81664 +
81665 +int plugin_is_GPL_compatible;
81666 +
81667 +static tree latent_entropy_decl;
81668 +
81669 +static struct plugin_info latent_entropy_plugin_info = {
81670 + .version = "201207202140",
81671 + .help = NULL
81672 +};
81673 +
81674 +static unsigned int execute_latent_entropy(void);
81675 +static bool gate_latent_entropy(void);
81676 +
81677 +static struct gimple_opt_pass latent_entropy_pass = {
81678 + .pass = {
81679 + .type = GIMPLE_PASS,
81680 + .name = "latent_entropy",
81681 + .gate = gate_latent_entropy,
81682 + .execute = execute_latent_entropy,
81683 + .sub = NULL,
81684 + .next = NULL,
81685 + .static_pass_number = 0,
81686 + .tv_id = TV_NONE,
81687 + .properties_required = PROP_gimple_leh | PROP_cfg,
81688 + .properties_provided = 0,
81689 + .properties_destroyed = 0,
81690 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
81691 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
81692 + }
81693 +};
81694 +
81695 +// for kernel use we just want to instrument some of the boot code
81696 +// for userland use this would need changes
81697 +static bool gate_latent_entropy(void)
81698 +{
81699 + tree section_attr;
81700 + const char *section_name;
81701 +
81702 + // don't instrument modules
81703 + if (cpp_defined(parse_in, (const unsigned char *)"MODULE", 6))
81704 + return false;
81705 +
81706 + // don't instrument normal code
81707 + section_attr = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
81708 + if (!section_attr || !TREE_VALUE(section_attr))
81709 + return false;
81710 +
81711 + section_name = TREE_STRING_POINTER(TREE_VALUE(TREE_VALUE(section_attr)));
81712 +
81713 + // instrument code in boot related sections
81714 + if (!strncmp(section_name, ".init.text", 10))
81715 + return true;
81716 +
81717 + if (!strncmp(section_name, ".initcall", 9))
81718 + return true;
81719 +
81720 + if (!strncmp(section_name, ".con_initcall", 13))
81721 + return true;
81722 +
81723 + if (!strncmp(section_name, ".security_initcall", 18))
81724 + return true;
81725 +
81726 +#ifndef CONFIG_HOTPLUG
81727 + if (!strncmp(section_name, ".devinit.text", 13))
81728 + return true;
81729 +#endif
81730 +
81731 +#ifndef CONFIG_HOTPLUG_CPU
81732 + if (!strncmp(section_name, ".cpuinit.text", 13))
81733 + return true;
81734 +#endif
81735 +
81736 +#ifndef CONFIG_HOTPLUG_MEMORY
81737 + if (!strncmp(section_name, ".meminit.text", 13))
81738 + return true;
81739 +#endif
81740 +
81741 + // TODO check whether cfun is static and all its callers meet the above criteria
81742 + return false;
81743 +}
81744 +
81745 +static unsigned HOST_WIDE_INT seed;
81746 +static unsigned HOST_WIDE_INT get_random_const(void)
81747 +{
81748 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
81749 + return seed;
81750 +}
81751 +
81752 +static enum tree_code get_op(tree *rhs)
81753 +{
81754 + static enum tree_code op;
81755 + unsigned HOST_WIDE_INT random_const;
81756 +
81757 + random_const = get_random_const();
81758 +
81759 + switch (op) {
81760 + case BIT_XOR_EXPR:
81761 + op = PLUS_EXPR;
81762 + break;
81763 +
81764 + case PLUS_EXPR:
81765 + if (rhs) {
81766 + op = LROTATE_EXPR;
81767 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
81768 + break;
81769 + }
81770 +
81771 + case LROTATE_EXPR:
81772 + default:
81773 + op = BIT_XOR_EXPR;
81774 + break;
81775 + }
81776 + if (rhs)
81777 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
81778 + return op;
81779 +}
81780 +
81781 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
81782 +{
81783 + gimple_stmt_iterator gsi;
81784 + gimple assign;
81785 + tree addxorrol, rhs;
81786 + enum tree_code op;
81787 +
81788 + op = get_op(&rhs);
81789 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
81790 + assign = gimple_build_assign(local_entropy, addxorrol);
81791 + find_referenced_vars_in(assign);
81792 +//debug_bb(bb);
81793 + gsi = gsi_after_labels(bb);
81794 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
81795 + update_stmt(assign);
81796 +}
81797 +
81798 +static void perturb_latent_entropy(basic_block bb, tree rhs)
81799 +{
81800 + gimple_stmt_iterator gsi;
81801 + gimple assign;
81802 + tree addxorrol, temp;
81803 +
81804 + // 1. create temporary copy of latent_entropy
81805 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
81806 + add_referenced_var(temp);
81807 + mark_sym_for_renaming(temp);
81808 +
81809 + // 2. read...
81810 + assign = gimple_build_assign(temp, latent_entropy_decl);
81811 + find_referenced_vars_in(assign);
81812 + gsi = gsi_after_labels(bb);
81813 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
81814 + update_stmt(assign);
81815 +
81816 + // 3. ...modify...
81817 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
81818 + assign = gimple_build_assign(temp, addxorrol);
81819 + find_referenced_vars_in(assign);
81820 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
81821 + update_stmt(assign);
81822 +
81823 + // 4. ...write latent_entropy
81824 + assign = gimple_build_assign(latent_entropy_decl, temp);
81825 + find_referenced_vars_in(assign);
81826 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
81827 + update_stmt(assign);
81828 +
81829 + // TODO we could mix in more local state such as function return values, etc
81830 +}
81831 +
81832 +static unsigned int execute_latent_entropy(void)
81833 +{
81834 + basic_block bb;
81835 + gimple assign;
81836 + gimple_stmt_iterator gsi;
81837 + tree local_entropy;
81838 +
81839 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
81840 +
81841 + // 1. create local entropy variable
81842 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
81843 + add_referenced_var(local_entropy);
81844 + mark_sym_for_renaming(local_entropy);
81845 +
81846 + // 2. initialize local entropy variable
81847 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
81848 + if (dom_info_available_p(CDI_DOMINATORS))
81849 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
81850 + gsi = gsi_start_bb(bb);
81851 +
81852 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
81853 +// gimple_set_location(assign, loc);
81854 + find_referenced_vars_in(assign);
81855 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
81856 + update_stmt(assign);
81857 + bb = bb->next_bb;
81858 +
81859 + // 3. instrument each BB with an operation on the local entropy variable
81860 + while (bb != EXIT_BLOCK_PTR) {
81861 + perturb_local_entropy(bb, local_entropy);
81862 + bb = bb->next_bb;
81863 + };
81864 +
81865 + // 4. mix local entropy into the global entropy variable
81866 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
81867 + return 0;
81868 +}
81869 +
81870 +static void start_unit_callback(void *gcc_data, void *user_data)
81871 +{
81872 + // extern u64 latent_entropy
81873 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), unsigned_intDI_type_node);
81874 +
81875 + TREE_STATIC(latent_entropy_decl) = 1;
81876 + TREE_PUBLIC(latent_entropy_decl) = 1;
81877 + DECL_EXTERNAL(latent_entropy_decl) = 1;
81878 + DECL_ARTIFICIAL(latent_entropy_decl) = 1;
81879 + DECL_INITIAL(latent_entropy_decl) = NULL;
81880 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
81881 +// varpool_finalize_decl(latent_entropy_decl);
81882 +// varpool_mark_needed_node(latent_entropy_decl);
81883 +
81884 +#if BUILDING_GCC_VERSION >= 4007
81885 + seed = get_random_seed(false);
81886 +#else
81887 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
81888 + seed *= seed;
81889 +#endif
81890 +}
81891 +
81892 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
81893 +{
81894 + const char * const plugin_name = plugin_info->base_name;
81895 + struct register_pass_info latent_entropy_pass_info = {
81896 + .pass = &latent_entropy_pass.pass,
81897 + .reference_pass_name = "optimized",
81898 + .ref_pass_instance_number = 1,
81899 + .pos_op = PASS_POS_INSERT_BEFORE
81900 + };
81901 +
81902 + if (!plugin_default_version_check(version, &gcc_version)) {
81903 + error(G_("incompatible gcc/plugin versions"));
81904 + return 1;
81905 + }
81906 +
81907 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
81908 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
81909 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
81910 +
81911 + return 0;
81912 +}
81913 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
81914 new file mode 100644
81915 index 0000000..daaa86c
81916 --- /dev/null
81917 +++ b/tools/gcc/size_overflow_hash.data
81918 @@ -0,0 +1,2486 @@
81919 +_000001_hash alloc_dr 2 65495 _000001_hash NULL
81920 +_000002_hash __copy_from_user 3 10918 _000002_hash NULL
81921 +_000003_hash copy_from_user 3 17559 _000003_hash NULL
81922 +_000004_hash __copy_from_user_inatomic 3 4365 _000004_hash NULL
81923 +_000005_hash __copy_from_user_nocache 3 39351 _000005_hash NULL
81924 +_000006_hash __copy_to_user_inatomic 3 19214 _000006_hash NULL
81925 +_000007_hash do_xip_mapping_read 5 60297 _000007_hash NULL
81926 +_000008_hash hugetlbfs_read 3 11268 _000008_hash NULL
81927 +_000009_hash kmalloc 1 60432 _002597_hash NULL nohasharray
81928 +_000010_hash kmalloc_array 1-2 9444 _000010_hash NULL
81929 +_000012_hash kmalloc_slab 1 11917 _000012_hash NULL
81930 +_000013_hash kmemdup 2 64015 _000013_hash NULL
81931 +_000014_hash __krealloc 2 14857 _000331_hash NULL nohasharray
81932 +_000015_hash memdup_user 2 59590 _000015_hash NULL
81933 +_000016_hash module_alloc 1 63630 _000016_hash NULL
81934 +_000017_hash read_default_ldt 2 14302 _000017_hash NULL
81935 +_000018_hash read_kcore 3 63488 _000018_hash NULL
81936 +_000019_hash read_ldt 2 47570 _000019_hash NULL
81937 +_000020_hash read_zero 3 19366 _000020_hash NULL
81938 +_000021_hash __vmalloc_node 1 39308 _000021_hash NULL
81939 +_000022_hash vm_map_ram 2 23078 _001054_hash NULL nohasharray
81940 +_000023_hash aa_simple_write_to_buffer 4-3 49683 _000023_hash NULL
81941 +_000024_hash ablkcipher_copy_iv 3 64140 _000024_hash NULL
81942 +_000025_hash ablkcipher_next_slow 4 47274 _000025_hash NULL
81943 +_000026_hash acpi_battery_write_alarm 3 1240 _000026_hash NULL
81944 +_000027_hash acpi_os_allocate 1 14892 _000027_hash NULL
81945 +_000028_hash acpi_system_write_wakeup_device 3 34853 _000028_hash NULL
81946 +_000029_hash adu_write 3 30487 _000029_hash NULL
81947 +_000030_hash aer_inject_write 3 52399 _000030_hash NULL
81948 +_000031_hash afs_alloc_flat_call 2-3 36399 _000031_hash NULL
81949 +_000033_hash afs_proc_cells_write 3 61139 _000033_hash NULL
81950 +_000034_hash afs_proc_rootcell_write 3 15822 _000034_hash NULL
81951 +_000035_hash agp_3_5_isochronous_node_enable 3 49465 _000035_hash NULL
81952 +_000036_hash agp_alloc_page_array 1 22554 _000036_hash NULL
81953 +_000037_hash ah_alloc_tmp 2 54378 _000037_hash NULL
81954 +_000038_hash ahash_setkey_unaligned 3 33521 _000038_hash NULL
81955 +_000039_hash alg_setkey 3 31485 _000039_hash NULL
81956 +_000040_hash aligned_kmalloc 1 3628 _000040_hash NULL
81957 +_000041_hash alloc_context 1 3194 _000041_hash NULL
81958 +_000042_hash alloc_ep_req 2 54860 _000042_hash NULL
81959 +_000043_hash alloc_fdmem 1 27083 _000043_hash NULL
81960 +_000044_hash alloc_flex_gd 1 57259 _000044_hash NULL
81961 +_000045_hash alloc_sglist 1-3-2 22960 _000045_hash NULL
81962 +_000046_hash aoedev_flush 2 44398 _000046_hash NULL
81963 +_000047_hash append_to_buffer 3 63550 _000047_hash NULL
81964 +_000048_hash asix_read_cmd 5 13245 _000048_hash NULL
81965 +_000049_hash asix_write_cmd 5 58192 _000049_hash NULL
81966 +_000050_hash asn1_octets_decode 2 9991 _000050_hash NULL
81967 +_000051_hash asn1_oid_decode 2 4999 _000051_hash NULL
81968 +_000052_hash at76_set_card_command 4 4471 _000052_hash NULL
81969 +_000053_hash ath6kl_add_bss_if_needed 6 24317 _000053_hash NULL
81970 +_000054_hash ath6kl_debug_roam_tbl_event 3 5224 _000054_hash NULL
81971 +_000055_hash ath6kl_mgmt_powersave_ap 6 13791 _000055_hash NULL
81972 +_000056_hash ath6kl_send_go_probe_resp 3 21113 _000056_hash NULL
81973 +_000057_hash ath6kl_set_ap_probe_resp_ies 3 50539 _000057_hash NULL
81974 +_000058_hash ath6kl_set_assoc_req_ies 3 43185 _000058_hash NULL
81975 +_000059_hash ath6kl_wmi_bssinfo_event_rx 3 2275 _000059_hash NULL
81976 +_000060_hash ath6kl_wmi_send_action_cmd 7 58860 _000060_hash NULL
81977 +_000061_hash __ath6kl_wmi_send_mgmt_cmd 7 38971 _000061_hash NULL
81978 +_000062_hash attach_hdlc_protocol 3 19986 _000062_hash NULL
81979 +_000063_hash audio_write 4 54261 _001597_hash NULL nohasharray
81980 +_000064_hash audit_unpack_string 3 13748 _000064_hash NULL
81981 +_000065_hash av7110_vbi_write 3 34384 _000065_hash NULL
81982 +_000066_hash ax25_setsockopt 5 42740 _000066_hash NULL
81983 +_000067_hash b43_debugfs_write 3 34838 _000067_hash NULL
81984 +_000068_hash b43legacy_debugfs_write 3 28556 _000068_hash NULL
81985 +_000069_hash bch_alloc 1 4593 _000069_hash NULL
81986 +_000070_hash befs_nls2utf 3 17163 _000070_hash NULL
81987 +_000071_hash befs_utf2nls 3 25628 _000071_hash NULL
81988 +_000072_hash bfad_debugfs_write_regrd 3 15218 _000072_hash NULL
81989 +_000073_hash bfad_debugfs_write_regwr 3 61841 _000073_hash NULL
81990 +_000074_hash bio_alloc_map_data 1-2 50782 _000074_hash NULL
81991 +_000076_hash bio_kmalloc 2 54672 _000076_hash NULL
81992 +_000077_hash blkcipher_copy_iv 3 24075 _000077_hash NULL
81993 +_000078_hash blkcipher_next_slow 4 52733 _000078_hash NULL
81994 +_000079_hash bl_pipe_downcall 3 34264 _000079_hash NULL
81995 +_000080_hash bnad_debugfs_write_regrd 3 6706 _000080_hash NULL
81996 +_000081_hash bnad_debugfs_write_regwr 3 57500 _000081_hash NULL
81997 +_000082_hash bnx2fc_cmd_mgr_alloc 2-3 24873 _000082_hash NULL
81998 +_000084_hash bnx2_nvram_write 4 7790 _000084_hash NULL
81999 +_000085_hash brcmf_sdbrcm_downloadvars 3 42064 _000085_hash NULL
82000 +_000086_hash btmrvl_gpiogap_write 3 35053 _000086_hash NULL
82001 +_000087_hash btmrvl_hscfgcmd_write 3 27143 _000087_hash NULL
82002 +_000088_hash btmrvl_hscmd_write 3 27089 _000088_hash NULL
82003 +_000089_hash btmrvl_hsmode_write 3 42252 _000089_hash NULL
82004 +_000090_hash btmrvl_pscmd_write 3 29504 _000090_hash NULL
82005 +_000091_hash btmrvl_psmode_write 3 3703 _000091_hash NULL
82006 +_000092_hash btrfs_alloc_delayed_item 1 11678 _000092_hash NULL
82007 +_000093_hash cache_do_downcall 3 6926 _000093_hash NULL
82008 +_000094_hash cachefiles_cook_key 2 33274 _000094_hash NULL
82009 +_000095_hash cachefiles_daemon_write 3 43535 _000095_hash NULL
82010 +_000096_hash capi_write 3 35104 _000096_hash NULL
82011 +_000097_hash carl9170_debugfs_write 3 50857 _000097_hash NULL
82012 +_000098_hash cciss_allocate_sg_chain_blocks 2-3 5368 _000098_hash NULL
82013 +_000100_hash cciss_proc_write 3 10259 _000100_hash NULL
82014 +_000101_hash cdrom_read_cdda_old 4 27664 _000101_hash NULL
82015 +_000102_hash ceph_alloc_page_vector 1 18710 _000102_hash NULL
82016 +_000103_hash ceph_buffer_new 1 35974 _000103_hash NULL
82017 +_000104_hash ceph_copy_user_to_page_vector 4 656 _000104_hash NULL
82018 +_000105_hash ceph_get_direct_page_vector 2 41917 _000105_hash NULL
82019 +_000106_hash ceph_msg_new 2 5846 _000106_hash NULL
82020 +_000107_hash ceph_setxattr 4 18913 _000107_hash NULL
82021 +_000108_hash cfi_read_pri 3 24366 _000108_hash NULL
82022 +_000109_hash cgroup_write_string 5 10900 _000109_hash NULL
82023 +_000110_hash cgroup_write_X64 5 54514 _000110_hash NULL
82024 +_000111_hash change_xattr 5 61390 _000111_hash NULL
82025 +_000112_hash check_load_and_stores 2 2143 _000112_hash NULL
82026 +_000113_hash cifs_idmap_key_instantiate 3 54503 _000113_hash NULL
82027 +_000114_hash cifs_security_flags_proc_write 3 5484 _000114_hash NULL
82028 +_000115_hash cifs_setxattr 4 23957 _000115_hash NULL
82029 +_000116_hash cifs_spnego_key_instantiate 3 23588 _000116_hash NULL
82030 +_000117_hash ci_ll_write 4 3740 _000117_hash NULL
82031 +_000118_hash cld_pipe_downcall 3 15058 _000118_hash NULL
82032 +_000119_hash clear_refs_write 3 61904 _000119_hash NULL
82033 +_000120_hash clusterip_proc_write 3 44729 _000120_hash NULL
82034 +_000121_hash cm4040_write 3 58079 _000121_hash NULL
82035 +_000122_hash cm_copy_private_data 2 3649 _000122_hash NULL
82036 +_000123_hash cmm_write 3 2896 _000123_hash NULL
82037 +_000124_hash cm_write 3 36858 _000124_hash NULL
82038 +_000125_hash coda_psdev_write 3 1711 _000125_hash NULL
82039 +_000126_hash codec_reg_read_file 3 36280 _000126_hash NULL
82040 +_000127_hash command_file_write 3 31318 _000127_hash NULL
82041 +_000128_hash command_write 3 58841 _000128_hash NULL
82042 +_000129_hash comm_write 3 44537 _001532_hash NULL nohasharray
82043 +_000130_hash concat_writev 3 21451 _000130_hash NULL
82044 +_000131_hash copy_and_check 3 19089 _000131_hash NULL
82045 +_000132_hash copy_from_user_toio 3 31966 _000132_hash NULL
82046 +_000133_hash copy_items 6 50140 _000133_hash NULL
82047 +_000134_hash copy_macs 4 45534 _000134_hash NULL
82048 +_000135_hash __copy_to_user 3 17551 _000135_hash NULL
82049 +_000136_hash copy_vm86_regs_from_user 3 45340 _000136_hash NULL
82050 +_000137_hash cosa_write 3 1774 _000137_hash NULL
82051 +_000138_hash create_entry 2 33479 _000138_hash NULL
82052 +_000139_hash create_queues 2-3 9088 _000139_hash NULL
82053 +_000141_hash create_xattr 5 54106 _000141_hash NULL
82054 +_000142_hash create_xattr_datum 5 33356 _000142_hash NULL
82055 +_000143_hash csum_partial_copy_fromiovecend 3-4 9957 _000143_hash NULL
82056 +_000145_hash ctrl_out 3-5 8712 _000145_hash NULL
82057 +_000147_hash cx24116_writeregN 4 41975 _000147_hash NULL
82058 +_000148_hash cxacru_cm_get_array 4 4412 _000148_hash NULL
82059 +_000149_hash cxgbi_alloc_big_mem 1 4707 _000149_hash NULL
82060 +_000150_hash dac960_user_command_proc_write 3 3071 _000150_hash NULL
82061 +_000151_hash datablob_format 2 39571 _002156_hash NULL nohasharray
82062 +_000152_hash dccp_feat_clone_sp_val 3 11942 _000152_hash NULL
82063 +_000153_hash dccp_setsockopt_ccid 4 30701 _000153_hash NULL
82064 +_000154_hash dccp_setsockopt_cscov 2 37766 _000154_hash NULL
82065 +_000155_hash dccp_setsockopt_service 4 65336 _000155_hash NULL
82066 +_000156_hash ddb_output_write 3 31902 _000156_hash NULL
82067 +_000157_hash ddebug_proc_write 3 18055 _000157_hash NULL
82068 +_000158_hash dev_config 3 8506 _000158_hash NULL
82069 +_000159_hash device_write 3 45156 _000159_hash NULL
82070 +_000160_hash devm_kzalloc 2 4966 _000160_hash NULL
82071 +_000161_hash devres_alloc 2 551 _000161_hash NULL
82072 +_000162_hash dfs_file_write 3 41196 _000162_hash NULL
82073 +_000163_hash direct_entry 3 38836 _000163_hash NULL
82074 +_000164_hash dispatch_proc_write 3 44320 _000164_hash NULL
82075 +_000165_hash diva_os_copy_from_user 4 7792 _000165_hash NULL
82076 +_000166_hash dlm_alloc_pagevec 1 54296 _000166_hash NULL
82077 +_000167_hash dlmfs_file_read 3 28385 _000167_hash NULL
82078 +_000168_hash dlmfs_file_write 3 6892 _000168_hash NULL
82079 +_000169_hash dm_read 3 15674 _000169_hash NULL
82080 +_000170_hash dm_write 3 2513 _000170_hash NULL
82081 +_000171_hash __dn_setsockopt 5 13060 _000171_hash NULL
82082 +_000172_hash dns_query 3 9676 _000172_hash NULL
82083 +_000173_hash dns_resolver_instantiate 3 63314 _000173_hash NULL
82084 +_000174_hash do_add_counters 3 3992 _000174_hash NULL
82085 +_000175_hash __do_config_autodelink 3 58763 _000175_hash NULL
82086 +_000176_hash do_ip_setsockopt 5 41852 _000176_hash NULL
82087 +_000177_hash do_ipv6_setsockopt 5 18215 _000177_hash NULL
82088 +_000178_hash do_ip_vs_set_ctl 4 48641 _000178_hash NULL
82089 +_000179_hash do_kimage_alloc 3 64827 _000179_hash NULL
82090 +_000180_hash do_register_entry 4 29478 _000180_hash NULL
82091 +_000181_hash do_tty_write 5 44896 _000181_hash NULL
82092 +_000182_hash do_update_counters 4 2259 _000182_hash NULL
82093 +_000183_hash dsp_write 2 46218 _000183_hash NULL
82094 +_000184_hash dup_to_netobj 3 26363 _000184_hash NULL
82095 +_000185_hash dvb_aplay 3 56296 _000185_hash NULL
82096 +_000186_hash dvb_ca_en50221_io_write 3 43533 _000186_hash NULL
82097 +_000187_hash dvbdmx_write 3 19423 _000187_hash NULL
82098 +_000188_hash dvb_play 3 50814 _000188_hash NULL
82099 +_000189_hash dw210x_op_rw 6 39915 _000189_hash NULL
82100 +_000190_hash dwc3_link_state_write 3 12641 _000190_hash NULL
82101 +_000191_hash dwc3_mode_write 3 51997 _000191_hash NULL
82102 +_000192_hash dwc3_testmode_write 3 30516 _000192_hash NULL
82103 +_000193_hash ecryptfs_copy_filename 4 11868 _000193_hash NULL
82104 +_000194_hash ecryptfs_miscdev_write 3 26847 _000194_hash NULL
82105 +_000195_hash ecryptfs_send_miscdev 2 64816 _000195_hash NULL
82106 +_000196_hash efx_tsoh_heap_alloc 2 58545 _000196_hash NULL
82107 +_000197_hash emi26_writememory 4 57908 _000197_hash NULL
82108 +_000198_hash emi62_writememory 4 29731 _000198_hash NULL
82109 +_000199_hash encrypted_instantiate 3 3168 _000199_hash NULL
82110 +_000200_hash encrypted_update 3 13414 _000200_hash NULL
82111 +_000201_hash ep0_write 3 14536 _001328_hash NULL nohasharray
82112 +_000202_hash ep_read 3 58813 _000202_hash NULL
82113 +_000203_hash ep_write 3 59008 _000203_hash NULL
82114 +_000204_hash erst_dbg_write 3 46715 _000204_hash NULL
82115 +_000205_hash esp_alloc_tmp 2 40558 _000205_hash NULL
82116 +_000206_hash exofs_read_lookup_dev_table 3 17733 _000206_hash NULL
82117 +_000207_hash ext4_kvmalloc 1 14796 _000207_hash NULL
82118 +_000208_hash ezusb_writememory 4 45976 _000208_hash NULL
82119 +_000209_hash fanotify_write 3 64623 _000209_hash NULL
82120 +_000210_hash fd_copyin 3 56247 _000210_hash NULL
82121 +_000211_hash ffs_epfile_io 3 64886 _000211_hash NULL
82122 +_000212_hash ffs_prepare_buffer 2 59892 _000212_hash NULL
82123 +_000213_hash f_hidg_write 3 7932 _000213_hash NULL
82124 +_000214_hash file_read_actor 4 1401 _000214_hash NULL
82125 +_000215_hash fill_write_buffer 3 3142 _000215_hash NULL
82126 +_000216_hash fl_create 5 56435 _000216_hash NULL
82127 +_000217_hash ftdi_elan_write 3 57309 _000217_hash NULL
82128 +_000218_hash fuse_conn_limit_write 3 30777 _000218_hash NULL
82129 +_000219_hash fw_iso_buffer_init 3 54582 _000219_hash NULL
82130 +_000220_hash garmin_write_bulk 3 58191 _000220_hash NULL
82131 +_000221_hash garp_attr_create 3 3883 _000221_hash NULL
82132 +_000222_hash get_arg 3 5694 _000222_hash NULL
82133 +_000223_hash getdqbuf 1 62908 _000223_hash NULL
82134 +_000224_hash get_fdb_entries 3 41916 _000224_hash NULL
82135 +_000225_hash get_indirect_ea 4 51869 _000225_hash NULL
82136 +_000226_hash get_registers 3 26187 _000226_hash NULL
82137 +_000227_hash get_scq 2 10897 _000227_hash NULL
82138 +_000228_hash get_server_iovec 2 16804 _000228_hash NULL
82139 +_000229_hash get_ucode_user 3 38202 _000229_hash NULL
82140 +_000230_hash get_user_cpu_mask 2 14861 _000230_hash NULL
82141 +_000231_hash gfs2_alloc_sort_buffer 1 18275 _000231_hash NULL
82142 +_000232_hash gfs2_glock_nq_m 1 20347 _000232_hash NULL
82143 +_000233_hash gigaset_initcs 2 43753 _000233_hash NULL
82144 +_000234_hash gigaset_initdriver 2 1060 _000234_hash NULL
82145 +_000235_hash gs_alloc_req 2 58883 _000235_hash NULL
82146 +_000236_hash gs_buf_alloc 2 25067 _000236_hash NULL
82147 +_000237_hash gsm_data_alloc 3 42437 _000237_hash NULL
82148 +_000238_hash gss_pipe_downcall 3 23182 _000238_hash NULL
82149 +_000239_hash handle_request 9 10024 _000239_hash NULL
82150 +_000240_hash hash_new 1 62224 _000240_hash NULL
82151 +_000241_hash hashtab_create 3 33769 _000241_hash NULL
82152 +_000242_hash hcd_buffer_alloc 2 27495 _000242_hash NULL
82153 +_000243_hash hci_sock_setsockopt 5 28993 _000243_hash NULL
82154 +_000244_hash heap_init 2 49617 _000244_hash NULL
82155 +_000245_hash hest_ghes_dev_register 1 46766 _000245_hash NULL
82156 +_000246_hash hidraw_get_report 3 45609 _000246_hash NULL
82157 +_000247_hash hidraw_report_event 3 49578 _000509_hash NULL nohasharray
82158 +_000248_hash hidraw_send_report 3 23449 _000248_hash NULL
82159 +_000249_hash hpfs_translate_name 3 41497 _000249_hash NULL
82160 +_000250_hash hysdn_conf_write 3 52145 _000250_hash NULL
82161 +_000251_hash hysdn_log_write 3 48694 _000251_hash NULL
82162 +_000252_hash __i2400mu_send_barker 3 23652 _000252_hash NULL
82163 +_000253_hash i2cdev_read 3 1206 _000253_hash NULL
82164 +_000254_hash i2cdev_write 3 23310 _000254_hash NULL
82165 +_000255_hash i2o_parm_field_get 5 34477 _000255_hash NULL
82166 +_000256_hash i2o_parm_table_get 6 61635 _000256_hash NULL
82167 +_000257_hash ib_copy_from_udata 3 59502 _000257_hash NULL
82168 +_000258_hash ib_ucm_alloc_data 3 36885 _000258_hash NULL
82169 +_000259_hash ib_umad_write 3 47993 _000259_hash NULL
82170 +_000260_hash ib_uverbs_unmarshall_recv 5 12251 _000260_hash NULL
82171 +_000261_hash icn_writecmd 2 38629 _000261_hash NULL
82172 +_000262_hash ide_driver_proc_write 3 32493 _000262_hash NULL
82173 +_000263_hash ide_settings_proc_write 3 35110 _000263_hash NULL
82174 +_000264_hash idetape_chrdev_write 3 53976 _000264_hash NULL
82175 +_000265_hash idmap_pipe_downcall 3 14591 _000265_hash NULL
82176 +_000266_hash ieee80211_build_probe_req 7 27660 _000266_hash NULL
82177 +_000267_hash ieee80211_if_write 3 34894 _000267_hash NULL
82178 +_000268_hash if_write 3 51756 _000268_hash NULL
82179 +_000269_hash ilo_write 3 64378 _000269_hash NULL
82180 +_000270_hash ima_write_policy 3 40548 _000270_hash NULL
82181 +_000271_hash init_data_container 1 60709 _000271_hash NULL
82182 +_000272_hash init_send_hfcd 1 34586 _000272_hash NULL
82183 +_000273_hash insert_dent 7 65034 _000273_hash NULL
82184 +_000274_hash interpret_user_input 2 19393 _000274_hash NULL
82185 +_000275_hash int_proc_write 3 39542 _000275_hash NULL
82186 +_000276_hash ioctl_private_iw_point 7 1273 _000276_hash NULL
82187 +_000277_hash iov_iter_copy_from_user 4 31942 _000277_hash NULL
82188 +_000278_hash iov_iter_copy_from_user_atomic 4 56368 _000278_hash NULL
82189 +_000279_hash iowarrior_write 3 18604 _000279_hash NULL
82190 +_000280_hash ipc_alloc 1 1192 _000280_hash NULL
82191 +_000281_hash ipc_rcu_alloc 1 21208 _000281_hash NULL
82192 +_000282_hash ip_options_get_from_user 4 64958 _000282_hash NULL
82193 +_000283_hash ipv6_renew_option 3 38813 _000283_hash NULL
82194 +_000284_hash ip_vs_conn_fill_param_sync 6 29771 _002404_hash NULL nohasharray
82195 +_000285_hash ip_vs_create_timeout_table 2 64478 _000285_hash NULL
82196 +_000286_hash ipw_queue_tx_init 3 49161 _000286_hash NULL
82197 +_000287_hash irda_setsockopt 5 19824 _000287_hash NULL
82198 +_000288_hash irias_new_octseq_value 2 13596 _000288_hash NULL
82199 +_000289_hash ir_lirc_transmit_ir 3 64403 _000289_hash NULL
82200 +_000290_hash irnet_ctrl_write 3 24139 _000290_hash NULL
82201 +_000291_hash isdn_add_channels 3 40905 _000291_hash NULL
82202 +_000292_hash isdn_ppp_fill_rq 2 41428 _000292_hash NULL
82203 +_000293_hash isdn_ppp_write 4 29109 _000293_hash NULL
82204 +_000294_hash isdn_read 3 50021 _000294_hash NULL
82205 +_000295_hash isdn_v110_open 3 2418 _000295_hash NULL
82206 +_000296_hash isdn_writebuf_stub 4 52383 _000296_hash NULL
82207 +_000297_hash islpci_mgt_transmit 5 34133 _000297_hash NULL
82208 +_000298_hash iso_callback 3 43208 _000298_hash NULL
82209 +_000299_hash iso_packets_buffer_init 3 29061 _000299_hash NULL
82210 +_000300_hash it821x_firmware_command 3 8628 _000300_hash NULL
82211 +_000301_hash ivtv_buf_copy_from_user 4 25502 _000301_hash NULL
82212 +_000302_hash iwch_alloc_fastreg_pbl 2 40153 _000302_hash NULL
82213 +_000303_hash iwl_calib_set 3 34400 _002188_hash NULL nohasharray
82214 +_000304_hash jbd2_journal_init_revoke_table 1 36336 _000304_hash NULL
82215 +_000305_hash jffs2_alloc_full_dirent 1 60179 _001111_hash NULL nohasharray
82216 +_000306_hash journal_init_revoke_table 1 56331 _000306_hash NULL
82217 +_000307_hash kcalloc 1-2 27770 _000307_hash NULL
82218 +_000309_hash keyctl_instantiate_key_common 4 47889 _000309_hash NULL
82219 +_000310_hash keyctl_update_key 3 26061 _000310_hash NULL
82220 +_000311_hash __kfifo_alloc 2-3 22173 _000311_hash NULL
82221 +_000313_hash kfifo_copy_from_user 3 5091 _000313_hash NULL
82222 +_000314_hash kmalloc_node 1 50163 _000314_hash NULL
82223 +_000315_hash kmalloc_parameter 1 65279 _000315_hash NULL
82224 +_000316_hash kmem_alloc 1 31920 _000316_hash NULL
82225 +_000317_hash kobj_map 2-3 9566 _000317_hash NULL
82226 +_000319_hash kone_receive 4 4690 _000319_hash NULL
82227 +_000320_hash kone_send 4 63435 _000320_hash NULL
82228 +_000321_hash krealloc 2 14908 _000321_hash NULL
82229 +_000322_hash kvmalloc 1 32646 _000322_hash NULL
82230 +_000323_hash kvm_read_guest_atomic 4 10765 _000323_hash NULL
82231 +_000324_hash kvm_read_guest_cached 4 39666 _000324_hash NULL
82232 +_000325_hash kvm_read_guest_page 5 18074 _000325_hash NULL
82233 +_000326_hash kzalloc 1 54740 _000326_hash NULL
82234 +_000327_hash l2cap_sock_setsockopt 5 50207 _000327_hash NULL
82235 +_000328_hash l2cap_sock_setsockopt_old 4 29346 _000328_hash NULL
82236 +_000329_hash lane2_associate_req 4 45398 _000329_hash NULL
82237 +_000330_hash lbs_debugfs_write 3 48413 _000330_hash NULL
82238 +_000331_hash lcd_write 3 14857 _000331_hash &_000014_hash
82239 +_000332_hash ldm_frag_add 2 5611 _000332_hash NULL
82240 +_000333_hash __lgread 4 31668 _000333_hash NULL
82241 +_000334_hash libipw_alloc_txb 1 27579 _000334_hash NULL
82242 +_000335_hash link_send_sections_long 4 46556 _000335_hash NULL
82243 +_000336_hash listxattr 3 12769 _000336_hash NULL
82244 +_000337_hash LoadBitmap 2 19658 _000337_hash NULL
82245 +_000338_hash load_msg 2 95 _000338_hash NULL
82246 +_000339_hash lpfc_debugfs_dif_err_write 3 17424 _000339_hash NULL
82247 +_000340_hash lp_write 3 9511 _000340_hash NULL
82248 +_000341_hash mb_cache_create 2 17307 _000341_hash NULL
82249 +_000342_hash mce_write 3 26201 _000342_hash NULL
82250 +_000343_hash mcs7830_get_reg 3 33308 _000343_hash NULL
82251 +_000344_hash mcs7830_set_reg 3 31413 _000344_hash NULL
82252 +_000345_hash memcpy_fromiovec 3 55247 _000345_hash NULL
82253 +_000346_hash memcpy_fromiovecend 3-4 2707 _000346_hash NULL
82254 +_000348_hash mempool_kmalloc 2 53831 _000348_hash NULL
82255 +_000349_hash mempool_resize 2 47983 _001821_hash NULL nohasharray
82256 +_000350_hash mem_rw 3 22085 _000350_hash NULL
82257 +_000351_hash mgmt_control 3 7349 _000351_hash NULL
82258 +_000352_hash mgmt_pending_add 5 46976 _000352_hash NULL
82259 +_000353_hash mlx4_ib_alloc_fast_reg_page_list 2 46119 _000353_hash NULL
82260 +_000354_hash mmc_alloc_sg 1 21504 _000354_hash NULL
82261 +_000355_hash mmc_send_bus_test 4 18285 _000355_hash NULL
82262 +_000356_hash mmc_send_cxd_data 5 38655 _000356_hash NULL
82263 +_000357_hash module_alloc_update_bounds 1 47205 _000357_hash NULL
82264 +_000358_hash move_addr_to_kernel 2 32673 _000358_hash NULL
82265 +_000359_hash mpi_alloc_limb_space 1 23190 _000359_hash NULL
82266 +_000360_hash mpi_resize 2 44674 _000360_hash NULL
82267 +_000361_hash mptctl_getiocinfo 2 28545 _000361_hash NULL
82268 +_000362_hash mtdchar_readoob 4 31200 _000362_hash NULL
82269 +_000363_hash mtdchar_write 3 56831 _000363_hash NULL
82270 +_000364_hash mtdchar_writeoob 4 3393 _000364_hash NULL
82271 +_000365_hash mtd_device_parse_register 5 5024 _000365_hash NULL
82272 +_000366_hash mtf_test_write 3 18844 _000366_hash NULL
82273 +_000367_hash mtrr_write 3 59622 _000367_hash NULL
82274 +_000368_hash musb_test_mode_write 3 33518 _000368_hash NULL
82275 +_000369_hash mwifiex_get_common_rates 3 17131 _000369_hash NULL
82276 +_000370_hash mwifiex_update_curr_bss_params 5 16908 _000370_hash NULL
82277 +_000371_hash nand_bch_init 2-3 16280 _001341_hash NULL nohasharray
82278 +_000373_hash ncp_file_write 3 3813 _000373_hash NULL
82279 +_000374_hash ncp__vol2io 5 4804 _000374_hash NULL
82280 +_000375_hash nes_alloc_fast_reg_page_list 2 33523 _000375_hash NULL
82281 +_000376_hash nfc_targets_found 3 29886 _000376_hash NULL
82282 +_000377_hash nfs4_acl_new 1 49806 _000377_hash NULL
82283 +_000378_hash nfs4_write_cached_acl 4 15070 _000378_hash NULL
82284 +_000379_hash nfsd_cache_update 3 59574 _000379_hash NULL
82285 +_000380_hash nfsd_symlink 6 63442 _000380_hash NULL
82286 +_000381_hash nfs_idmap_get_desc 2-4 42990 _000381_hash NULL
82287 +_000383_hash nfs_readdir_make_qstr 3 12509 _000383_hash NULL
82288 +_000384_hash note_last_dentry 3 12285 _000384_hash NULL
82289 +_000385_hash ntfs_copy_from_user 3-5 15072 _000385_hash NULL
82290 +_000387_hash __ntfs_copy_from_user_iovec_inatomic 3-4 38153 _000387_hash NULL
82291 +_000389_hash ntfs_ucstonls 3 23097 _000389_hash NULL
82292 +_000390_hash nvme_alloc_iod 1 56027 _000390_hash NULL
82293 +_000391_hash nvram_write 3 3894 _000391_hash NULL
82294 +_000392_hash o2hb_debug_create 4 18744 _000392_hash NULL
82295 +_000393_hash o2net_send_message_vec 4 879 _001792_hash NULL nohasharray
82296 +_000394_hash ocfs2_control_cfu 2 37750 _000394_hash NULL
82297 +_000395_hash oom_adjust_write 3 41116 _000395_hash NULL
82298 +_000396_hash oom_score_adj_write 3 42594 _000396_hash NULL
82299 +_000397_hash opera1_xilinx_rw 5 31453 _000397_hash NULL
82300 +_000398_hash oprofilefs_ulong_from_user 3 57251 _000398_hash NULL
82301 +_000399_hash opticon_write 4 60775 _000399_hash NULL
82302 +_000400_hash orig_node_add_if 2 32833 _000400_hash NULL
82303 +_000401_hash orig_node_del_if 2 28371 _000401_hash NULL
82304 +_000402_hash p9_check_zc_errors 4 15534 _000402_hash NULL
82305 +_000403_hash packet_buffer_init 2 1607 _000403_hash NULL
82306 +_000404_hash packet_setsockopt 5 17662 _000404_hash NULL
82307 +_000405_hash parse_command 2 37079 _000405_hash NULL
82308 +_000406_hash pcbit_writecmd 2 12332 _000406_hash NULL
82309 +_000407_hash pcmcia_replace_cis 3 57066 _000407_hash NULL
82310 +_000408_hash pgctrl_write 3 50453 _000408_hash NULL
82311 +_000409_hash pg_write 3 40766 _000409_hash NULL
82312 +_000410_hash pidlist_allocate 1 64404 _000410_hash NULL
82313 +_000411_hash pipe_iov_copy_from_user 3 23102 _000411_hash NULL
82314 +_000412_hash pipe_iov_copy_to_user 3 3447 _000412_hash NULL
82315 +_000413_hash pkt_add 3 39897 _000413_hash NULL
82316 +_000414_hash pktgen_if_write 3 55628 _000414_hash NULL
82317 +_000415_hash platform_device_add_data 3 310 _000415_hash NULL
82318 +_000416_hash platform_device_add_resources 3 13289 _000416_hash NULL
82319 +_000417_hash pm_qos_power_write 3 52513 _000417_hash NULL
82320 +_000418_hash pnpbios_proc_write 3 19758 _000418_hash NULL
82321 +_000419_hash pool_allocate 3 42012 _000419_hash NULL
82322 +_000420_hash posix_acl_alloc 1 48063 _000420_hash NULL
82323 +_000421_hash ppp_cp_parse_cr 4 5214 _000421_hash NULL
82324 +_000422_hash ppp_write 3 34034 _000422_hash NULL
82325 +_000423_hash pp_read 3 33210 _000423_hash NULL
82326 +_000424_hash pp_write 3 39554 _000424_hash NULL
82327 +_000425_hash printer_req_alloc 2 62687 _001807_hash NULL nohasharray
82328 +_000426_hash printer_write 3 60276 _000426_hash NULL
82329 +_000427_hash prism2_set_genericelement 3 29277 _000427_hash NULL
82330 +_000428_hash __probe_kernel_read 3 61119 _000428_hash NULL
82331 +_000429_hash __probe_kernel_write 3 29842 _000429_hash NULL
82332 +_000430_hash proc_coredump_filter_write 3 25625 _000430_hash NULL
82333 +_000431_hash _proc_do_string 2 6376 _000431_hash NULL
82334 +_000432_hash process_vm_rw_pages 5-6 15954 _000432_hash NULL
82335 +_000434_hash proc_loginuid_write 3 63648 _000434_hash NULL
82336 +_000435_hash proc_pid_attr_write 3 63845 _000435_hash NULL
82337 +_000436_hash proc_scsi_devinfo_write 3 32064 _000436_hash NULL
82338 +_000437_hash proc_scsi_write 3 29142 _000437_hash NULL
82339 +_000438_hash proc_scsi_write_proc 3 267 _000438_hash NULL
82340 +_000439_hash pstore_mkfile 5 50830 _000439_hash NULL
82341 +_000440_hash pti_char_write 3 60960 _000440_hash NULL
82342 +_000441_hash ptrace_writedata 4 45021 _000441_hash NULL
82343 +_000442_hash pt_write 3 40159 _000442_hash NULL
82344 +_000443_hash pvr2_ioread_set_sync_key 3 59882 _000443_hash NULL
82345 +_000444_hash pvr2_stream_buffer_count 2 33719 _000444_hash NULL
82346 +_000445_hash qdisc_class_hash_alloc 1 18262 _000445_hash NULL
82347 +_000446_hash r3964_write 4 57662 _000446_hash NULL
82348 +_000447_hash raw_seticmpfilter 3 6888 _000447_hash NULL
82349 +_000448_hash raw_setsockopt 5 45800 _000448_hash NULL
82350 +_000449_hash rawv6_seticmpfilter 5 12137 _000449_hash NULL
82351 +_000450_hash ray_cs_essid_proc_write 3 17875 _000450_hash NULL
82352 +_000451_hash rbd_add 3 16366 _000451_hash NULL
82353 +_000452_hash rbd_snap_add 4 19678 _000452_hash NULL
82354 +_000453_hash rdma_set_ib_paths 3 45592 _000453_hash NULL
82355 +_000454_hash rds_page_copy_user 4 35691 _000454_hash NULL
82356 +_000455_hash read 3 9397 _000455_hash NULL
82357 +_000456_hash read_buf 2 20469 _000456_hash NULL
82358 +_000457_hash read_cis_cache 4 29735 _000457_hash NULL
82359 +_000458_hash realloc_buffer 2 25816 _000458_hash NULL
82360 +_000459_hash realloc_packet_buffer 2 25569 _000459_hash NULL
82361 +_000460_hash receive_DataRequest 3 9904 _000460_hash NULL
82362 +_000461_hash recent_mt_proc_write 3 8206 _000461_hash NULL
82363 +_000462_hash regmap_access_read_file 3 37223 _000462_hash NULL
82364 +_000463_hash regmap_bulk_write 4 59049 _000463_hash NULL
82365 +_000464_hash regmap_map_read_file 3 37685 _000464_hash NULL
82366 +_000465_hash regset_tls_set 4 18459 _000465_hash NULL
82367 +_000466_hash reg_w_buf 3 27724 _000466_hash NULL
82368 +_000467_hash reg_w_ixbuf 4 34736 _000467_hash NULL
82369 +_000468_hash remote_settings_file_write 3 22987 _000468_hash NULL
82370 +_000469_hash request_key_auth_new 3 38092 _000469_hash NULL
82371 +_000470_hash restore_i387_fxsave 2 17528 _000470_hash NULL
82372 +_000471_hash revalidate 2 19043 _000471_hash NULL
82373 +_000472_hash rfcomm_sock_setsockopt 5 18254 _000472_hash NULL
82374 +_000473_hash rndis_add_response 2 58544 _000473_hash NULL
82375 +_000474_hash rndis_set_oid 4 6547 _000474_hash NULL
82376 +_000475_hash rngapi_reset 3 34366 _000475_hash NULL
82377 +_000476_hash roccat_common_receive 4 53407 _000476_hash NULL
82378 +_000477_hash roccat_common_send 4 12284 _000477_hash NULL
82379 +_000478_hash rpc_malloc 2 43573 _000478_hash NULL
82380 +_000479_hash rt2x00debug_write_bbp 3 8212 _000479_hash NULL
82381 +_000480_hash rt2x00debug_write_csr 3 64753 _000480_hash NULL
82382 +_000481_hash rt2x00debug_write_eeprom 3 23091 _000481_hash NULL
82383 +_000482_hash rt2x00debug_write_rf 3 38195 _000482_hash NULL
82384 +_000483_hash rts51x_read_mem 4 26577 _000483_hash NULL
82385 +_000484_hash rts51x_read_status 4 11830 _000484_hash NULL
82386 +_000485_hash rts51x_write_mem 4 17598 _000485_hash NULL
82387 +_000486_hash rw_copy_check_uvector 3 34271 _000486_hash NULL
82388 +_000487_hash rxrpc_request_key 3 27235 _000487_hash NULL
82389 +_000488_hash rxrpc_server_keyring 3 16431 _000488_hash NULL
82390 +_000489_hash savemem 3 58129 _000489_hash NULL
82391 +_000490_hash sb16_copy_from_user 10-7-6 55836 _000490_hash NULL
82392 +_000493_hash sched_autogroup_write 3 10984 _000493_hash NULL
82393 +_000494_hash scsi_mode_select 6 37330 _000494_hash NULL
82394 +_000495_hash scsi_tgt_copy_sense 3 26933 _000495_hash NULL
82395 +_000496_hash sctp_auth_create_key 1 51641 _000496_hash NULL
82396 +_000497_hash sctp_getsockopt_delayed_ack 2 9232 _000497_hash NULL
82397 +_000498_hash sctp_getsockopt_local_addrs 2 25178 _000498_hash NULL
82398 +_000499_hash sctp_make_abort_user 3 29654 _000499_hash NULL
82399 +_000500_hash sctp_setsockopt_active_key 3 43755 _000500_hash NULL
82400 +_000501_hash sctp_setsockopt_adaptation_layer 3 26935 _001925_hash NULL nohasharray
82401 +_000502_hash sctp_setsockopt_associnfo 3 51684 _000502_hash NULL
82402 +_000503_hash sctp_setsockopt_auth_chunk 3 30843 _000503_hash NULL
82403 +_000504_hash sctp_setsockopt_auth_key 3 3793 _000504_hash NULL
82404 +_000505_hash sctp_setsockopt_autoclose 3 5775 _000505_hash NULL
82405 +_000506_hash sctp_setsockopt_bindx 3 49870 _000506_hash NULL
82406 +_000507_hash __sctp_setsockopt_connectx 3 46949 _000507_hash NULL
82407 +_000508_hash sctp_setsockopt_context 3 31091 _000508_hash NULL
82408 +_000509_hash sctp_setsockopt_default_send_param 3 49578 _000509_hash &_000247_hash
82409 +_000510_hash sctp_setsockopt_delayed_ack 3 40129 _000510_hash NULL
82410 +_000511_hash sctp_setsockopt_del_key 3 42304 _002281_hash NULL nohasharray
82411 +_000512_hash sctp_setsockopt_events 3 18862 _000512_hash NULL
82412 +_000513_hash sctp_setsockopt_hmac_ident 3 11687 _000513_hash NULL
82413 +_000514_hash sctp_setsockopt_initmsg 3 1383 _000514_hash NULL
82414 +_000515_hash sctp_setsockopt_maxburst 3 28041 _000515_hash NULL
82415 +_000516_hash sctp_setsockopt_maxseg 3 11829 _000516_hash NULL
82416 +_000517_hash sctp_setsockopt_peer_addr_params 3 734 _000517_hash NULL
82417 +_000518_hash sctp_setsockopt_peer_primary_addr 3 13440 _000518_hash NULL
82418 +_000519_hash sctp_setsockopt_rtoinfo 3 30941 _000519_hash NULL
82419 +_000520_hash security_context_to_sid_core 2 29248 _000520_hash NULL
82420 +_000521_hash sel_commit_bools_write 3 46077 _000521_hash NULL
82421 +_000522_hash sel_write_avc_cache_threshold 3 2256 _000522_hash NULL
82422 +_000523_hash sel_write_bool 3 46996 _000523_hash NULL
82423 +_000524_hash sel_write_checkreqprot 3 60774 _000524_hash NULL
82424 +_000525_hash sel_write_disable 3 10511 _000525_hash NULL
82425 +_000526_hash sel_write_enforce 3 48998 _000526_hash NULL
82426 +_000527_hash sel_write_load 3 63830 _000527_hash NULL
82427 +_000528_hash send_bulk_static_data 3 61932 _000528_hash NULL
82428 +_000529_hash send_control_msg 6 48498 _000529_hash NULL
82429 +_000530_hash set_aoe_iflist 2 42737 _000530_hash NULL
82430 +_000531_hash setkey_unaligned 3 39474 _000531_hash NULL
82431 +_000532_hash set_registers 3 53582 _000532_hash NULL
82432 +_000533_hash setsockopt 5 54539 _000533_hash NULL
82433 +_000534_hash setup_req 3 5848 _000534_hash NULL
82434 +_000535_hash setup_window 7 59178 _000535_hash NULL
82435 +_000536_hash setxattr 4 37006 _000536_hash NULL
82436 +_000537_hash sfq_alloc 1 2861 _000537_hash NULL
82437 +_000538_hash sg_kmalloc 1 50240 _000538_hash NULL
82438 +_000539_hash sgl_map_user_pages 2 30610 _000539_hash NULL
82439 +_000540_hash shash_setkey_unaligned 3 8620 _000540_hash NULL
82440 +_000541_hash shmem_xattr_alloc 2 61190 _000541_hash NULL
82441 +_000542_hash sierra_setup_urb 5 46029 _000542_hash NULL
82442 +_000543_hash simple_transaction_get 3 50633 _000543_hash NULL
82443 +_000544_hash simple_write_to_buffer 2-5 3122 _000544_hash NULL
82444 +_000546_hash sisusb_send_bulk_msg 3 17864 _000546_hash NULL
82445 +_000547_hash skb_add_data 3 48363 _000547_hash NULL
82446 +_000548_hash skb_do_copy_data_nocache 5 12465 _000548_hash NULL
82447 +_000549_hash sl_alloc_bufs 2 50380 _000549_hash NULL
82448 +_000550_hash sl_realloc_bufs 2 64086 _000550_hash NULL
82449 +_000551_hash smk_write_ambient 3 45691 _000551_hash NULL
82450 +_000552_hash smk_write_cipso 3 17989 _000552_hash NULL
82451 +_000553_hash smk_write_direct 3 46363 _000553_hash NULL
82452 +_000554_hash smk_write_doi 3 49621 _000554_hash NULL
82453 +_000555_hash smk_write_load_list 3 52280 _000555_hash NULL
82454 +_000556_hash smk_write_logging 3 2618 _000556_hash NULL
82455 +_000557_hash smk_write_netlbladdr 3 42525 _000557_hash NULL
82456 +_000558_hash smk_write_onlycap 3 14400 _000558_hash NULL
82457 +_000559_hash snd_ctl_elem_user_tlv 3 11695 _000559_hash NULL
82458 +_000560_hash snd_emu10k1_fx8010_read 5 9605 _000560_hash NULL
82459 +_000561_hash snd_emu10k1_synth_copy_from_user 3-5 9061 _000561_hash NULL
82460 +_000563_hash snd_gus_dram_poke 4 18525 _000563_hash NULL
82461 +_000564_hash snd_hdsp_playback_copy 5 20676 _000564_hash NULL
82462 +_000565_hash snd_info_entry_write 3 63474 _000565_hash NULL
82463 +_000566_hash snd_korg1212_copy_from 6 36169 _000566_hash NULL
82464 +_000567_hash snd_mem_proc_write 3 9786 _000567_hash NULL
82465 +_000568_hash snd_midi_channel_init_set 1 30092 _000568_hash NULL
82466 +_000569_hash snd_midi_event_new 1 9893 _000750_hash NULL nohasharray
82467 +_000570_hash snd_opl4_mem_proc_write 5 9670 _000570_hash NULL
82468 +_000571_hash snd_pcm_aio_read 3 13900 _000571_hash NULL
82469 +_000572_hash snd_pcm_aio_write 3 28738 _000572_hash NULL
82470 +_000573_hash snd_pcm_oss_write1 3 10872 _000573_hash NULL
82471 +_000574_hash snd_pcm_oss_write2 3 27332 _000574_hash NULL
82472 +_000575_hash snd_rawmidi_kernel_write1 4 56847 _000575_hash NULL
82473 +_000576_hash snd_rme9652_playback_copy 5 20970 _000576_hash NULL
82474 +_000577_hash snd_sb_csp_load_user 3 45190 _000577_hash NULL
82475 +_000578_hash snd_usb_ctl_msg 8 8436 _000578_hash NULL
82476 +_000579_hash sock_bindtodevice 3 50942 _000579_hash NULL
82477 +_000580_hash sock_kmalloc 2 62205 _000580_hash NULL
82478 +_000581_hash spidev_write 3 44510 _000581_hash NULL
82479 +_000582_hash squashfs_read_table 3 16945 _000582_hash NULL
82480 +_000583_hash srpt_alloc_ioctx 2-3 51042 _000583_hash NULL
82481 +_000585_hash srpt_alloc_ioctx_ring 2 49330 _000585_hash NULL
82482 +_000586_hash st5481_setup_isocpipes 6-4 61340 _000586_hash NULL
82483 +_000587_hash sta_agg_status_write 3 45164 _000587_hash NULL
82484 +_000588_hash svc_setsockopt 5 36876 _000588_hash NULL
82485 +_000589_hash sys_add_key 4 61288 _000589_hash NULL
82486 +_000590_hash sys_modify_ldt 3 18824 _000590_hash NULL
82487 +_000591_hash sys_semtimedop 3 4486 _000591_hash NULL
82488 +_000592_hash sys_setdomainname 2 4373 _000592_hash NULL
82489 +_000593_hash sys_sethostname 2 42962 _000593_hash NULL
82490 +_000594_hash tda10048_writeregbulk 4 11050 _000594_hash NULL
82491 +_000595_hash tipc_log_resize 1 34803 _000595_hash NULL
82492 +_000596_hash tomoyo_write_self 3 45161 _000596_hash NULL
82493 +_000597_hash tower_write 3 8580 _000597_hash NULL
82494 +_000598_hash tpm_write 3 50798 _000598_hash NULL
82495 +_000599_hash trusted_instantiate 3 4710 _000599_hash NULL
82496 +_000600_hash trusted_update 3 12664 _000600_hash NULL
82497 +_000601_hash tt_changes_fill_buffer 3 62649 _000601_hash NULL
82498 +_000602_hash tty_buffer_alloc 2 45437 _000602_hash NULL
82499 +_000603_hash __tun_chr_ioctl 4 22300 _000603_hash NULL
82500 +_000604_hash ubi_more_leb_change_data 4 63534 _000604_hash NULL
82501 +_000605_hash ubi_more_update_data 4 39189 _000605_hash NULL
82502 +_000606_hash ubi_resize_volume 2 50172 _000606_hash NULL
82503 +_000607_hash udf_alloc_i_data 2 35786 _000607_hash NULL
82504 +_000608_hash uea_idma_write 3 64139 _000608_hash NULL
82505 +_000609_hash uea_request 4 47613 _000609_hash NULL
82506 +_000610_hash uea_send_modem_cmd 3 3888 _000610_hash NULL
82507 +_000611_hash uio_write 3 43202 _000611_hash NULL
82508 +_000612_hash um_idi_write 3 18293 _000612_hash NULL
82509 +_000613_hash us122l_ctl_msg 8 13330 _000613_hash NULL
82510 +_000614_hash usb_alloc_urb 1 43436 _000614_hash NULL
82511 +_000615_hash usblp_new_writeurb 2 22894 _000615_hash NULL
82512 +_000616_hash usblp_write 3 23178 _000616_hash NULL
82513 +_000617_hash usbtest_alloc_urb 3-5 34446 _000617_hash NULL
82514 +_000619_hash usbtmc_write 3 64340 _000619_hash NULL
82515 +_000620_hash user_instantiate 3 26131 _000620_hash NULL
82516 +_000621_hash user_update 3 41332 _000621_hash NULL
82517 +_000622_hash uvc_simplify_fraction 3 31303 _000622_hash NULL
82518 +_000623_hash uwb_rc_cmd_done 4 35892 _000623_hash NULL
82519 +_000624_hash uwb_rc_neh_grok_event 3 55799 _000624_hash NULL
82520 +_000625_hash v9fs_alloc_rdir_buf 2 42150 _000625_hash NULL
82521 +_000626_hash __vb2_perform_fileio 3 63033 _000626_hash NULL
82522 +_000627_hash vc_do_resize 3-4 48842 _000627_hash NULL
82523 +_000629_hash vcs_write 3 3910 _000629_hash NULL
82524 +_000630_hash vfd_write 3 14717 _000630_hash NULL
82525 +_000631_hash vga_arb_write 3 36112 _000631_hash NULL
82526 +_000632_hash vga_switcheroo_debugfs_write 3 33984 _000632_hash NULL
82527 +_000633_hash vhci_get_user 3 45039 _000633_hash NULL
82528 +_000634_hash video_proc_write 3 6724 _000634_hash NULL
82529 +_000635_hash vlsi_alloc_ring 3-4 57003 _000635_hash NULL
82530 +_000637_hash __vmalloc 1 61168 _000637_hash NULL
82531 +_000638_hash vmalloc_32 1 1135 _000638_hash NULL
82532 +_000639_hash vmalloc_32_user 1 37519 _000639_hash NULL
82533 +_000640_hash vmalloc_exec 1 36132 _000640_hash NULL
82534 +_000641_hash vmalloc_node 1 58700 _000641_hash NULL
82535 +_000642_hash __vmalloc_node_flags 1 30352 _000642_hash NULL
82536 +_000643_hash vmalloc_user 1 32308 _000643_hash NULL
82537 +_000644_hash vol_cdev_direct_write 3 20751 _000644_hash NULL
82538 +_000645_hash vp_request_msix_vectors 2 28849 _000645_hash NULL
82539 +_000646_hash vring_add_indirect 3-4 20737 _000646_hash NULL
82540 +_000648_hash vring_new_virtqueue 1 9671 _000648_hash NULL
82541 +_000649_hash vxge_os_dma_malloc 2 46184 _000649_hash NULL
82542 +_000650_hash vxge_os_dma_malloc_async 3 56348 _000650_hash NULL
82543 +_000651_hash wdm_write 3 53735 _000651_hash NULL
82544 +_000652_hash wiimote_hid_send 3 48528 _000652_hash NULL
82545 +_000653_hash wl1273_fm_fops_write 3 60621 _000653_hash NULL
82546 +_000654_hash wlc_phy_loadsampletable_nphy 3 64367 _000654_hash NULL
82547 +_000655_hash write 3 62671 _000655_hash NULL
82548 +_000656_hash write_flush 3 50803 _000656_hash NULL
82549 +_000657_hash write_rio 3 54837 _000657_hash NULL
82550 +_000658_hash x25_asy_change_mtu 2 26928 _000658_hash NULL
82551 +_000659_hash xdi_copy_from_user 4 8395 _000659_hash NULL
82552 +_000660_hash xfrm_dst_alloc_copy 3 3034 _000660_hash NULL
82553 +_000661_hash xfrm_user_policy 4 62573 _000661_hash NULL
82554 +_000662_hash xfs_attrmulti_attr_set 4 59346 _000662_hash NULL
82555 +_000663_hash xfs_handle_to_dentry 3 12135 _000663_hash NULL
82556 +_000664_hash __xip_file_write 3 2733 _000664_hash NULL
82557 +_000665_hash xprt_rdma_allocate 2 31372 _000665_hash NULL
82558 +_000666_hash zd_usb_iowrite16v_async 3 23984 _000666_hash NULL
82559 +_000667_hash zd_usb_read_fw 4 22049 _000667_hash NULL
82560 +_000668_hash zerocopy_sg_from_iovec 3 11828 _000668_hash NULL
82561 +_000669_hash zoran_write 3 22404 _000669_hash NULL
82562 +_000671_hash acpi_ex_allocate_name_string 2 7685 _000671_hash NULL
82563 +_000672_hash acpi_os_allocate_zeroed 1 37422 _000672_hash NULL
82564 +_000673_hash acpi_ut_initialize_buffer 2 47143 _002314_hash NULL nohasharray
82565 +_000674_hash ad7879_spi_xfer 3 36311 _000674_hash NULL
82566 +_000675_hash add_new_gdb 3 27643 _000675_hash NULL
82567 +_000676_hash add_numbered_child 5 14273 _000676_hash NULL
82568 +_000677_hash add_res_range 4 21310 _000677_hash NULL
82569 +_000678_hash addtgt 3 54703 _000678_hash NULL
82570 +_000679_hash add_uuid 4 49831 _000679_hash NULL
82571 +_000680_hash afs_cell_alloc 2 24052 _000680_hash NULL
82572 +_000681_hash aggr_recv_addba_req_evt 4 38037 _000681_hash NULL
82573 +_000682_hash agp_create_memory 1 1075 _000682_hash NULL
82574 +_000683_hash agp_create_user_memory 1 62955 _000683_hash NULL
82575 +_000684_hash alg_setsockopt 5 20985 _000684_hash NULL
82576 +_000685_hash alloc_async 1 14208 _000685_hash NULL
82577 +_000686_hash ___alloc_bootmem_nopanic 1 53626 _000686_hash NULL
82578 +_000687_hash alloc_buf 1 34532 _000687_hash NULL
82579 +_000688_hash alloc_chunk 1 49575 _000688_hash NULL
82580 +_000689_hash alloc_context 1 41283 _000689_hash NULL
82581 +_000690_hash alloc_ctrl_packet 1 44667 _000690_hash NULL
82582 +_000691_hash alloc_data_packet 1 46698 _000691_hash NULL
82583 +_000692_hash alloc_dca_provider 2 59670 _000692_hash NULL
82584 +_000693_hash __alloc_dev_table 2 54343 _000693_hash NULL
82585 +_000694_hash alloc_ep 1 17269 _000694_hash NULL
82586 +_000695_hash __alloc_extent_buffer 3 15093 _000695_hash NULL
82587 +_000696_hash alloc_group_attrs 2 9194 _000719_hash NULL nohasharray
82588 +_000697_hash alloc_large_system_hash 2 64490 _000697_hash NULL
82589 +_000698_hash alloc_netdev_mqs 1 30030 _000698_hash NULL
82590 +_000699_hash __alloc_objio_seg 1 7203 _000699_hash NULL
82591 +_000700_hash alloc_ring 2-4 15345 _000700_hash NULL
82592 +_000701_hash alloc_ring 2-4 39151 _000701_hash NULL
82593 +_000704_hash alloc_session 1-2 64171 _000704_hash NULL
82594 +_000708_hash alloc_smp_req 1 51337 _000708_hash NULL
82595 +_000709_hash alloc_smp_resp 1 3566 _000709_hash NULL
82596 +_000710_hash alloc_ts_config 1 45775 _000710_hash NULL
82597 +_000711_hash alloc_upcall 2 62186 _000711_hash NULL
82598 +_000712_hash altera_drscan 2 48698 _000712_hash NULL
82599 +_000713_hash altera_irscan 2 62396 _000713_hash NULL
82600 +_000714_hash altera_set_dr_post 2 54291 _000714_hash NULL
82601 +_000715_hash altera_set_dr_pre 2 64862 _000715_hash NULL
82602 +_000716_hash altera_set_ir_post 2 20948 _000716_hash NULL
82603 +_000717_hash altera_set_ir_pre 2 54103 _000717_hash NULL
82604 +_000718_hash altera_swap_dr 2 50090 _000718_hash NULL
82605 +_000719_hash altera_swap_ir 2 9194 _000719_hash &_000696_hash
82606 +_000720_hash amd_create_gatt_pages 1 20537 _000720_hash NULL
82607 +_000721_hash aoechr_write 3 62883 _001352_hash NULL nohasharray
82608 +_000722_hash applesmc_create_nodes 2 49392 _000722_hash NULL
82609 +_000723_hash array_zalloc 1-2 7519 _000723_hash NULL
82610 +_000725_hash arvo_sysfs_read 6 31617 _000725_hash NULL
82611 +_000726_hash arvo_sysfs_write 6 3311 _000726_hash NULL
82612 +_000727_hash asd_store_update_bios 4 10165 _000727_hash NULL
82613 +_000728_hash ata_host_alloc 2 46094 _000728_hash NULL
82614 +_000729_hash atalk_sendmsg 4 21677 _000729_hash NULL
82615 +_000730_hash ath6kl_cfg80211_connect_event 7-9-8 13443 _000730_hash NULL
82616 +_000731_hash ath6kl_mgmt_tx 9 21153 _000731_hash NULL
82617 +_000732_hash ath6kl_wmi_roam_tbl_event_rx 3 43440 _000732_hash NULL
82618 +_000733_hash ath6kl_wmi_send_mgmt_cmd 7 17347 _000733_hash NULL
82619 +_000734_hash ath_descdma_setup 5 12257 _000734_hash NULL
82620 +_000735_hash ath_rx_edma_init 2 65483 _000735_hash NULL
82621 +_000736_hash ati_create_gatt_pages 1 4722 _000736_hash NULL
82622 +_000737_hash au0828_init_isoc 2-3 61917 _000737_hash NULL
82623 +_000739_hash audit_init_entry 1 38644 _000739_hash NULL
82624 +_000740_hash ax25_sendmsg 4 62770 _000740_hash NULL
82625 +_000741_hash b1_alloc_card 1 36155 _000741_hash NULL
82626 +_000742_hash b43_nphy_load_samples 3 36481 _000742_hash NULL
82627 +_000743_hash bio_copy_user_iov 4 37660 _000743_hash NULL
82628 +_000744_hash __bio_map_kern 2-3 47379 _000744_hash NULL
82629 +_000746_hash blk_register_region 1-2 51424 _000746_hash NULL
82630 +_000748_hash bm_entry_write 3 28338 _000748_hash NULL
82631 +_000749_hash bm_realloc_pages 2 9431 _000749_hash NULL
82632 +_000750_hash bm_register_write 3 9893 _000750_hash &_000569_hash
82633 +_000751_hash bm_status_write 3 12964 _000751_hash NULL
82634 +_000752_hash br_mdb_rehash 2 42643 _000752_hash NULL
82635 +_000753_hash btrfs_copy_from_user 3 43806 _000753_hash NULL
82636 +_000754_hash btrfs_insert_delayed_dir_index 4 63720 _000754_hash NULL
82637 +_000755_hash __btrfs_map_block 3 49839 _000755_hash NULL
82638 +_000756_hash __c4iw_init_resource_fifo 3 8334 _000756_hash NULL
82639 +_000757_hash cache_downcall 3 13666 _000757_hash NULL
82640 +_000758_hash cache_slow_downcall 2 8570 _000758_hash NULL
82641 +_000759_hash ca_extend 2 64541 _000759_hash NULL
82642 +_000760_hash caif_seqpkt_sendmsg 4 22961 _000760_hash NULL
82643 +_000761_hash caif_stream_sendmsg 4 9110 _000761_hash NULL
82644 +_000762_hash carl9170_cmd_buf 3 950 _000762_hash NULL
82645 +_000763_hash cdev_add 2-3 38176 _000763_hash NULL
82646 +_000765_hash cdrom_read_cdda 4 50478 _000765_hash NULL
82647 +_000766_hash ceph_dns_resolve_name 1 62488 _000766_hash NULL
82648 +_000767_hash ceph_msgpool_get 2 54258 _000767_hash NULL
82649 +_000768_hash cfg80211_connect_result 4-6 56515 _000768_hash NULL
82650 +_000770_hash cfg80211_disconnected 4 57 _000770_hash NULL
82651 +_000771_hash cfg80211_inform_bss 8 19332 _000771_hash NULL
82652 +_000772_hash cfg80211_inform_bss_frame 4 41078 _000772_hash NULL
82653 +_000773_hash cfg80211_mlme_register_mgmt 5 19852 _000773_hash NULL
82654 +_000774_hash cfg80211_roamed_bss 4-6 50198 _000774_hash NULL
82655 +_000776_hash cifs_readdata_alloc 1 50318 _000776_hash NULL
82656 +_000777_hash cifs_readv_from_socket 3 19109 _000777_hash NULL
82657 +_000778_hash cifs_writedata_alloc 1 32880 _000778_hash NULL
82658 +_000779_hash cnic_alloc_dma 3 34641 _000779_hash NULL
82659 +_000780_hash configfs_write_file 3 61621 _000780_hash NULL
82660 +_000781_hash construct_key 3 11329 _000781_hash NULL
82661 +_000782_hash context_alloc 3 24645 _000782_hash NULL
82662 +_000783_hash copy_to_user 3 57835 _000783_hash NULL
82663 +_000784_hash create_attr_set 1 22861 _000784_hash NULL
82664 +_000785_hash create_bounce_buffer 3 39155 _000785_hash NULL
82665 +_000786_hash create_gpadl_header 2 19064 _000786_hash NULL
82666 +_000787_hash _create_sg_bios 4 31244 _000787_hash NULL
82667 +_000788_hash cryptd_alloc_instance 2-3 18048 _000788_hash NULL
82668 +_000790_hash crypto_ahash_setkey 3 55134 _000790_hash NULL
82669 +_000791_hash crypto_alloc_instance2 3 25277 _000791_hash NULL
82670 +_000792_hash crypto_shash_setkey 3 60483 _000792_hash NULL
82671 +_000793_hash cx231xx_init_bulk 3-2 47024 _000793_hash NULL
82672 +_000794_hash cx231xx_init_isoc 2-3 56453 _000794_hash NULL
82673 +_000796_hash cx231xx_init_vbi_isoc 2-3 28053 _000796_hash NULL
82674 +_000798_hash cxgb_alloc_mem 1 24007 _000798_hash NULL
82675 +_000799_hash cxgbi_device_portmap_create 3 25747 _000799_hash NULL
82676 +_000800_hash cxgbi_device_register 1-2 36746 _000800_hash NULL
82677 +_000802_hash __cxio_init_resource_fifo 3 23447 _000802_hash NULL
82678 +_000803_hash dccp_sendmsg 4 56058 _000803_hash NULL
82679 +_000804_hash ddp_make_gl 1 12179 _000804_hash NULL
82680 +_000805_hash depth_write 3 3021 _000805_hash NULL
82681 +_000806_hash dev_irnet_write 3 11398 _000806_hash NULL
82682 +_000807_hash dev_set_alias 3 50084 _000807_hash NULL
82683 +_000808_hash dev_write 3 7708 _000808_hash NULL
82684 +_000809_hash dfs_global_file_write 3 6112 _000809_hash NULL
82685 +_000810_hash dgram_sendmsg 4 45679 _000810_hash NULL
82686 +_000811_hash disconnect 4 32521 _000811_hash NULL
82687 +_000812_hash dma_attach 6-7 50831 _000812_hash NULL
82688 +_000814_hash dn_sendmsg 4 38390 _000814_hash NULL
82689 +_000815_hash do_dccp_setsockopt 5 54377 _000815_hash NULL
82690 +_000816_hash do_jffs2_setxattr 5 25910 _000816_hash NULL
82691 +_000817_hash do_msgsnd 4 1387 _000817_hash NULL
82692 +_000818_hash do_raw_setsockopt 5 55215 _000818_hash NULL
82693 +_000819_hash do_readv_writev 4 51849 _000819_hash NULL
82694 +_000820_hash do_sync 1 9604 _000820_hash NULL
82695 +_000821_hash dup_array 3 33551 _000821_hash NULL
82696 +_000822_hash dvb_audio_write 3 51275 _000822_hash NULL
82697 +_000823_hash dvb_ca_en50221_init 4 45718 _000823_hash NULL
82698 +_000824_hash dvb_video_write 3 754 _000824_hash NULL
82699 +_000825_hash econet_sendmsg 4 51430 _000825_hash NULL
82700 +_000826_hash ecryptfs_decode_and_decrypt_filename 5 10379 _000826_hash NULL
82701 +_000827_hash ecryptfs_encrypt_and_encode_filename 6 2109 _000827_hash NULL
82702 +_000828_hash ecryptfs_send_message_locked 2 31801 _000828_hash NULL
82703 +_000829_hash edac_device_alloc_ctl_info 1 5941 _000829_hash NULL
82704 +_000830_hash edac_mc_alloc 1 54846 _000830_hash NULL
82705 +_000831_hash edac_pci_alloc_ctl_info 1 63388 _000831_hash NULL
82706 +_000832_hash efivar_create_sysfs_entry 2 19485 _000832_hash NULL
82707 +_000833_hash em28xx_alloc_isoc 4 46892 _000833_hash NULL
82708 +_000834_hash enable_write 3 30456 _000834_hash NULL
82709 +_000835_hash enclosure_register 3 57412 _000835_hash NULL
82710 +_000836_hash ext4_kvzalloc 1 47605 _000836_hash NULL
82711 +_000837_hash extend_netdev_table 2 31680 _000837_hash NULL
82712 +_000838_hash __feat_register_sp 6 64712 _000838_hash NULL
82713 +_000839_hash __ffs_ep0_read_events 3 48868 _000839_hash NULL
82714 +_000840_hash ffs_ep0_write 3 9438 _000840_hash NULL
82715 +_000841_hash ffs_epfile_read 3 18775 _000841_hash NULL
82716 +_000842_hash ffs_epfile_write 3 48014 _000842_hash NULL
82717 +_000843_hash fib_info_hash_alloc 1 9075 _000843_hash NULL
82718 +_000844_hash fillonedir 3 41746 _000844_hash NULL
82719 +_000845_hash flexcop_device_kmalloc 1 54793 _000845_hash NULL
82720 +_000846_hash frame_alloc 4 15981 _000846_hash NULL
82721 +_000847_hash fw_node_create 2 9559 _000847_hash NULL
82722 +_000848_hash garmin_read_process 3 27509 _000848_hash NULL
82723 +_000849_hash garp_request_join 4 7471 _000849_hash NULL
82724 +_000850_hash get_derived_key 4 61100 _000850_hash NULL
82725 +_000851_hash get_entry 4 16003 _000851_hash NULL
82726 +_000852_hash get_free_de 2 33714 _000852_hash NULL
82727 +_000853_hash get_new_cssid 2 51665 _000853_hash NULL
82728 +_000854_hash getxattr 4 24398 _000854_hash NULL
82729 +_000855_hash gspca_dev_probe2 4 59833 _000855_hash NULL
82730 +_000856_hash hcd_alloc_coherent 5 55862 _000856_hash NULL
82731 +_000857_hash hci_sock_sendmsg 4 37420 _000857_hash NULL
82732 +_000858_hash hid_register_field 2-3 4874 _000858_hash NULL
82733 +_000860_hash hid_report_raw_event 4 7024 _000860_hash NULL
82734 +_000861_hash hpi_alloc_control_cache 1 35351 _000861_hash NULL
82735 +_000862_hash hugetlbfs_read_actor 2-5-4 34547 _000862_hash NULL
82736 +_000865_hash hvc_alloc 4 12579 _000865_hash NULL
82737 +_000866_hash __hwahc_dev_set_key 5 46328 _000866_hash NULL
82738 +_000867_hash i2400m_zrealloc_2x 3 54166 _001430_hash NULL nohasharray
82739 +_000868_hash ib_alloc_device 1 26483 _000868_hash NULL
82740 +_000869_hash ib_create_send_mad 5 1196 _000869_hash NULL
82741 +_000870_hash ibmasm_new_command 2 25714 _000870_hash NULL
82742 +_000871_hash ib_send_cm_drep 3 50186 _000871_hash NULL
82743 +_000872_hash ib_send_cm_mra 4 60202 _000872_hash NULL
82744 +_000873_hash ib_send_cm_rtu 3 63138 _000873_hash NULL
82745 +_000874_hash ieee80211_key_alloc 3 19065 _000874_hash NULL
82746 +_000875_hash ieee80211_mgmt_tx 9 46860 _000875_hash NULL
82747 +_000876_hash ieee80211_send_probe_req 6 6924 _000876_hash NULL
82748 +_000877_hash if_writecmd 2 815 _000877_hash NULL
82749 +_000878_hash init_bch 1-2 64130 _000878_hash NULL
82750 +_000880_hash init_ipath 1 48187 _000880_hash NULL
82751 +_000881_hash init_list_set 2-3 39188 _000881_hash NULL
82752 +_000883_hash init_q 4 132 _000883_hash NULL
82753 +_000884_hash init_state 2 60165 _000884_hash NULL
82754 +_000885_hash init_tag_map 3 57515 _000885_hash NULL
82755 +_000886_hash input_ff_create 2 21240 _000886_hash NULL
82756 +_000887_hash input_mt_init_slots 2 31183 _000887_hash NULL
82757 +_000888_hash interfaces 2 38859 _000888_hash NULL
82758 +_000889_hash ioat2_alloc_ring 2 11172 _000889_hash NULL
82759 +_000890_hash ip_generic_getfrag 3-4 12187 _000890_hash NULL
82760 +_000892_hash ipr_alloc_ucode_buffer 1 40199 _000892_hash NULL
82761 +_000893_hash ip_set_alloc 1 57953 _000893_hash NULL
82762 +_000894_hash ipv6_flowlabel_opt 3 58135 _001125_hash NULL nohasharray
82763 +_000895_hash ipv6_renew_options 5 28867 _000895_hash NULL
82764 +_000896_hash ipxrtr_route_packet 4 54036 _000896_hash NULL
82765 +_000897_hash irda_sendmsg 4 4388 _000897_hash NULL
82766 +_000898_hash irda_sendmsg_dgram 4 38563 _000898_hash NULL
82767 +_000899_hash irda_sendmsg_ultra 4 42047 _000899_hash NULL
82768 +_000900_hash irias_add_octseq_attrib 4 29983 _000900_hash NULL
82769 +_000901_hash irq_alloc_generic_chip 2 26650 _000901_hash NULL
82770 +_000902_hash irq_domain_add_linear 2 29236 _000902_hash NULL
82771 +_000903_hash iscsi_alloc_session 3 49390 _000903_hash NULL
82772 +_000904_hash iscsi_create_conn 2 50425 _000904_hash NULL
82773 +_000905_hash iscsi_create_endpoint 1 15193 _000905_hash NULL
82774 +_000906_hash iscsi_create_iface 5 38510 _000906_hash NULL
82775 +_000907_hash iscsi_decode_text_input 4 58292 _000907_hash NULL
82776 +_000908_hash iscsi_pool_init 2-4 54913 _000908_hash NULL
82777 +_000910_hash iscsit_dump_data_payload 2 38683 _000910_hash NULL
82778 +_000911_hash isdn_write 3 45863 _000911_hash NULL
82779 +_000912_hash isku_receive 4 54130 _000912_hash NULL
82780 +_000913_hash isku_send 4 41542 _000913_hash NULL
82781 +_000914_hash islpci_mgt_transaction 5 23610 _000914_hash NULL
82782 +_000915_hash iso_sched_alloc 1 13377 _002079_hash NULL nohasharray
82783 +_000916_hash ivtv_v4l2_write 3 39226 _000916_hash NULL
82784 +_000917_hash iwl_trans_txq_alloc 3 36147 _000917_hash NULL
82785 +_000918_hash iwmct_fw_parser_init 4 37876 _000918_hash NULL
82786 +_000919_hash iwm_notif_send 6 12295 _000919_hash NULL
82787 +_000920_hash iwm_ntf_calib_res 3 11686 _000920_hash NULL
82788 +_000921_hash iwm_umac_set_config_var 4 17320 _000921_hash NULL
82789 +_000922_hash ixgbe_alloc_q_vector 3-5 45428 _000922_hash NULL
82790 +_000924_hash jbd2_journal_init_revoke 2 51088 _000924_hash NULL
82791 +_000925_hash jffs2_write_dirent 5 37311 _000925_hash NULL
82792 +_000926_hash journal_init_revoke 2 56933 _000926_hash NULL
82793 +_000927_hash keyctl_instantiate_key 3 41855 _000927_hash NULL
82794 +_000928_hash keyctl_instantiate_key_iov 3 16969 _000928_hash NULL
82795 +_000929_hash __kfifo_from_user 3 20399 _000929_hash NULL
82796 +_000930_hash kimage_crash_alloc 3 3233 _000930_hash NULL
82797 +_000931_hash kimage_normal_alloc 3 31140 _000931_hash NULL
82798 +_000932_hash kmem_realloc 2 37489 _000932_hash NULL
82799 +_000933_hash kmem_zalloc 1 11510 _000933_hash NULL
82800 +_000934_hash koneplus_send 4 18226 _000934_hash NULL
82801 +_000935_hash koneplus_sysfs_read 6 42792 _000935_hash NULL
82802 +_000936_hash kovaplus_send 4 10009 _000936_hash NULL
82803 +_000937_hash kvm_read_guest_page_mmu 6 37611 _000937_hash NULL
82804 +_000938_hash kvm_set_irq_routing 3 48704 _000938_hash NULL
82805 +_000939_hash kvm_write_guest_cached 4 11106 _000939_hash NULL
82806 +_000940_hash kvm_write_guest_page 5 63555 _000940_hash NULL
82807 +_000941_hash l2cap_skbuff_fromiovec 3-4 35003 _000941_hash NULL
82808 +_000943_hash l2tp_ip_sendmsg 4 50411 _000943_hash NULL
82809 +_000944_hash l2tp_session_create 1 25286 _000944_hash NULL
82810 +_000945_hash lc_create 3 48662 _000945_hash NULL
82811 +_000946_hash leaf_dealloc 3 29566 _000946_hash NULL
82812 +_000947_hash linear_conf 2 23485 _000947_hash NULL
82813 +_000948_hash lirc_buffer_init 2-3 53282 _000948_hash NULL
82814 +_000950_hash llc_ui_sendmsg 4 24987 _000950_hash NULL
82815 +_000951_hash lpfc_sli4_queue_alloc 3 62646 _000951_hash NULL
82816 +_000952_hash mce_request_packet 3 1073 _000952_hash NULL
82817 +_000953_hash mdiobus_alloc_size 1 52259 _000953_hash NULL
82818 +_000954_hash media_entity_init 2-4 15870 _001556_hash NULL nohasharray
82819 +_000956_hash memstick_alloc_host 1 142 _000956_hash NULL
82820 +_000957_hash mesh_table_alloc 1 22305 _000957_hash NULL
82821 +_000958_hash mfd_add_devices 4 56753 _000958_hash NULL
82822 +_000959_hash mISDN_sock_sendmsg 4 41035 _000959_hash NULL
82823 +_000960_hash mmc_alloc_host 1 48097 _000960_hash NULL
82824 +_000961_hash mmc_test_alloc_mem 3 28102 _000961_hash NULL
82825 +_000962_hash mpi_alloc 1 18094 _000962_hash NULL
82826 +_000963_hash mpihelp_mul_karatsuba_case 5-3 23918 _000963_hash NULL
82827 +_000964_hash mpihelp_mul_n 4 16405 _000964_hash NULL
82828 +_000965_hash mpi_set_bit 2 15104 _000965_hash NULL
82829 +_000966_hash mpi_set_highbit 2 37327 _001420_hash NULL nohasharray
82830 +_000967_hash mtd_concat_create 2 14416 _000967_hash NULL
82831 +_000968_hash mvumi_alloc_mem_resource 3 47750 _000968_hash NULL
82832 +_000969_hash mwifiex_11n_create_rx_reorder_tbl 4 63806 _000969_hash NULL
82833 +_000970_hash mwifiex_alloc_sdio_mpa_buffers 2-3 60961 _000970_hash NULL
82834 +_000972_hash mwl8k_cmd_set_beacon 4 23110 _000972_hash NULL
82835 +_000973_hash neigh_hash_alloc 1 17595 _000973_hash NULL
82836 +_000974_hash netlink_sendmsg 4 33708 _001172_hash NULL nohasharray
82837 +_000975_hash netxen_alloc_sds_rings 2 13417 _000975_hash NULL
82838 +_000976_hash new_bind_ctl 2 35324 _000976_hash NULL
82839 +_000977_hash new_dir 3 31919 _000977_hash NULL
82840 +_000978_hash new_tape_buffer 2 32866 _000978_hash NULL
82841 +_000979_hash nfc_llcp_build_tlv 3 19536 _000979_hash NULL
82842 +_000980_hash nfc_llcp_send_i_frame 3 59130 _000980_hash NULL
82843 +_000981_hash nfs4_alloc_slots 1 2454 _000981_hash NULL
82844 +_000982_hash nfsctl_transaction_write 3 64800 _000982_hash NULL
82845 +_000983_hash nfs_idmap_request_key 3 30208 _000983_hash NULL
82846 +_000984_hash nfs_readdata_alloc 1 9990 _000984_hash NULL
82847 +_000985_hash nfs_writedata_alloc 1 62868 _000985_hash NULL
82848 +_000986_hash nl_pid_hash_zalloc 1 23314 _000986_hash NULL
82849 +_000987_hash nr_sendmsg 4 53656 _000987_hash NULL
82850 +_000988_hash nsm_create_handle 4 38060 _000988_hash NULL
82851 +_000989_hash ntfs_copy_from_user_iovec 3-6 49829 _000989_hash NULL
82852 +_000991_hash ntfs_file_buffered_write 4-6 41442 _000991_hash NULL
82853 +_000993_hash __ntfs_malloc 1 34022 _000993_hash NULL
82854 +_000994_hash nvme_alloc_queue 3 46865 _000994_hash NULL
82855 +_000995_hash ocfs2_acl_from_xattr 2 21604 _000995_hash NULL
82856 +_000996_hash ocfs2_control_message 3 19564 _000996_hash NULL
82857 +_000997_hash opera1_usb_i2c_msgxfer 4 64521 _000997_hash NULL
82858 +_000998_hash _ore_get_io_state 3 2166 _000998_hash NULL
82859 +_000999_hash orig_hash_add_if 2 53676 _000999_hash NULL
82860 +_001000_hash orig_hash_del_if 2 45080 _001000_hash NULL
82861 +_001001_hash orinoco_set_key 5-7 17878 _001001_hash NULL
82862 +_001003_hash osdmap_set_max_osd 2 57630 _001003_hash NULL
82863 +_001004_hash _osd_realloc_seg 3 54352 _001004_hash NULL
82864 +_001005_hash OSDSetBlock 2-4 38986 _001005_hash NULL
82865 +_001007_hash osst_execute 7-6 17607 _001007_hash NULL
82866 +_001008_hash osst_write 3 31581 _001008_hash NULL
82867 +_001009_hash otp_read 2-5-4 10594 _001009_hash NULL
82868 +_001012_hash ovs_vport_alloc 1 33475 _001012_hash NULL
82869 +_001013_hash packet_sendmsg_spkt 4 28885 _001013_hash NULL
82870 +_001014_hash pair_device 4 61175 _001708_hash NULL nohasharray
82871 +_001015_hash pccard_store_cis 6 18176 _001015_hash NULL
82872 +_001016_hash pci_add_cap_save_buffer 3 3426 _001016_hash NULL
82873 +_001017_hash pcnet32_realloc_rx_ring 3 36598 _001017_hash NULL
82874 +_001018_hash pcnet32_realloc_tx_ring 3 38428 _001018_hash NULL
82875 +_001019_hash pcpu_mem_zalloc 1 22948 _001019_hash NULL
82876 +_001020_hash pep_sendmsg 4 62524 _001020_hash NULL
82877 +_001021_hash pfkey_sendmsg 4 47394 _001021_hash NULL
82878 +_001022_hash pidlist_resize 2 496 _001022_hash NULL
82879 +_001023_hash pin_code_reply 4 46510 _001023_hash NULL
82880 +_001024_hash ping_getfrag 3-4 8360 _001024_hash NULL
82881 +_001026_hash pipe_set_size 2 5204 _001026_hash NULL
82882 +_001027_hash pkt_bio_alloc 1 48284 _001027_hash NULL
82883 +_001028_hash platform_create_bundle 4-6 12785 _001028_hash NULL
82884 +_001030_hash play_iframe 3 8219 _001030_hash NULL
82885 +_001031_hash pm8001_store_update_fw 4 55716 _001031_hash NULL
82886 +_001032_hash pmcraid_alloc_sglist 1 9864 _001032_hash NULL
82887 +_001033_hash pn533_dep_link_up 5 7659 _001033_hash NULL
82888 +_001034_hash pnp_alloc 1 24869 _001419_hash NULL nohasharray
82889 +_001035_hash pn_sendmsg 4 12640 _001035_hash NULL
82890 +_001036_hash pppoe_sendmsg 4 48039 _001036_hash NULL
82891 +_001037_hash pppol2tp_sendmsg 4 56420 _001037_hash NULL
82892 +_001038_hash process_vm_rw 3-5 47533 _001038_hash NULL
82893 +_001040_hash process_vm_rw_single_vec 1-2 26213 _001040_hash NULL
82894 +_001042_hash proc_write 3 51003 _001042_hash NULL
82895 +_001043_hash profile_load 3 58267 _001043_hash NULL
82896 +_001044_hash profile_remove 3 8556 _001044_hash NULL
82897 +_001045_hash profile_replace 3 14652 _001045_hash NULL
82898 +_001046_hash pscsi_get_bio 1 56103 _001046_hash NULL
82899 +_001047_hash pyra_send 4 12061 _001047_hash NULL
82900 +_001048_hash qc_capture 3 19298 _001048_hash NULL
82901 +_001049_hash qla4xxx_alloc_work 2 44813 _001049_hash NULL
82902 +_001050_hash qlcnic_alloc_msix_entries 2 46160 _001050_hash NULL
82903 +_001051_hash qlcnic_alloc_sds_rings 2 26795 _001051_hash NULL
82904 +_001052_hash queue_received_packet 5 9657 _001052_hash NULL
82905 +_001053_hash raw_send_hdrinc 4 58803 _001053_hash NULL
82906 +_001054_hash raw_sendmsg 4 23078 _001054_hash &_000022_hash
82907 +_001055_hash rawsock_sendmsg 4 60010 _001055_hash NULL
82908 +_001056_hash rawv6_send_hdrinc 3 35425 _001056_hash NULL
82909 +_001057_hash rb_alloc 1 3102 _001057_hash NULL
82910 +_001058_hash rbd_alloc_coll 1 33678 _001058_hash NULL
82911 +_001059_hash rbd_create_rw_ops 2 4605 _001059_hash NULL
82912 +_001060_hash rds_ib_inc_copy_to_user 3 55007 _001060_hash NULL
82913 +_001061_hash rds_iw_inc_copy_to_user 3 29214 _001061_hash NULL
82914 +_001062_hash rds_message_alloc 1 10517 _001062_hash NULL
82915 +_001063_hash rds_message_copy_from_user 3 45510 _001063_hash NULL
82916 +_001064_hash rds_message_inc_copy_to_user 3 26540 _001064_hash NULL
82917 +_001065_hash redrat3_transmit_ir 3 64244 _001065_hash NULL
82918 +_001066_hash regcache_rbtree_insert_to_block 5 58009 _001066_hash NULL
82919 +_001067_hash _regmap_raw_write 4 42652 _001067_hash NULL
82920 +_001068_hash regmap_register_patch 3 21681 _001068_hash NULL
82921 +_001069_hash relay_alloc_page_array 1 52735 _001069_hash NULL
82922 +_001070_hash remove_uuid 4 64505 _001070_hash NULL
82923 +_001071_hash reshape_ring 2 29147 _001071_hash NULL
82924 +_001072_hash RESIZE_IF_NEEDED 2 56286 _001072_hash NULL
82925 +_001073_hash resize_stripes 2 61650 _001073_hash NULL
82926 +_001074_hash rfcomm_sock_sendmsg 4 37661 _001074_hash NULL
82927 +_001075_hash rose_sendmsg 4 20249 _001075_hash NULL
82928 +_001076_hash rxrpc_send_data 5 21553 _001076_hash NULL
82929 +_001077_hash rxrpc_setsockopt 5 50286 _001077_hash NULL
82930 +_001078_hash saa7146_vmalloc_build_pgtable 2 19780 _001078_hash NULL
82931 +_001079_hash saa7164_buffer_alloc_user 2 9627 _001079_hash NULL
82932 +_001081_hash sco_send_frame 3 41815 _001081_hash NULL
82933 +_001082_hash scsi_host_alloc 2 63041 _001082_hash NULL
82934 +_001083_hash scsi_tgt_kspace_exec 8 9522 _001083_hash NULL
82935 +_001084_hash sctp_sendmsg 4 61919 _001084_hash NULL
82936 +_001085_hash sctp_setsockopt 5 44788 _001085_hash NULL
82937 +_001086_hash sctp_setsockopt_connectx 3 6073 _001086_hash NULL
82938 +_001087_hash sctp_setsockopt_connectx_old 3 22631 _001087_hash NULL
82939 +_001088_hash sctp_tsnmap_init 2 36446 _001088_hash NULL
82940 +_001089_hash sctp_user_addto_chunk 2-3 62047 _001089_hash NULL
82941 +_001091_hash security_context_to_sid 2 19839 _001091_hash NULL
82942 +_001092_hash security_context_to_sid_default 2 3492 _001092_hash NULL
82943 +_001093_hash security_context_to_sid_force 2 20724 _001093_hash NULL
82944 +_001094_hash selinux_transaction_write 3 59038 _001094_hash NULL
82945 +_001095_hash sel_write_access 3 51704 _001095_hash NULL
82946 +_001096_hash sel_write_create 3 11353 _001096_hash NULL
82947 +_001097_hash sel_write_member 3 28800 _001097_hash NULL
82948 +_001098_hash sel_write_relabel 3 55195 _001098_hash NULL
82949 +_001099_hash sel_write_user 3 45060 _001099_hash NULL
82950 +_001100_hash __seq_open_private 3 40715 _001100_hash NULL
82951 +_001101_hash serverworks_create_gatt_pages 1 46582 _001101_hash NULL
82952 +_001102_hash set_connectable 4 56458 _001102_hash NULL
82953 +_001103_hash set_dev_class 4 39645 _001697_hash NULL nohasharray
82954 +_001104_hash set_discoverable 4 48141 _001104_hash NULL
82955 +_001105_hash setkey 3 14987 _001105_hash NULL
82956 +_001106_hash set_le 4 30581 _001106_hash NULL
82957 +_001107_hash set_link_security 4 4502 _001107_hash NULL
82958 +_001108_hash set_local_name 4 55757 _001108_hash NULL
82959 +_001109_hash set_powered 4 12129 _001109_hash NULL
82960 +_001110_hash set_ssp 4 62411 _001110_hash NULL
82961 +_001111_hash sg_build_sgat 3 60179 _001111_hash &_000305_hash
82962 +_001112_hash sg_read_oxfer 3 51724 _001112_hash NULL
82963 +_001113_hash shmem_xattr_set 4 11843 _001113_hash NULL
82964 +_001114_hash simple_alloc_urb 3 60420 _001114_hash NULL
82965 +_001115_hash sisusb_send_bridge_packet 2 11649 _001115_hash NULL
82966 +_001116_hash sisusb_send_packet 2 20891 _001116_hash NULL
82967 +_001117_hash skb_add_data_nocache 4 4682 _001117_hash NULL
82968 +_001118_hash skb_copy_datagram_from_iovec 2-5-4 52014 _001118_hash NULL
82969 +_001121_hash skb_copy_to_page_nocache 6 58624 _001121_hash NULL
82970 +_001122_hash sk_chk_filter 2 42095 _001122_hash NULL
82971 +_001123_hash skcipher_sendmsg 4 30290 _001123_hash NULL
82972 +_001124_hash sl_change_mtu 2 7396 _001124_hash NULL
82973 +_001125_hash slhc_init 1-2 58135 _001125_hash &_000894_hash
82974 +_001127_hash sm501_create_subdev 3-4 48668 _001127_hash NULL
82975 +_001129_hash smk_write_access 3 49561 _001129_hash NULL
82976 +_001130_hash snapshot_write 3 28351 _001130_hash NULL
82977 +_001131_hash snd_ac97_pcm_assign 2 30218 _001131_hash NULL
82978 +_001132_hash snd_card_create 4 64418 _001411_hash NULL nohasharray
82979 +_001133_hash snd_emux_create_port 3 42533 _001133_hash NULL
82980 +_001134_hash snd_gus_dram_write 4 38784 _001134_hash NULL
82981 +_001135_hash snd_midi_channel_alloc_set 1 28153 _001135_hash NULL
82982 +_001136_hash _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 _001136_hash NULL
82983 +_001137_hash snd_pcm_oss_sync1 2 45298 _001137_hash NULL
82984 +_001138_hash snd_pcm_oss_write 3 38108 _001138_hash NULL
82985 +_001139_hash snd_pcm_plugin_build 5 25505 _001139_hash NULL
82986 +_001140_hash snd_rawmidi_kernel_write 3 25106 _001140_hash NULL
82987 +_001141_hash snd_rawmidi_write 3 28008 _001141_hash NULL
82988 +_001142_hash snd_rme32_playback_copy 5 43732 _001142_hash NULL
82989 +_001143_hash snd_rme96_playback_copy 5 13111 _001143_hash NULL
82990 +_001144_hash snd_seq_device_new 4 31753 _001144_hash NULL
82991 +_001145_hash snd_seq_oss_readq_new 2 14283 _001145_hash NULL
82992 +_001146_hash snd_vx_create 4 40948 _001146_hash NULL
82993 +_001147_hash sock_setsockopt 5 50088 _001147_hash NULL
82994 +_001148_hash sound_write 3 5102 _001148_hash NULL
82995 +_001149_hash _sp2d_alloc 1 16944 _001149_hash NULL
82996 +_001150_hash spi_alloc_master 2 45223 _001150_hash NULL
82997 +_001151_hash spidev_message 3 5518 _001151_hash NULL
82998 +_001152_hash spi_register_board_info 2 35651 _001152_hash NULL
82999 +_001153_hash squashfs_cache_init 2 41656 _001153_hash NULL
83000 +_001154_hash squashfs_read_data 6 59440 _001154_hash NULL
83001 +_001155_hash srp_alloc_iu 2 44227 _001155_hash NULL
83002 +_001156_hash srp_iu_pool_alloc 2 17920 _001156_hash NULL
83003 +_001157_hash srp_ring_alloc 2 26760 _001157_hash NULL
83004 +_001159_hash start_isoc_chain 2 565 _001159_hash NULL
83005 +_001160_hash stk_prepare_sio_buffers 2 57168 _001160_hash NULL
83006 +_001161_hash store_iwmct_log_level 4 60209 _001161_hash NULL
83007 +_001162_hash store_iwmct_log_level_fw 4 1974 _001162_hash NULL
83008 +_001163_hash st_write 3 16874 _001163_hash NULL
83009 +_001164_hash svc_pool_map_alloc_arrays 2 47181 _001164_hash NULL
83010 +_001165_hash symtab_init 2 61050 _001165_hash NULL
83011 +_001166_hash sys_bind 3 10799 _001166_hash NULL
83012 +_001167_hash sys_connect 3 15291 _001167_hash NULL
83013 +_001168_hash sys_flistxattr 3 41407 _001168_hash NULL
83014 +_001169_hash sys_fsetxattr 4 49736 _001169_hash NULL
83015 +_001170_hash sysfs_write_file 3 57116 _001170_hash NULL
83016 +_001171_hash sys_ipc 3 4889 _001171_hash NULL
83017 +_001172_hash sys_keyctl 4 33708 _001172_hash &_000974_hash
83018 +_001173_hash sys_listxattr 3 27833 _001173_hash NULL
83019 +_001174_hash sys_llistxattr 3 4532 _001174_hash NULL
83020 +_001175_hash sys_lsetxattr 4 61177 _001175_hash NULL
83021 +_001176_hash sys_mq_timedsend 3 57661 _001176_hash NULL
83022 +_001177_hash sys_sched_setaffinity 2 32046 _001177_hash NULL
83023 +_001178_hash sys_semop 3 39457 _001178_hash NULL
83024 +_001179_hash sys_sendto 6 20809 _001179_hash NULL
83025 +_001180_hash sys_setxattr 4 37880 _001180_hash NULL
83026 +_001181_hash t4_alloc_mem 1 32342 _001181_hash NULL
83027 +_001182_hash tcf_hash_create 4 54360 _001182_hash NULL
83028 +_001183_hash __team_options_register 3 63941 _001183_hash NULL
83029 +_001184_hash test_unaligned_bulk 3 52333 _001184_hash NULL
83030 +_001185_hash tifm_alloc_adapter 1 10903 _001185_hash NULL
83031 +_001186_hash timeout_write 3 50991 _001186_hash NULL
83032 +_001187_hash tipc_link_send_sections_fast 4 37920 _001187_hash NULL
83033 +_001188_hash tipc_subseq_alloc 1 5957 _001188_hash NULL
83034 +_001189_hash tm6000_read_write_usb 7 50774 _001189_hash NULL
83035 +_001190_hash tnode_alloc 1 49407 _001190_hash NULL
83036 +_001191_hash tomoyo_commit_ok 2 20167 _001191_hash NULL
83037 +_001192_hash tomoyo_scan_bprm 2-4 15642 _001192_hash NULL
83038 +_001194_hash tps65910_i2c_write 3 39531 _001194_hash NULL
83039 +_001195_hash ts_write 3 64336 _001195_hash NULL
83040 +_001196_hash ttusb2_msg 4 3100 _001196_hash NULL
83041 +_001197_hash tty_write 3 5494 _001197_hash NULL
83042 +_001198_hash ubi_dbg_check_all_ff 4 59810 _001198_hash NULL
83043 +_001199_hash ubi_dbg_check_write 5 48525 _001199_hash NULL
83044 +_001200_hash ubifs_setxattr 4 59650 _001370_hash NULL nohasharray
83045 +_001201_hash udf_sb_alloc_partition_maps 2 62313 _001201_hash NULL
83046 +_001202_hash udplite_getfrag 3-4 14479 _001202_hash NULL
83047 +_001204_hash ulong_write_file 3 26485 _001204_hash NULL
83048 +_001205_hash unix_dgram_sendmsg 4 45699 _001205_hash NULL
83049 +_001206_hash unix_stream_sendmsg 4 61455 _001206_hash NULL
83050 +_001207_hash unlink_queued 3-4 645 _001207_hash NULL
83051 +_001208_hash update_pmkid 4 2481 _001208_hash NULL
83052 +_001209_hash usb_alloc_coherent 2 65444 _001209_hash NULL
83053 +_001210_hash uvc_alloc_buffers 2 9656 _001210_hash NULL
83054 +_001211_hash uvc_alloc_entity 3 20836 _001211_hash NULL
83055 +_001212_hash v4l2_ctrl_new 7 38725 _001212_hash NULL
83056 +_001213_hash v4l2_event_subscribe 3 19510 _001213_hash NULL
83057 +_001214_hash vb2_read 3 42703 _001214_hash NULL
83058 +_001215_hash vb2_write 3 31948 _001215_hash NULL
83059 +_001216_hash vc_resize 2-3 3585 _001216_hash NULL
83060 +_001218_hash __vhost_add_used_n 3 26554 _001218_hash NULL
83061 +_001219_hash __videobuf_alloc_vb 1 27062 _001219_hash NULL
83062 +_001220_hash videobuf_dma_init_kernel 3 6963 _001220_hash NULL
83063 +_001221_hash virtqueue_add_buf 3-4 59470 _001221_hash NULL
83064 +_001223_hash vmalloc 1 15464 _001223_hash NULL
83065 +_001224_hash vmalloc_to_sg 2 58354 _001224_hash NULL
83066 +_001225_hash vol_cdev_write 3 40915 _001225_hash NULL
83067 +_001226_hash vxge_device_register 4 7752 _001226_hash NULL
83068 +_001227_hash __vxge_hw_channel_allocate 3 55462 _001227_hash NULL
83069 +_001228_hash vzalloc 1 47421 _001228_hash NULL
83070 +_001229_hash vzalloc_node 1 23424 _001229_hash NULL
83071 +_001230_hash wa_nep_queue 2 8858 _001230_hash NULL
83072 +_001231_hash __wa_xfer_setup_segs 2 56725 _001231_hash NULL
83073 +_001232_hash wiphy_new 2 2482 _001232_hash NULL
83074 +_001233_hash wpan_phy_alloc 1 48056 _001233_hash NULL
83075 +_001234_hash wusb_ccm_mac 7 32199 _001234_hash NULL
83076 +_001235_hash x25_sendmsg 4 12487 _001235_hash NULL
83077 +_001236_hash xfrm_hash_alloc 1 10997 _001236_hash NULL
83078 +_001237_hash _xfs_buf_get_pages 2 46811 _001237_hash NULL
83079 +_001238_hash xfs_da_buf_make 1 55845 _001238_hash NULL
83080 +_001239_hash xfs_da_grow_inode_int 3 21785 _001239_hash NULL
83081 +_001240_hash xfs_dir_cilookup_result 3 64288 _001240_hash NULL
83082 +_001241_hash xfs_iext_add_indirect_multi 3 32400 _001241_hash NULL
83083 +_001242_hash xfs_iext_inline_to_direct 2 12384 _001242_hash NULL
83084 +_001243_hash xfs_iroot_realloc 2 46826 _001243_hash NULL
83085 +_001244_hash xhci_alloc_stream_info 3 63902 _001244_hash NULL
83086 +_001245_hash xlog_recover_add_to_trans 4 62839 _001245_hash NULL
83087 +_001246_hash xprt_alloc 2 1475 _001246_hash NULL
83088 +_001247_hash xt_alloc_table_info 1 57903 _001247_hash NULL
83089 +_001248_hash _zd_iowrite32v_async_locked 3 39034 _001248_hash NULL
83090 +_001249_hash zd_usb_iowrite16v 3 49744 _001249_hash NULL
83091 +_001250_hash acpi_ds_build_internal_package_obj 3 58271 _001250_hash NULL
83092 +_001251_hash acpi_system_read_event 3 55362 _001251_hash NULL
83093 +_001252_hash acpi_ut_create_buffer_object 1 42030 _001252_hash NULL
83094 +_001253_hash acpi_ut_create_package_object 1 17594 _001253_hash NULL
83095 +_001254_hash acpi_ut_create_string_object 1 15360 _001254_hash NULL
83096 +_001255_hash ad7879_spi_multi_read 3 8218 _001255_hash NULL
83097 +_001256_hash add_child 4 45201 _001256_hash NULL
83098 +_001257_hash add_port 2 54941 _001257_hash NULL
83099 +_001258_hash adu_read 3 24177 _001258_hash NULL
83100 +_001259_hash afs_cell_create 2 27346 _001259_hash NULL
83101 +_001260_hash agp_generic_alloc_user 1 9470 _001260_hash NULL
83102 +_001261_hash alloc_agpphysmem_i8xx 1 39427 _001261_hash NULL
83103 +_001262_hash allocate_cnodes 1 5329 _001262_hash NULL
83104 +_001263_hash ___alloc_bootmem 1 11410 _001263_hash NULL
83105 +_001264_hash __alloc_bootmem_nopanic 1 65397 _001264_hash NULL
83106 +_001265_hash alloc_bulk_urbs_generic 5 12127 _001265_hash NULL
83107 +_001266_hash alloc_candev 1-2 7776 _001266_hash NULL
83108 +_001268_hash ____alloc_ei_netdev 1 51475 _001268_hash NULL
83109 +_001269_hash alloc_etherdev_mqs 1 36450 _001269_hash NULL
83110 +_001270_hash alloc_extent_buffer 3 52824 _001270_hash NULL
83111 +_001271_hash alloc_fcdev 1 18780 _001271_hash NULL
83112 +_001272_hash alloc_fddidev 1 15382 _001272_hash NULL
83113 +_001273_hash alloc_hippi_dev 1 51320 _001273_hash NULL
83114 +_001274_hash alloc_irdadev 1 19140 _001274_hash NULL
83115 +_001275_hash alloc_ltalkdev 1 38071 _001275_hash NULL
83116 +_001276_hash alloc_one_pg_vec_page 1 10747 _001276_hash NULL
83117 +_001277_hash alloc_orinocodev 1 21371 _001277_hash NULL
83118 +_001279_hash alloc_trdev 1 16399 _001279_hash NULL
83119 +_001280_hash async_setkey 3 35521 _001280_hash NULL
83120 +_001281_hash ata_host_alloc_pinfo 3 17325 _001281_hash NULL
83121 +_001284_hash ath6kl_connect_event 7-9-8 14267 _001284_hash NULL
83122 +_001285_hash ath6kl_fwlog_block_read 3 49836 _001285_hash NULL
83123 +_001286_hash ath6kl_fwlog_read 3 32101 _001286_hash NULL
83124 +_001287_hash ath_rx_init 2 43564 _001287_hash NULL
83125 +_001288_hash ath_tx_init 2 60515 _001288_hash NULL
83126 +_001289_hash atm_get_addr 3 31221 _001289_hash NULL
83127 +_001290_hash av7110_ipack_init 2 46655 _001290_hash NULL
83128 +_001291_hash bdx_rxdb_create 1 46525 _001291_hash NULL
83129 +_001292_hash bdx_tx_db_init 2 41719 _001292_hash NULL
83130 +_001293_hash bio_map_kern 3 64751 _001293_hash NULL
83131 +_001294_hash bits_to_user 3 47733 _001294_hash NULL
83132 +_001295_hash __blk_queue_init_tags 2 9778 _001295_hash NULL
83133 +_001296_hash blk_queue_resize_tags 2 28670 _001296_hash NULL
83134 +_001297_hash blk_rq_map_user_iov 5 16772 _001297_hash NULL
83135 +_001298_hash bm_init 2 13529 _001298_hash NULL
83136 +_001299_hash brcmf_alloc_wdev 1 60347 _001299_hash NULL
83137 +_001300_hash btrfs_insert_dir_item 4 59304 _001300_hash NULL
83138 +_001301_hash btrfs_map_block 3 64379 _001301_hash NULL
83139 +_001302_hash c4_add_card 3 54968 _001302_hash NULL
83140 +_001303_hash cache_read 3 24790 _001303_hash NULL
83141 +_001304_hash cache_write 3 13589 _001304_hash NULL
83142 +_001305_hash calc_hmac 3 32010 _001305_hash NULL
83143 +_001306_hash ccid_getsockopt_builtin_ccids 2 53634 _001306_hash NULL
83144 +_001307_hash ceph_copy_page_vector_to_user 4 31270 _001307_hash NULL
83145 +_001308_hash ceph_read_dir 3 17005 _001308_hash NULL
83146 +_001309_hash cfg80211_roamed 5-7 32632 _001309_hash NULL
83147 +_001311_hash ci_ll_init 3 12930 _001311_hash NULL
83148 +_001312_hash coda_psdev_read 3 35029 _001312_hash NULL
83149 +_001313_hash construct_key_and_link 4 8321 _001313_hash NULL
83150 +_001314_hash copy_counters_to_user 5 17027 _001824_hash NULL nohasharray
83151 +_001315_hash copy_entries_to_user 1 52367 _001315_hash NULL
83152 +_001316_hash copy_from_buf 4 27308 _001316_hash NULL
83153 +_001317_hash copy_oldmem_page 3 26164 _001317_hash NULL
83154 +_001318_hash copy_to_user_fromio 3 57432 _001318_hash NULL
83155 +_001319_hash cryptd_hash_setkey 3 42781 _001319_hash NULL
83156 +_001320_hash crypto_authenc_esn_setkey 3 6985 _001320_hash NULL
83157 +_001321_hash crypto_authenc_setkey 3 80 _001321_hash NULL
83158 +_001322_hash cx18_copy_buf_to_user 4 22735 _001322_hash NULL
83159 +_001324_hash cxgbi_ddp_reserve 4 30091 _001324_hash NULL
83160 +_001325_hash datablob_hmac_append 3 40038 _001325_hash NULL
83161 +_001326_hash datablob_hmac_verify 4 24786 _001326_hash NULL
83162 +_001327_hash dataflash_read_fact_otp 3-2 33204 _001327_hash NULL
83163 +_001328_hash dataflash_read_user_otp 3-2 14536 _001328_hash &_000201_hash
83164 +_001329_hash dccp_feat_register_sp 5 17914 _001329_hash NULL
83165 +_001330_hash ddb_input_read 3 9743 _001330_hash NULL
83166 +_001331_hash dev_read 3 56369 _001331_hash NULL
83167 +_001332_hash diva_os_copy_to_user 4 48508 _001332_hash NULL
83168 +_001333_hash diva_os_malloc 2 16406 _001333_hash NULL
83169 +_001334_hash dlm_dir_lookup 4 56662 _001334_hash NULL
83170 +_001335_hash dm_vcalloc 1-2 16814 _001335_hash NULL
83171 +_001337_hash do_proc_readlink 3 14096 _001337_hash NULL
83172 +_001338_hash do_readlink 2 43518 _001338_hash NULL
83173 +_001339_hash __do_replace 5 37227 _001339_hash NULL
83174 +_001340_hash do_sigpending 2 9766 _001340_hash NULL
83175 +_001341_hash drbd_setsockopt 5 16280 _001341_hash &_000371_hash
83176 +_001342_hash dsp_buffer_alloc 2 11684 _001342_hash NULL
83177 +_001343_hash dump_midi 3 51040 _001343_hash NULL
83178 +_001344_hash dvb_dmxdev_set_buffer_size 2 55643 _001344_hash NULL
83179 +_001345_hash dvb_dvr_set_buffer_size 2 9840 _001345_hash NULL
83180 +_001346_hash dvb_ringbuffer_pkt_read_user 3-5 4303 _001346_hash NULL
83181 +_001348_hash dvb_ringbuffer_read_user 3 56702 _001348_hash NULL
83182 +_001349_hash ecryptfs_filldir 3 6622 _001349_hash NULL
83183 +_001350_hash ecryptfs_readlink 3 40775 _001350_hash NULL
83184 +_001351_hash ecryptfs_send_message 2 18322 _001351_hash NULL
83185 +_001352_hash em28xx_init_isoc 4 62883 _001352_hash &_000721_hash
83186 +_001353_hash et61x251_read 3 25420 _001353_hash NULL
83187 +_001354_hash ext4_add_new_descs 3 19509 _001354_hash NULL
83188 +_001355_hash fat_ioctl_filldir 3 36621 _001355_hash NULL
83189 +_001356_hash fd_copyout 3 59323 _001356_hash NULL
83190 +_001357_hash f_hidg_read 3 6238 _001357_hash NULL
83191 +_001358_hash filldir 3 55137 _001358_hash NULL
83192 +_001359_hash filldir64 3 46469 _001359_hash NULL
83193 +_001360_hash fops_read 3 40672 _001360_hash NULL
83194 +_001361_hash from_buffer 3 18625 _001361_hash NULL
83195 +_001362_hash fsm_init 2 16134 _001362_hash NULL
83196 +_001363_hash get_subdir 3 62581 _001363_hash NULL
83197 +_001364_hash gspca_dev_probe 4 2570 _001364_hash NULL
83198 +_001365_hash handle_received_packet 3 22457 _001365_hash NULL
83199 +_001366_hash hash_setkey 3 48310 _001366_hash NULL
83200 +_001367_hash hdlcdrv_register 2 6792 _001367_hash NULL
83201 +_001368_hash hdpvr_read 3 9273 _001368_hash NULL
83202 +_001369_hash hid_input_report 4 32458 _001369_hash NULL
83203 +_001370_hash hidraw_read 3 59650 _001370_hash &_001200_hash
83204 +_001371_hash HiSax_readstatus 2 15752 _001371_hash NULL
83205 +_001373_hash __hwahc_op_set_gtk 4 42038 _001373_hash NULL
83206 +_001374_hash __hwahc_op_set_ptk 5 36510 _001374_hash NULL
83207 +_001375_hash ib_copy_to_udata 3 27525 _001375_hash NULL
83208 +_001376_hash idetape_chrdev_read 3 2097 _001376_hash NULL
83209 +_001377_hash ieee80211_alloc_hw 1 43829 _001377_hash NULL
83210 +_001378_hash ieee80211_bss_info_update 4 13991 _001378_hash NULL
83211 +_001379_hash ilo_read 3 32531 _001379_hash NULL
83212 +_001380_hash init_map_ipmac 3-4 63896 _001380_hash NULL
83213 +_001382_hash init_tid_tabs 2-4-3 13252 _001382_hash NULL
83214 +_001385_hash iowarrior_read 3 53483 _001385_hash NULL
83215 +_001386_hash ipv6_getsockopt_sticky 5 56711 _001386_hash NULL
83216 +_001387_hash ipwireless_send_packet 4 8328 _001387_hash NULL
83217 +_001388_hash ipx_sendmsg 4 1362 _001388_hash NULL
83218 +_001389_hash iscsi_conn_setup 2 35159 _001389_hash NULL
83219 +_001390_hash iscsi_create_session 3 51647 _001390_hash NULL
83220 +_001391_hash iscsi_host_alloc 2 36671 _001391_hash NULL
83221 +_001392_hash iscsi_session_setup 4-5 196 _001392_hash NULL
83222 +_001394_hash iscsit_find_cmd_from_itt_or_dump 3 17194 _001701_hash NULL nohasharray
83223 +_001395_hash isdn_ppp_read 4 50356 _001395_hash NULL
83224 +_001396_hash isku_sysfs_read 6 58806 _001396_hash NULL
83225 +_001397_hash isku_sysfs_write 6 49767 _001397_hash NULL
83226 +_001398_hash iso_alloc_urb 4-5 45206 _001398_hash NULL
83227 +_001400_hash ivtv_copy_buf_to_user 4 6159 _001400_hash NULL
83228 +_001401_hash iwm_rx_handle 3 24899 _001401_hash NULL
83229 +_001402_hash iwm_wdev_alloc 1 38415 _001402_hash NULL
83230 +_001403_hash jbd2_alloc 1 41359 _001403_hash NULL
83231 +_001404_hash jffs2_do_link 6 42048 _001404_hash NULL
83232 +_001405_hash jffs2_do_unlink 4 62020 _001405_hash NULL
83233 +_001406_hash jffs2_security_setxattr 4 62107 _001406_hash NULL
83234 +_001407_hash jffs2_trusted_setxattr 4 17048 _001407_hash NULL
83235 +_001408_hash jffs2_user_setxattr 4 10182 _001408_hash NULL
83236 +_001409_hash kernel_setsockopt 5 35913 _001409_hash NULL
83237 +_001410_hash keyctl_describe_key 3 36853 _001410_hash NULL
83238 +_001411_hash keyctl_get_security 3 64418 _001411_hash &_001132_hash
83239 +_001412_hash keyring_read 3 13438 _001412_hash NULL
83240 +_001413_hash kfifo_copy_to_user 3 20646 _001413_hash NULL
83241 +_001414_hash kmem_zalloc_large 1 56128 _001414_hash NULL
83242 +_001415_hash kmp_init 2 41373 _001415_hash NULL
83243 +_001416_hash koneplus_sysfs_write 6 35993 _001416_hash NULL
83244 +_001417_hash kvm_clear_guest_page 4 2308 _001417_hash NULL
83245 +_001418_hash kvm_read_nested_guest_page 5 13337 _001418_hash NULL
83246 +_001419_hash l2cap_create_basic_pdu 3 24869 _001419_hash &_001034_hash
83247 +_001420_hash l2cap_create_connless_pdu 3 37327 _001420_hash &_000966_hash
83248 +_001421_hash l2cap_create_iframe_pdu 3 51801 _001421_hash NULL
83249 +_001422_hash __lgwrite 4 57669 _001422_hash NULL
83250 +_001423_hash libfc_host_alloc 2 7917 _001423_hash NULL
83251 +_001424_hash llcp_sock_sendmsg 4 1092 _001424_hash NULL
83252 +_001425_hash macvtap_get_user 4 28185 _001425_hash NULL
83253 +_001426_hash mcam_v4l_read 3 36513 _001426_hash NULL
83254 +_001427_hash mce_async_out 3 58056 _001427_hash NULL
83255 +_001428_hash mce_flush_rx_buffer 2 14976 _001428_hash NULL
83256 +_001429_hash mdc800_device_read 3 22896 _001429_hash NULL
83257 +_001430_hash memcpy_toiovec 3 54166 _001430_hash &_000867_hash
83258 +_001431_hash memcpy_toiovecend 3-4 19736 _001431_hash NULL
83259 +_001433_hash mgt_set_varlen 4 60916 _001433_hash NULL
83260 +_001434_hash mlx4_en_create_rx_ring 3 62498 _001434_hash NULL
83261 +_001435_hash mlx4_en_create_tx_ring 4 48501 _001435_hash NULL
83262 +_001436_hash mon_bin_get_event 4 52863 _001436_hash NULL
83263 +_001437_hash mousedev_read 3 47123 _001437_hash NULL
83264 +_001438_hash move_addr_to_user 2 2868 _001438_hash NULL
83265 +_001439_hash mpihelp_mul 5-3 27805 _001439_hash NULL
83266 +_001441_hash mpi_lshift_limbs 2 9337 _001441_hash NULL
83267 +_001442_hash msnd_fifo_alloc 2 23179 _001442_hash NULL
83268 +_001443_hash mtdswap_init 2 55719 _001443_hash NULL
83269 +_001444_hash neigh_hash_grow 2 17283 _001444_hash NULL
83270 +_001445_hash nfs4_realloc_slot_table 2 22859 _001445_hash NULL
83271 +_001446_hash nfs_idmap_get_key 2 39616 _001446_hash NULL
83272 +_001447_hash nsm_get_handle 4 52089 _001447_hash NULL
83273 +_001448_hash ntfs_malloc_nofs 1 49572 _001448_hash NULL
83274 +_001449_hash ntfs_malloc_nofs_nofail 1 63631 _001449_hash NULL
83275 +_001450_hash nvme_create_queue 3 170 _001450_hash NULL
83276 +_001451_hash ocfs2_control_write 3 54737 _001451_hash NULL
83277 +_001452_hash orinoco_add_extscan_result 3 18207 _001452_hash NULL
83278 +_001454_hash override_release 2 52032 _001454_hash NULL
83279 +_001455_hash packet_snd 3 13634 _001455_hash NULL
83280 +_001456_hash pcbit_stat 2 27364 _001456_hash NULL
83281 +_001457_hash pcpu_extend_area_map 2 12589 _001457_hash NULL
83282 +_001458_hash pg_read 3 17276 _001458_hash NULL
83283 +_001459_hash picolcd_debug_eeprom_read 3 14549 _001459_hash NULL
83284 +_001460_hash pkt_alloc_packet_data 1 37928 _001460_hash NULL
83285 +_001461_hash pmcraid_build_passthrough_ioadls 2 62034 _001461_hash NULL
83286 +_001462_hash pms_capture 4 27142 _001462_hash NULL
83287 +_001463_hash posix_clock_register 2 5662 _001463_hash NULL
83288 +_001464_hash printer_read 3 54851 _001464_hash NULL
83289 +_001465_hash __proc_file_read 3 54978 _001465_hash NULL
83290 +_001466_hash pt_read 3 49136 _001466_hash NULL
83291 +_001467_hash put_cmsg 4 36589 _001467_hash NULL
83292 +_001468_hash pvr2_ioread_read 3 10720 _001505_hash NULL nohasharray
83293 +_001469_hash pwc_video_read 3 51735 _001469_hash NULL
83294 +_001470_hash px_raw_event 4 49371 _001470_hash NULL
83295 +_001471_hash qcam_read 3 13977 _001471_hash NULL
83296 +_001472_hash rawv6_sendmsg 4 20080 _001472_hash NULL
83297 +_001473_hash rds_sendmsg 4 40976 _001473_hash NULL
83298 +_001474_hash read_flush 3 43851 _001474_hash NULL
83299 +_001475_hash read_profile 3 27859 _001475_hash NULL
83300 +_001476_hash read_vmcore 3 26501 _001476_hash NULL
83301 +_001477_hash redirected_tty_write 3 65297 _001477_hash NULL
83302 +_001478_hash __register_chrdev 2-3 54223 _001478_hash NULL
83303 +_001480_hash regmap_raw_write 4 53803 _001480_hash NULL
83304 +_001481_hash reiserfs_allocate_list_bitmaps 3 21732 _001481_hash NULL
83305 +_001482_hash reiserfs_resize 2 34377 _001482_hash NULL
83306 +_001483_hash request_key_auth_read 3 24109 _001483_hash NULL
83307 +_001484_hash rfkill_fop_read 3 54711 _001484_hash NULL
83308 +_001485_hash rng_dev_read 3 41581 _001485_hash NULL
83309 +_001486_hash roccat_read 3 41093 _001486_hash NULL
83310 +_001487_hash sco_sock_sendmsg 4 62542 _001487_hash NULL
83311 +_001488_hash scsi_register 2 49094 _001488_hash NULL
83312 +_001489_hash sctp_getsockopt_events 2 3607 _001489_hash NULL
83313 +_001490_hash sctp_getsockopt_maxburst 2 42941 _001490_hash NULL
83314 +_001491_hash sctp_getsockopt_maxseg 2 10737 _001491_hash NULL
83315 +_001492_hash sctpprobe_read 3 17741 _001492_hash NULL
83316 +_001493_hash sdhci_alloc_host 2 7509 _001493_hash NULL
83317 +_001494_hash selinux_inode_post_setxattr 4 26037 _001494_hash NULL
83318 +_001495_hash selinux_inode_setsecurity 4 18148 _001495_hash NULL
83319 +_001496_hash selinux_inode_setxattr 4 10708 _001496_hash NULL
83320 +_001497_hash selinux_secctx_to_secid 2 63744 _001497_hash NULL
83321 +_001498_hash selinux_setprocattr 4 55611 _001498_hash NULL
83322 +_001499_hash sel_write_context 3 25726 _002397_hash NULL nohasharray
83323 +_001500_hash seq_copy_in_user 3 18543 _001500_hash NULL
83324 +_001501_hash seq_open_net 4 8968 _001594_hash NULL nohasharray
83325 +_001502_hash seq_open_private 3 61589 _001502_hash NULL
83326 +_001503_hash set_arg 3 42824 _001503_hash NULL
83327 +_001504_hash sg_read 3 25799 _001504_hash NULL
83328 +_001505_hash shash_async_setkey 3 10720 _001505_hash &_001468_hash
83329 +_001506_hash shash_compat_setkey 3 12267 _001506_hash NULL
83330 +_001507_hash shmem_setxattr 4 55867 _001507_hash NULL
83331 +_001508_hash simple_read_from_buffer 2-5 55957 _001508_hash NULL
83332 +_001511_hash sm_checker_extend 2 23615 _001511_hash NULL
83333 +_001512_hash sn9c102_read 3 29305 _001512_hash NULL
83334 +_001513_hash snd_es1938_capture_copy 5 25930 _001513_hash NULL
83335 +_001514_hash snd_gus_dram_peek 4 9062 _001514_hash NULL
83336 +_001515_hash snd_hdsp_capture_copy 5 4011 _001515_hash NULL
83337 +_001516_hash snd_korg1212_copy_to 6 92 _001516_hash NULL
83338 +_001517_hash snd_opl4_mem_proc_read 5 63774 _001517_hash NULL
83339 +_001518_hash snd_pcm_alloc_vmalloc_buffer 2 44595 _001518_hash NULL
83340 +_001519_hash snd_pcm_oss_read1 3 63771 _001519_hash NULL
83341 +_001520_hash snd_rawmidi_kernel_read1 4 36740 _001520_hash NULL
83342 +_001521_hash snd_rme9652_capture_copy 5 10287 _001521_hash NULL
83343 +_001522_hash srp_target_alloc 3 37288 _001522_hash NULL
83344 +_001523_hash stk_allocate_buffers 2 16291 _001523_hash NULL
83345 +_001524_hash store_ifalias 4 35088 _001524_hash NULL
83346 +_001525_hash store_msg 3 56417 _001525_hash NULL
83347 +_001526_hash str_to_user 2 11411 _001526_hash NULL
83348 +_001527_hash subbuf_read_actor 3 2071 _001527_hash NULL
83349 +_001528_hash sys_fgetxattr 4 25166 _001528_hash NULL
83350 +_001529_hash sys_gethostname 2 49698 _001529_hash NULL
83351 +_001530_hash sys_getxattr 4 37418 _001530_hash NULL
83352 +_001531_hash sys_kexec_load 2 14222 _001531_hash NULL
83353 +_001532_hash sys_msgsnd 3 44537 _001532_hash &_000129_hash
83354 +_001533_hash sys_process_vm_readv 3-5 19090 _001533_hash NULL
83355 +_001535_hash sys_process_vm_writev 3-5 4928 _001535_hash NULL
83356 +_001537_hash sys_sched_getaffinity 2 60033 _001537_hash NULL
83357 +_001538_hash sys_setsockopt 5 35320 _001538_hash NULL
83358 +_001539_hash t3_init_l2t 1 8261 _001539_hash NULL
83359 +_001540_hash team_options_register 3 20091 _001540_hash NULL
83360 +_001541_hash tipc_send2name 6 16809 _001541_hash NULL
83361 +_001542_hash tipc_send2port 5 63935 _001542_hash NULL
83362 +_001543_hash tipc_send 4 51238 _001543_hash NULL
83363 +_001544_hash tm6000_i2c_recv_regs16 5 2949 _001544_hash NULL
83364 +_001545_hash tm6000_i2c_recv_regs 5 46215 _001545_hash NULL
83365 +_001546_hash tm6000_i2c_send_regs 5 20250 _001546_hash NULL
83366 +_001547_hash tnode_new 3 44757 _001547_hash NULL
83367 +_001548_hash tomoyo_read_self 3 33539 _001548_hash NULL
83368 +_001549_hash tomoyo_update_domain 2 5498 _001549_hash NULL
83369 +_001550_hash tomoyo_update_policy 2 40458 _001550_hash NULL
83370 +_001551_hash tpm_read 3 50344 _001551_hash NULL
83371 +_001552_hash TSS_rawhmac 3 17486 _001552_hash NULL
83372 +_001553_hash tt3650_ci_msg 4 57219 _001553_hash NULL
83373 +_001554_hash tun_get_user 3 33178 _001554_hash NULL
83374 +_001555_hash ubi_dbg_dump_flash 4 3870 _001555_hash NULL
83375 +_001556_hash ubi_io_write 4-5 15870 _001556_hash &_000954_hash
83376 +_001558_hash uio_read 3 49300 _001558_hash NULL
83377 +_001559_hash unix_seqpacket_sendmsg 4 27893 _001559_hash NULL
83378 +_001560_hash unlink1 3 63059 _001560_hash NULL
83379 +_001562_hash usb_allocate_stream_buffers 3 8964 _001562_hash NULL
83380 +_001563_hash usbdev_read 3 45114 _001563_hash NULL
83381 +_001564_hash usblp_read 3 57342 _001564_hash NULL
83382 +_001565_hash usbtmc_read 3 32377 _001565_hash NULL
83383 +_001566_hash usbvision_v4l2_read 3 34386 _001566_hash NULL
83384 +_001567_hash _usb_writeN_sync 4 31682 _001567_hash NULL
83385 +_001568_hash user_read 3 51881 _001568_hash NULL
83386 +_001569_hash v4l_stk_read 3 39672 _001569_hash NULL
83387 +_001570_hash vcs_read 3 8017 _001570_hash NULL
83388 +_001571_hash vdma_mem_alloc 1 6171 _001571_hash NULL
83389 +_001572_hash venus_create 4 20555 _001572_hash NULL
83390 +_001573_hash venus_link 5 32165 _001573_hash NULL
83391 +_001574_hash venus_lookup 4 8121 _001574_hash NULL
83392 +_001575_hash venus_mkdir 4 8967 _001575_hash NULL
83393 +_001576_hash venus_remove 4 59781 _001576_hash NULL
83394 +_001577_hash venus_rename 4-5 17707 _001577_hash NULL
83395 +_001579_hash venus_rmdir 4 45564 _001579_hash NULL
83396 +_001580_hash venus_symlink 4-6 23570 _001580_hash NULL
83397 +_001582_hash vfs_readlink 3 54368 _001582_hash NULL
83398 +_001583_hash vfs_readv 3 38011 _001583_hash NULL
83399 +_001584_hash vfs_writev 3 25278 _001584_hash NULL
83400 +_001585_hash vga_arb_read 3 4886 _001585_hash NULL
83401 +_001586_hash vhci_put_user 4 12604 _001586_hash NULL
83402 +_001587_hash vhost_add_used_n 3 10760 _001587_hash NULL
83403 +_001588_hash __videobuf_copy_to_user 4 15423 _001588_hash NULL
83404 +_001589_hash videobuf_pages_to_sg 2 3708 _001589_hash NULL
83405 +_001590_hash videobuf_vmalloc_to_sg 2 4548 _001590_hash NULL
83406 +_001591_hash virtnet_send_command 5-6 61993 _001591_hash NULL
83407 +_001593_hash vmbus_establish_gpadl 3 4495 _001593_hash NULL
83408 +_001594_hash vol_cdev_read 3 8968 _001594_hash &_001501_hash
83409 +_001595_hash w9966_v4l_read 3 31148 _001595_hash NULL
83410 +_001596_hash wdm_read 3 6549 _001596_hash NULL
83411 +_001597_hash wusb_prf 7 54261 _001597_hash &_000063_hash
83412 +_001598_hash xdi_copy_to_user 4 48900 _001598_hash NULL
83413 +_001599_hash xfs_buf_get_uncached 2 51477 _001599_hash NULL
83414 +_001600_hash xfs_efd_init 3 5463 _001600_hash NULL
83415 +_001601_hash xfs_efi_init 2 5476 _001601_hash NULL
83416 +_001602_hash xfs_iext_realloc_direct 2 20521 _001602_hash NULL
83417 +_001603_hash xfs_iext_realloc_indirect 2 59211 _001603_hash NULL
83418 +_001604_hash xfs_inumbers_fmt 3 12817 _001604_hash NULL
83419 +_001605_hash xlog_recover_add_to_cont_trans 4 44102 _001605_hash NULL
83420 +_001606_hash xz_dec_lzma2_create 2 36353 _001606_hash NULL
83421 +_001607_hash _zd_iowrite32v_locked 3 44725 _001607_hash NULL
83422 +_001608_hash aat2870_reg_read_file 3 12221 _001608_hash NULL
83423 +_001609_hash add_sctp_bind_addr 3 12269 _001609_hash NULL
83424 +_001610_hash aes_decrypt_fail_read 3 54815 _001610_hash NULL
83425 +_001611_hash aes_decrypt_interrupt_read 3 19910 _001611_hash NULL
83426 +_001612_hash aes_decrypt_packets_read 3 10155 _001612_hash NULL
83427 +_001613_hash aes_encrypt_fail_read 3 32562 _001613_hash NULL
83428 +_001614_hash aes_encrypt_interrupt_read 3 39919 _001614_hash NULL
83429 +_001615_hash aes_encrypt_packets_read 3 48666 _001615_hash NULL
83430 +_001616_hash afs_cell_lookup 2 8482 _001616_hash NULL
83431 +_001617_hash agp_allocate_memory 2 58761 _001617_hash NULL
83432 +_001618_hash __alloc_bootmem 1 31498 _001618_hash NULL
83433 +_001619_hash __alloc_bootmem_low 1 43423 _001619_hash NULL
83434 +_001620_hash __alloc_bootmem_node_nopanic 2 6432 _001620_hash NULL
83435 +_001621_hash alloc_cc770dev 1 48186 _001621_hash NULL
83436 +_001622_hash __alloc_ei_netdev 1 29338 _001622_hash NULL
83437 +_001623_hash __alloc_eip_netdev 1 51549 _001623_hash NULL
83438 +_001624_hash alloc_libipw 1 22708 _001624_hash NULL
83439 +_001625_hash alloc_pg_vec 2 8533 _001625_hash NULL
83440 +_001626_hash alloc_sja1000dev 1 17868 _001626_hash NULL
83441 +_001627_hash alloc_targets 2 8074 _001627_hash NULL
83442 +_001630_hash ath6kl_disconnect_timeout_read 3 3650 _001630_hash NULL
83443 +_001631_hash ath6kl_endpoint_stats_read 3 41554 _001631_hash NULL
83444 +_001632_hash ath6kl_fwlog_mask_read 3 2050 _001632_hash NULL
83445 +_001633_hash ath6kl_keepalive_read 3 44303 _001633_hash NULL
83446 +_001634_hash ath6kl_listen_int_read 3 10355 _001634_hash NULL
83447 +_001635_hash ath6kl_lrssi_roam_read 3 61022 _001635_hash NULL
83448 +_001636_hash ath6kl_regdump_read 3 14393 _001636_hash NULL
83449 +_001637_hash ath6kl_regread_read 3 25884 _001637_hash NULL
83450 +_001638_hash ath6kl_regwrite_read 3 48747 _001638_hash NULL
83451 +_001639_hash ath6kl_roam_table_read 3 26166 _001639_hash NULL
83452 +_001640_hash ath9k_debugfs_read_buf 3 25316 _001640_hash NULL
83453 +_001641_hash atk_debugfs_ggrp_read 3 29522 _001641_hash NULL
83454 +_001642_hash b43_debugfs_read 3 24425 _001642_hash NULL
83455 +_001643_hash b43legacy_debugfs_read 3 2473 _001643_hash NULL
83456 +_001644_hash bcm_recvmsg 4 43992 _001644_hash NULL
83457 +_001645_hash bfad_debugfs_read 3 13119 _001645_hash NULL
83458 +_001646_hash bfad_debugfs_read_regrd 3 57830 _001646_hash NULL
83459 +_001647_hash blk_init_tags 1 30592 _001647_hash NULL
83460 +_001648_hash blk_queue_init_tags 2 44355 _001648_hash NULL
83461 +_001649_hash blk_rq_map_kern 4 47004 _001649_hash NULL
83462 +_001650_hash bm_entry_read 3 10976 _001650_hash NULL
83463 +_001651_hash bm_status_read 3 19583 _001651_hash NULL
83464 +_001652_hash bnad_debugfs_read 3 50665 _001652_hash NULL
83465 +_001653_hash bnad_debugfs_read_regrd 3 51308 _001653_hash NULL
83466 +_001654_hash btmrvl_curpsmode_read 3 46939 _001654_hash NULL
83467 +_001655_hash btmrvl_gpiogap_read 3 4718 _001655_hash NULL
83468 +_001656_hash btmrvl_hscfgcmd_read 3 56303 _001656_hash NULL
83469 +_001657_hash btmrvl_hscmd_read 3 1614 _001657_hash NULL
83470 +_001658_hash btmrvl_hsmode_read 3 1647 _001658_hash NULL
83471 +_001659_hash btmrvl_hsstate_read 3 920 _001659_hash NULL
83472 +_001660_hash btmrvl_pscmd_read 3 24308 _001660_hash NULL
83473 +_001661_hash btmrvl_psmode_read 3 22395 _001661_hash NULL
83474 +_001662_hash btmrvl_psstate_read 3 50683 _001662_hash NULL
83475 +_001663_hash btmrvl_txdnldready_read 3 413 _001663_hash NULL
83476 +_001664_hash btrfs_add_link 5 9973 _001664_hash NULL
83477 +_001665_hash btrfs_discard_extent 2 38547 _001665_hash NULL
83478 +_001666_hash btrfs_find_create_tree_block 3 55812 _001666_hash NULL
83479 +_001667_hash btrfsic_map_block 2 56751 _001667_hash NULL
83480 +_001668_hash caif_stream_recvmsg 4 13173 _001668_hash NULL
83481 +_001669_hash carl9170_alloc 1 27 _001669_hash NULL
83482 +_001670_hash carl9170_debugfs_read 3 47738 _001670_hash NULL
83483 +_001671_hash cgroup_read_s64 5 19570 _001671_hash NULL
83484 +_001672_hash cgroup_read_u64 5 45532 _001672_hash NULL
83485 +_001673_hash channel_type_read 3 47308 _001673_hash NULL
83486 +_001674_hash codec_list_read_file 3 24910 _001674_hash NULL
83487 +_001675_hash configfs_read_file 3 1683 _001675_hash NULL
83488 +_001676_hash cpuset_common_file_read 5 8800 _001676_hash NULL
83489 +_001677_hash create_subvol 4 2347 _001677_hash NULL
83490 +_001678_hash cx18_copy_mdl_to_user 4 45549 _001678_hash NULL
83491 +_001679_hash dai_list_read_file 3 25421 _001679_hash NULL
83492 +_001680_hash dapm_bias_read_file 3 64715 _001680_hash NULL
83493 +_001681_hash dapm_widget_power_read_file 3 59950 _001754_hash NULL nohasharray
83494 +_001684_hash dbgfs_frame 3 45917 _001684_hash NULL
83495 +_001685_hash dbgfs_state 3 38894 _001685_hash NULL
83496 +_001686_hash debugfs_read 3 62535 _001686_hash NULL
83497 +_001687_hash debug_output 3 18575 _001687_hash NULL
83498 +_001688_hash debug_read 3 19322 _001688_hash NULL
83499 +_001689_hash dfs_file_read 3 18116 _001689_hash NULL
83500 +_001690_hash dma_memcpy_pg_to_iovec 6 1725 _001690_hash NULL
83501 +_001691_hash dma_memcpy_to_iovec 5 12173 _001691_hash NULL
83502 +_001692_hash dma_rx_errors_read 3 52045 _001692_hash NULL
83503 +_001693_hash dma_rx_requested_read 3 65354 _001693_hash NULL
83504 +_001694_hash dma_show_regs 3 35266 _001694_hash NULL
83505 +_001695_hash dma_tx_errors_read 3 46060 _001695_hash NULL
83506 +_001696_hash dma_tx_requested_read 3 16110 _001775_hash NULL nohasharray
83507 +_001697_hash dm_exception_table_init 2 39645 _001697_hash &_001103_hash
83508 +_001698_hash dn_recvmsg 4 17213 _001698_hash NULL
83509 +_001699_hash dns_resolver_read 3 54658 _001699_hash NULL
83510 +_001700_hash do_msgrcv 4 5590 _001700_hash NULL
83511 +_001701_hash driver_state_read 3 17194 _001701_hash &_001394_hash
83512 +_001702_hash dvb_demux_do_ioctl 3 34871 _001702_hash NULL
83513 +_001703_hash dvb_dmxdev_buffer_read 4 20682 _001703_hash NULL
83514 +_001704_hash dvb_dvr_do_ioctl 3 43355 _001704_hash NULL
83515 +_001705_hash econet_recvmsg 4 40978 _001705_hash NULL
83516 +_001706_hash event_calibration_read 3 21083 _001706_hash NULL
83517 +_001707_hash event_heart_beat_read 3 48961 _001707_hash NULL
83518 +_001708_hash event_oom_late_read 3 61175 _001708_hash &_001014_hash
83519 +_001709_hash event_phy_transmit_error_read 3 10471 _001709_hash NULL
83520 +_001710_hash event_rx_mem_empty_read 3 40363 _001710_hash NULL
83521 +_001711_hash event_rx_mismatch_read 3 38518 _001711_hash NULL
83522 +_001712_hash event_rx_pool_read 3 25792 _001712_hash NULL
83523 +_001713_hash event_tx_stuck_read 3 19305 _001713_hash NULL
83524 +_001714_hash excessive_retries_read 3 60425 _001714_hash NULL
83525 +_001715_hash fallback_on_nodma_alloc 2 35332 _001715_hash NULL
83526 +_001716_hash filter_read 3 61692 _001716_hash NULL
83527 +_001717_hash format_devstat_counter 3 32550 _001717_hash NULL
83528 +_001718_hash fragmentation_threshold_read 3 61718 _001718_hash NULL
83529 +_001719_hash fuse_conn_limit_read 3 20084 _001719_hash NULL
83530 +_001720_hash fuse_conn_waiting_read 3 49762 _001720_hash NULL
83531 +_001721_hash generic_readlink 3 32654 _001721_hash NULL
83532 +_001722_hash gpio_power_read 3 36059 _001722_hash NULL
83533 +_001723_hash hash_recvmsg 4 50924 _001723_hash NULL
83534 +_001724_hash ht40allow_map_read 3 55209 _001724_hash NULL
83535 +_001725_hash hwflags_read 3 52318 _001725_hash NULL
83536 +_001726_hash hysdn_conf_read 3 42324 _001726_hash NULL
83537 +_001727_hash i2400m_rx_stats_read 3 57706 _001727_hash NULL
83538 +_001728_hash i2400m_tx_stats_read 3 28527 _001728_hash NULL
83539 +_001729_hash idmouse_read 3 63374 _001729_hash NULL
83540 +_001730_hash ieee80211_if_read 3 6785 _001730_hash NULL
83541 +_001731_hash ieee80211_rx_bss_info 3 61630 _001731_hash NULL
83542 +_001732_hash ikconfig_read_current 3 1658 _001732_hash NULL
83543 +_001733_hash il3945_sta_dbgfs_stats_table_read 3 48802 _001733_hash NULL
83544 +_001734_hash il3945_ucode_general_stats_read 3 46111 _001734_hash NULL
83545 +_001735_hash il3945_ucode_rx_stats_read 3 3048 _001735_hash NULL
83546 +_001736_hash il3945_ucode_tx_stats_read 3 36016 _001736_hash NULL
83547 +_001737_hash il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 _001737_hash NULL
83548 +_001738_hash il4965_rs_sta_dbgfs_scale_table_read 3 38564 _001738_hash NULL
83549 +_001739_hash il4965_rs_sta_dbgfs_stats_table_read 3 49206 _001739_hash NULL
83550 +_001740_hash il4965_ucode_general_stats_read 3 56277 _001740_hash NULL
83551 +_001741_hash il4965_ucode_rx_stats_read 3 61948 _001741_hash NULL
83552 +_001742_hash il4965_ucode_tx_stats_read 3 12064 _001742_hash NULL
83553 +_001743_hash il_dbgfs_chain_noise_read 3 38044 _001743_hash NULL
83554 +_001744_hash il_dbgfs_channels_read 3 25005 _001744_hash NULL
83555 +_001745_hash il_dbgfs_disable_ht40_read 3 42386 _001745_hash NULL
83556 +_001746_hash il_dbgfs_fh_reg_read 3 40993 _001746_hash NULL
83557 +_001747_hash il_dbgfs_force_reset_read 3 57517 _001747_hash NULL
83558 +_001748_hash il_dbgfs_interrupt_read 3 3351 _001748_hash NULL
83559 +_001749_hash il_dbgfs_missed_beacon_read 3 59956 _001749_hash NULL
83560 +_001750_hash il_dbgfs_nvm_read 3 12288 _001750_hash NULL
83561 +_001751_hash il_dbgfs_power_save_status_read 3 43165 _001751_hash NULL
83562 +_001752_hash il_dbgfs_qos_read 3 33615 _001752_hash NULL
83563 +_001753_hash il_dbgfs_rxon_filter_flags_read 3 19281 _001753_hash NULL
83564 +_001754_hash il_dbgfs_rxon_flags_read 3 59950 _001754_hash &_001681_hash
83565 +_001755_hash il_dbgfs_rx_queue_read 3 11221 _001755_hash NULL
83566 +_001756_hash il_dbgfs_rx_stats_read 3 15243 _001756_hash NULL
83567 +_001757_hash il_dbgfs_sensitivity_read 3 2370 _001757_hash NULL
83568 +_001758_hash il_dbgfs_sram_read 3 62296 _001758_hash NULL
83569 +_001759_hash il_dbgfs_stations_read 3 21532 _001759_hash NULL
83570 +_001760_hash il_dbgfs_status_read 3 58388 _001760_hash NULL
83571 +_001761_hash il_dbgfs_tx_queue_read 3 55668 _001761_hash NULL
83572 +_001762_hash il_dbgfs_tx_stats_read 3 32913 _001762_hash NULL
83573 +_001763_hash ima_show_htable_value 2 57136 _001763_hash NULL
83574 +_001765_hash ipw_write 3 59807 _001765_hash NULL
83575 +_001766_hash irda_recvmsg_stream 4 35280 _001766_hash NULL
83576 +_001767_hash iscsi_tcp_conn_setup 2 16376 _001767_hash NULL
83577 +_001768_hash isr_cmd_cmplt_read 3 53439 _001768_hash NULL
83578 +_001769_hash isr_commands_read 3 41398 _001769_hash NULL
83579 +_001770_hash isr_decrypt_done_read 3 49490 _001770_hash NULL
83580 +_001771_hash isr_dma0_done_read 3 8574 _001771_hash NULL
83581 +_001772_hash isr_dma1_done_read 3 48159 _001772_hash NULL
83582 +_001773_hash isr_fiqs_read 3 34687 _001773_hash NULL
83583 +_001774_hash isr_host_acknowledges_read 3 54136 _001774_hash NULL
83584 +_001775_hash isr_hw_pm_mode_changes_read 3 16110 _001775_hash &_001696_hash
83585 +_001776_hash isr_irqs_read 3 9181 _001776_hash NULL
83586 +_001777_hash isr_low_rssi_read 3 64789 _001777_hash NULL
83587 +_001778_hash isr_pci_pm_read 3 30271 _001778_hash NULL
83588 +_001779_hash isr_rx_headers_read 3 38325 _001779_hash NULL
83589 +_001780_hash isr_rx_mem_overflow_read 3 43025 _001780_hash NULL
83590 +_001781_hash isr_rx_procs_read 3 31804 _001781_hash NULL
83591 +_001782_hash isr_rx_rdys_read 3 35283 _001782_hash NULL
83592 +_001783_hash isr_tx_exch_complete_read 3 16103 _001783_hash NULL
83593 +_001784_hash isr_tx_procs_read 3 23084 _001784_hash NULL
83594 +_001785_hash isr_wakeups_read 3 49607 _001785_hash NULL
83595 +_001786_hash ivtv_read 3 57796 _001786_hash NULL
83596 +_001787_hash iwl_dbgfs_bt_traffic_read 3 35534 _001787_hash NULL
83597 +_001788_hash iwl_dbgfs_chain_noise_read 3 46355 _001788_hash NULL
83598 +_001789_hash iwl_dbgfs_channels_read 3 6784 _001789_hash NULL
83599 +_001790_hash iwl_dbgfs_current_sleep_command_read 3 2081 _001790_hash NULL
83600 +_001791_hash iwl_dbgfs_disable_ht40_read 3 35761 _001791_hash NULL
83601 +_001792_hash iwl_dbgfs_fh_reg_read 3 879 _001792_hash &_000393_hash
83602 +_001793_hash iwl_dbgfs_force_reset_read 3 62628 _001793_hash NULL
83603 +_001794_hash iwl_dbgfs_interrupt_read 3 23574 _001794_hash NULL
83604 +_001795_hash iwl_dbgfs_log_event_read 3 2107 _001795_hash NULL
83605 +_001796_hash iwl_dbgfs_missed_beacon_read 3 50584 _001796_hash NULL
83606 +_001797_hash iwl_dbgfs_nvm_read 3 23845 _001797_hash NULL
83607 +_001798_hash iwl_dbgfs_plcp_delta_read 3 55407 _001798_hash NULL
83608 +_001799_hash iwl_dbgfs_power_save_status_read 3 54392 _001799_hash NULL
83609 +_001800_hash iwl_dbgfs_protection_mode_read 3 13943 _001800_hash NULL
83610 +_001801_hash iwl_dbgfs_qos_read 3 11753 _001801_hash NULL
83611 +_001802_hash iwl_dbgfs_reply_tx_error_read 3 19205 _001802_hash NULL
83612 +_001803_hash iwl_dbgfs_rx_handlers_read 3 18708 _001803_hash NULL
83613 +_001804_hash iwl_dbgfs_rxon_filter_flags_read 3 28832 _001804_hash NULL
83614 +_001805_hash iwl_dbgfs_rxon_flags_read 3 20795 _001805_hash NULL
83615 +_001806_hash iwl_dbgfs_rx_queue_read 3 19943 _001806_hash NULL
83616 +_001807_hash iwl_dbgfs_rx_statistics_read 3 62687 _001807_hash &_000425_hash
83617 +_001808_hash iwl_dbgfs_sensitivity_read 3 63116 _001808_hash NULL
83618 +_001809_hash iwl_dbgfs_sleep_level_override_read 3 3038 _001809_hash NULL
83619 +_001810_hash iwl_dbgfs_sram_read 3 44505 _001810_hash NULL
83620 +_001811_hash iwl_dbgfs_stations_read 3 9309 _001811_hash NULL
83621 +_001812_hash iwl_dbgfs_status_read 3 5171 _001812_hash NULL
83622 +_001813_hash iwl_dbgfs_temperature_read 3 29224 _001813_hash NULL
83623 +_001814_hash iwl_dbgfs_thermal_throttling_read 3 38779 _001814_hash NULL
83624 +_001815_hash iwl_dbgfs_traffic_log_read 3 58870 _001815_hash NULL
83625 +_001816_hash iwl_dbgfs_tx_queue_read 3 4635 _001816_hash NULL
83626 +_001817_hash iwl_dbgfs_tx_statistics_read 3 314 _001817_hash NULL
83627 +_001818_hash iwl_dbgfs_ucode_bt_stats_read 3 42820 _001818_hash NULL
83628 +_001819_hash iwl_dbgfs_ucode_general_stats_read 3 49199 _001819_hash NULL
83629 +_001820_hash iwl_dbgfs_ucode_rx_stats_read 3 58023 _001820_hash NULL
83630 +_001821_hash iwl_dbgfs_ucode_tracing_read 3 47983 _001821_hash &_000349_hash
83631 +_001822_hash iwl_dbgfs_ucode_tx_stats_read 3 31611 _001822_hash NULL
83632 +_001823_hash iwl_dbgfs_wowlan_sram_read 3 540 _001823_hash NULL
83633 +_001824_hash iwm_if_alloc 1 17027 _001824_hash &_001314_hash
83634 +_001825_hash kernel_readv 3 35617 _001825_hash NULL
83635 +_001826_hash key_algorithm_read 3 57946 _001826_hash NULL
83636 +_001827_hash key_icverrors_read 3 20895 _001827_hash NULL
83637 +_001828_hash key_key_read 3 3241 _001828_hash NULL
83638 +_001829_hash key_replays_read 3 62746 _001829_hash NULL
83639 +_001830_hash key_rx_spec_read 3 12736 _001830_hash NULL
83640 +_001831_hash key_tx_spec_read 3 4862 _001831_hash NULL
83641 +_001832_hash __kfifo_to_user 3 36555 _002199_hash NULL nohasharray
83642 +_001833_hash __kfifo_to_user_r 3 39123 _001833_hash NULL
83643 +_001834_hash kmem_zalloc_greedy 2-3 65268 _001834_hash NULL
83644 +_001836_hash l2cap_chan_send 3 49995 _001836_hash NULL
83645 +_001837_hash l2cap_sar_segment_sdu 3 27701 _001837_hash NULL
83646 +_001838_hash lbs_debugfs_read 3 30721 _001838_hash NULL
83647 +_001839_hash lbs_dev_info 3 51023 _001839_hash NULL
83648 +_001840_hash lbs_host_sleep_read 3 31013 _001840_hash NULL
83649 +_001841_hash lbs_rdbbp_read 3 45805 _001841_hash NULL
83650 +_001842_hash lbs_rdmac_read 3 418 _001842_hash NULL
83651 +_001843_hash lbs_rdrf_read 3 41431 _001843_hash NULL
83652 +_001844_hash lbs_sleepparams_read 3 10840 _001844_hash NULL
83653 +_001845_hash lbs_threshold_read 5 21046 _001845_hash NULL
83654 +_001846_hash libfc_vport_create 2 4415 _001846_hash NULL
83655 +_001847_hash lkdtm_debugfs_read 3 45752 _001847_hash NULL
83656 +_001848_hash llcp_sock_recvmsg 4 13556 _001848_hash NULL
83657 +_001849_hash long_retry_limit_read 3 59766 _001849_hash NULL
83658 +_001850_hash lpfc_debugfs_dif_err_read 3 36303 _001850_hash NULL
83659 +_001851_hash lpfc_debugfs_read 3 16566 _001851_hash NULL
83660 +_001852_hash lpfc_idiag_baracc_read 3 58466 _002447_hash NULL nohasharray
83661 +_001853_hash lpfc_idiag_ctlacc_read 3 33943 _001853_hash NULL
83662 +_001854_hash lpfc_idiag_drbacc_read 3 15948 _001854_hash NULL
83663 +_001855_hash lpfc_idiag_extacc_read 3 48301 _001855_hash NULL
83664 +_001856_hash lpfc_idiag_mbxacc_read 3 28061 _001856_hash NULL
83665 +_001857_hash lpfc_idiag_pcicfg_read 3 50334 _001857_hash NULL
83666 +_001858_hash lpfc_idiag_queacc_read 3 13950 _001858_hash NULL
83667 +_001859_hash lpfc_idiag_queinfo_read 3 55662 _001859_hash NULL
83668 +_001860_hash mac80211_format_buffer 2 41010 _001860_hash NULL
83669 +_001861_hash macvtap_put_user 4 55609 _001861_hash NULL
83670 +_001862_hash macvtap_sendmsg 4 30629 _001862_hash NULL
83671 +_001863_hash mic_calc_failure_read 3 59700 _001863_hash NULL
83672 +_001864_hash mic_rx_pkts_read 3 27972 _001864_hash NULL
83673 +_001865_hash minstrel_stats_read 3 17290 _001865_hash NULL
83674 +_001866_hash mmc_ext_csd_read 3 13205 _001866_hash NULL
83675 +_001867_hash mon_bin_read 3 6841 _001867_hash NULL
83676 +_001868_hash mon_stat_read 3 25238 _001868_hash NULL
83677 +_001870_hash mqueue_read_file 3 6228 _001870_hash NULL
83678 +_001871_hash mwifiex_debug_read 3 53074 _001871_hash NULL
83679 +_001872_hash mwifiex_getlog_read 3 54269 _001872_hash NULL
83680 +_001873_hash mwifiex_info_read 3 53447 _001873_hash NULL
83681 +_001874_hash mwifiex_rdeeprom_read 3 51429 _001874_hash NULL
83682 +_001875_hash mwifiex_regrdwr_read 3 34472 _001875_hash NULL
83683 +_001876_hash nfsd_vfs_read 6 62605 _001876_hash NULL
83684 +_001877_hash nfsd_vfs_write 6 54577 _001877_hash NULL
83685 +_001878_hash nfs_idmap_lookup_id 2 10660 _001878_hash NULL
83686 +_001879_hash o2hb_debug_read 3 37851 _001879_hash NULL
83687 +_001880_hash o2net_debug_read 3 52105 _001880_hash NULL
83688 +_001881_hash ocfs2_control_read 3 56405 _001881_hash NULL
83689 +_001882_hash ocfs2_debug_read 3 14507 _001882_hash NULL
83690 +_001883_hash ocfs2_readlink 3 50656 _001883_hash NULL
83691 +_001884_hash oom_adjust_read 3 25127 _001884_hash NULL
83692 +_001885_hash oom_score_adj_read 3 39921 _002116_hash NULL nohasharray
83693 +_001886_hash oprofilefs_str_to_user 3 42182 _001886_hash NULL
83694 +_001887_hash oprofilefs_ulong_to_user 3 11582 _001887_hash NULL
83695 +_001888_hash _osd_req_list_objects 6 4204 _001888_hash NULL
83696 +_001889_hash osd_req_read_kern 5 59990 _001889_hash NULL
83697 +_001890_hash osd_req_write_kern 5 53486 _001890_hash NULL
83698 +_001891_hash p54_init_common 1 23850 _001891_hash NULL
83699 +_001892_hash packet_sendmsg 4 24954 _001892_hash NULL
83700 +_001893_hash page_readlink 3 23346 _001893_hash NULL
83701 +_001894_hash pcf50633_write_block 3 2124 _001894_hash NULL
83702 +_001895_hash platform_list_read_file 3 34734 _001895_hash NULL
83703 +_001896_hash pm860x_bulk_write 3 43875 _001896_hash NULL
83704 +_001897_hash pm_qos_power_read 3 55891 _001897_hash NULL
83705 +_001898_hash pms_read 3 53873 _001898_hash NULL
83706 +_001899_hash port_show_regs 3 5904 _001899_hash NULL
83707 +_001900_hash proc_coredump_filter_read 3 39153 _001900_hash NULL
83708 +_001901_hash proc_fdinfo_read 3 62043 _001901_hash NULL
83709 +_001902_hash proc_info_read 3 63344 _001902_hash NULL
83710 +_001903_hash proc_loginuid_read 3 15631 _001903_hash NULL
83711 +_001904_hash proc_pid_attr_read 3 10173 _001904_hash NULL
83712 +_001905_hash proc_pid_readlink 3 52186 _001905_hash NULL
83713 +_001906_hash proc_read 3 43614 _001906_hash NULL
83714 +_001907_hash proc_self_readlink 3 38094 _001907_hash NULL
83715 +_001908_hash proc_sessionid_read 3 6911 _002038_hash NULL nohasharray
83716 +_001909_hash provide_user_output 3 41105 _001909_hash NULL
83717 +_001910_hash ps_pspoll_max_apturn_read 3 6699 _001910_hash NULL
83718 +_001911_hash ps_pspoll_timeouts_read 3 11776 _001911_hash NULL
83719 +_001912_hash ps_pspoll_utilization_read 3 5361 _001912_hash NULL
83720 +_001913_hash pstore_file_read 3 57288 _001913_hash NULL
83721 +_001914_hash ps_upsd_max_apturn_read 3 19918 _001914_hash NULL
83722 +_001915_hash ps_upsd_max_sptime_read 3 63362 _001915_hash NULL
83723 +_001916_hash ps_upsd_timeouts_read 3 28924 _001916_hash NULL
83724 +_001917_hash ps_upsd_utilization_read 3 51669 _001917_hash NULL
83725 +_001918_hash pvr2_v4l2_read 3 18006 _001918_hash NULL
83726 +_001919_hash pwr_disable_ps_read 3 13176 _001919_hash NULL
83727 +_001920_hash pwr_elp_enter_read 3 5324 _001920_hash NULL
83728 +_001921_hash pwr_enable_ps_read 3 17686 _001921_hash NULL
83729 +_001922_hash pwr_fix_tsf_ps_read 3 26627 _001922_hash NULL
83730 +_001923_hash pwr_missing_bcns_read 3 25824 _001923_hash NULL
83731 +_001924_hash pwr_power_save_off_read 3 18355 _001924_hash NULL
83732 +_001925_hash pwr_ps_enter_read 3 26935 _001925_hash &_000501_hash
83733 +_001926_hash pwr_rcvd_awake_beacons_read 3 50505 _001926_hash NULL
83734 +_001927_hash pwr_rcvd_beacons_read 3 52836 _001927_hash NULL
83735 +_001928_hash pwr_tx_without_ps_read 3 48423 _001928_hash NULL
83736 +_001929_hash pwr_tx_with_ps_read 3 60851 _001929_hash NULL
83737 +_001930_hash pwr_wake_on_host_read 3 26321 _001930_hash NULL
83738 +_001931_hash pwr_wake_on_timer_exp_read 3 22640 _001931_hash NULL
83739 +_001932_hash queues_read 3 24877 _001932_hash NULL
83740 +_001933_hash raw_recvmsg 4 17277 _001933_hash NULL
83741 +_001934_hash rcname_read 3 25919 _001934_hash NULL
83742 +_001935_hash read_4k_modal_eeprom 3 30212 _001935_hash NULL
83743 +_001936_hash read_9287_modal_eeprom 3 59327 _001936_hash NULL
83744 +_001937_hash reada_find_extent 2 63486 _001937_hash NULL
83745 +_001938_hash read_def_modal_eeprom 3 14041 _001938_hash NULL
83746 +_001939_hash read_enabled_file_bool 3 37744 _001939_hash NULL
83747 +_001940_hash read_file_ani 3 23161 _001940_hash NULL
83748 +_001941_hash read_file_antenna 3 13574 _001941_hash NULL
83749 +_001942_hash read_file_base_eeprom 3 42168 _001942_hash NULL
83750 +_001943_hash read_file_beacon 3 32595 _001943_hash NULL
83751 +_001944_hash read_file_blob 3 57406 _001944_hash NULL
83752 +_001945_hash read_file_bool 3 4180 _001945_hash NULL
83753 +_001946_hash read_file_credit_dist_stats 3 54367 _001946_hash NULL
83754 +_001947_hash read_file_debug 3 58256 _001947_hash NULL
83755 +_001948_hash read_file_disable_ani 3 6536 _001948_hash NULL
83756 +_001949_hash read_file_dma 3 9530 _001949_hash NULL
83757 +_001950_hash read_file_dump_nfcal 3 18766 _001950_hash NULL
83758 +_001951_hash read_file_frameerrors 3 64001 _001951_hash NULL
83759 +_001952_hash read_file_interrupt 3 61742 _001959_hash NULL nohasharray
83760 +_001953_hash read_file_misc 3 9948 _001953_hash NULL
83761 +_001954_hash read_file_modal_eeprom 3 39909 _001954_hash NULL
83762 +_001955_hash read_file_queue 3 40895 _001955_hash NULL
83763 +_001956_hash read_file_rcstat 3 22854 _001956_hash NULL
83764 +_001957_hash read_file_recv 3 48232 _001957_hash NULL
83765 +_001958_hash read_file_regidx 3 33370 _001958_hash NULL
83766 +_001959_hash read_file_regval 3 61742 _001959_hash &_001952_hash
83767 +_001960_hash read_file_reset 3 52310 _001960_hash NULL
83768 +_001961_hash read_file_rx_chainmask 3 41605 _001961_hash NULL
83769 +_001962_hash read_file_slot 3 50111 _001962_hash NULL
83770 +_001963_hash read_file_stations 3 35795 _001963_hash NULL
83771 +_001964_hash read_file_tgt_int_stats 3 20697 _001964_hash NULL
83772 +_001965_hash read_file_tgt_rx_stats 3 33944 _001965_hash NULL
83773 +_001966_hash read_file_tgt_stats 3 8959 _001966_hash NULL
83774 +_001967_hash read_file_tgt_tx_stats 3 51847 _001967_hash NULL
83775 +_001968_hash read_file_tx_chainmask 3 3829 _001968_hash NULL
83776 +_001969_hash read_file_war_stats 3 292 _001969_hash NULL
83777 +_001970_hash read_file_xmit 3 21487 _001970_hash NULL
83778 +_001971_hash read_from_oldmem 2 3337 _001971_hash NULL
83779 +_001972_hash read_oldmem 3 55658 _001972_hash NULL
83780 +_001973_hash regmap_name_read_file 3 39379 _001973_hash NULL
83781 +_001974_hash repair_io_failure 4 4815 _001974_hash NULL
83782 +_001975_hash request_key_and_link 4 42693 _001975_hash NULL
83783 +_001976_hash res_counter_read 4 33499 _001976_hash NULL
83784 +_001977_hash retry_count_read 3 52129 _001977_hash NULL
83785 +_001978_hash rs_sta_dbgfs_rate_scale_data_read 3 47165 _001978_hash NULL
83786 +_001979_hash rs_sta_dbgfs_scale_table_read 3 40262 _001979_hash NULL
83787 +_001980_hash rs_sta_dbgfs_stats_table_read 3 56573 _001980_hash NULL
83788 +_001981_hash rts_threshold_read 3 44384 _001981_hash NULL
83789 +_001982_hash rx_dropped_read 3 44799 _001982_hash NULL
83790 +_001983_hash rx_fcs_err_read 3 62844 _001983_hash NULL
83791 +_001984_hash rx_hdr_overflow_read 3 64407 _001984_hash NULL
83792 +_001985_hash rx_hw_stuck_read 3 57179 _001985_hash NULL
83793 +_001986_hash rx_out_of_mem_read 3 10157 _001986_hash NULL
83794 +_001987_hash rx_path_reset_read 3 23801 _001987_hash NULL
83795 +_001988_hash rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 _001988_hash NULL
83796 +_001989_hash rxpipe_descr_host_int_trig_rx_data_read 3 22001 _001989_hash NULL
83797 +_001990_hash rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 _001990_hash NULL
83798 +_001991_hash rxpipe_rx_prep_beacon_drop_read 3 2403 _001991_hash NULL
83799 +_001992_hash rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 _001992_hash NULL
83800 +_001993_hash rx_reset_counter_read 3 58001 _001993_hash NULL
83801 +_001994_hash rx_xfr_hint_trig_read 3 40283 _001994_hash NULL
83802 +_001995_hash s5m_bulk_write 3 4833 _001995_hash NULL
83803 +_001996_hash scrub_setup_recheck_block 3-4 56245 _001996_hash NULL
83804 +_001998_hash scsi_adjust_queue_depth 3 12802 _001998_hash NULL
83805 +_001999_hash selinux_inode_notifysecctx 3 36896 _001999_hash NULL
83806 +_002000_hash sel_read_avc_cache_threshold 3 33942 _002000_hash NULL
83807 +_002001_hash sel_read_avc_hash_stats 3 1984 _002001_hash NULL
83808 +_002002_hash sel_read_bool 3 24236 _002002_hash NULL
83809 +_002003_hash sel_read_checkreqprot 3 33068 _002003_hash NULL
83810 +_002004_hash sel_read_class 3 12669 _002541_hash NULL nohasharray
83811 +_002005_hash sel_read_enforce 3 2828 _002005_hash NULL
83812 +_002006_hash sel_read_handle_status 3 56139 _002006_hash NULL
83813 +_002007_hash sel_read_handle_unknown 3 57933 _002007_hash NULL
83814 +_002008_hash sel_read_initcon 3 32362 _002008_hash NULL
83815 +_002009_hash sel_read_mls 3 25369 _002009_hash NULL
83816 +_002010_hash sel_read_perm 3 42302 _002010_hash NULL
83817 +_002011_hash sel_read_policy 3 55947 _002011_hash NULL
83818 +_002012_hash sel_read_policycap 3 28544 _002012_hash NULL
83819 +_002013_hash sel_read_policyvers 3 55 _002013_hash NULL
83820 +_002014_hash send_msg 4 37323 _002014_hash NULL
83821 +_002015_hash send_packet 4 52960 _002015_hash NULL
83822 +_002016_hash short_retry_limit_read 3 4687 _002016_hash NULL
83823 +_002017_hash simple_attr_read 3 24738 _002017_hash NULL
83824 +_002018_hash simple_transaction_read 3 17076 _002018_hash NULL
83825 +_002019_hash skb_copy_datagram_const_iovec 2-5-4 48102 _002019_hash NULL
83826 +_002022_hash skb_copy_datagram_iovec 2-4 5806 _002022_hash NULL
83827 +_002024_hash smk_read_ambient 3 61220 _002024_hash NULL
83828 +_002025_hash smk_read_direct 3 15803 _002025_hash NULL
83829 +_002026_hash smk_read_doi 3 30813 _002026_hash NULL
83830 +_002027_hash smk_read_logging 3 37804 _002027_hash NULL
83831 +_002028_hash smk_read_onlycap 3 3855 _002028_hash NULL
83832 +_002029_hash snapshot_read 3 22601 _002029_hash NULL
83833 +_002030_hash snd_cs4281_BA0_read 5 6847 _002030_hash NULL
83834 +_002031_hash snd_cs4281_BA1_read 5 20323 _002031_hash NULL
83835 +_002032_hash snd_cs46xx_io_read 5 45734 _002032_hash NULL
83836 +_002033_hash snd_gus_dram_read 4 56686 _002033_hash NULL
83837 +_002034_hash snd_pcm_oss_read 3 28317 _002034_hash NULL
83838 +_002035_hash snd_rme32_capture_copy 5 39653 _002035_hash NULL
83839 +_002036_hash snd_rme96_capture_copy 5 58484 _002036_hash NULL
83840 +_002037_hash snd_soc_hw_bulk_write_raw 4 14245 _002037_hash NULL
83841 +_002038_hash spi_show_regs 3 6911 _002038_hash &_001908_hash
83842 +_002039_hash sta_agg_status_read 3 14058 _002039_hash NULL
83843 +_002040_hash sta_connected_time_read 3 17435 _002040_hash NULL
83844 +_002041_hash sta_flags_read 3 56710 _002041_hash NULL
83845 +_002042_hash sta_ht_capa_read 3 10366 _002042_hash NULL
83846 +_002043_hash sta_last_seq_ctrl_read 3 19106 _002043_hash NULL
83847 +_002044_hash sta_num_ps_buf_frames_read 3 1488 _002044_hash NULL
83848 +_002045_hash st_read 3 51251 _002045_hash NULL
83849 +_002046_hash supply_map_read_file 3 10608 _002046_hash NULL
83850 +_002047_hash sysfs_read_file 3 42113 _002047_hash NULL
83851 +_002048_hash sys_lgetxattr 4 45531 _002048_hash NULL
83852 +_002049_hash sys_preadv 3 17100 _002049_hash NULL
83853 +_002050_hash sys_pwritev 3 41722 _002050_hash NULL
83854 +_002051_hash sys_readv 3 50664 _002051_hash NULL
83855 +_002052_hash sys_rt_sigpending 2 24961 _002052_hash NULL
83856 +_002053_hash sys_writev 3 28384 _002053_hash NULL
83857 +_002054_hash test_iso_queue 5 62534 _002054_hash NULL
83858 +_002055_hash ts_read 3 44687 _002055_hash NULL
83859 +_002056_hash TSS_authhmac 3 12839 _002056_hash NULL
83860 +_002057_hash TSS_checkhmac1 5 31429 _002057_hash NULL
83861 +_002058_hash TSS_checkhmac2 5-7 40520 _002058_hash NULL
83862 +_002060_hash tt3650_ci_msg_locked 4 8013 _002060_hash NULL
83863 +_002061_hash tun_sendmsg 4 10337 _002061_hash NULL
83864 +_002062_hash tx_internal_desc_overflow_read 3 47300 _002062_hash NULL
83865 +_002063_hash tx_queue_len_read 3 1463 _002063_hash NULL
83866 +_002064_hash tx_queue_status_read 3 44978 _002064_hash NULL
83867 +_002065_hash ubi_io_write_data 4-5 40305 _002065_hash NULL
83868 +_002067_hash uhci_debug_read 3 5911 _002067_hash NULL
83869 +_002068_hash unix_stream_recvmsg 4 35210 _002068_hash NULL
83870 +_002069_hash uvc_debugfs_stats_read 3 56651 _002069_hash NULL
83871 +_002070_hash vhost_add_used_and_signal_n 4 8038 _002070_hash NULL
83872 +_002071_hash vifs_state_read 3 33762 _002071_hash NULL
83873 +_002072_hash vmbus_open 2-3 12154 _002072_hash NULL
83874 +_002074_hash waiters_read 3 40902 _002074_hash NULL
83875 +_002075_hash wep_addr_key_count_read 3 20174 _002075_hash NULL
83876 +_002076_hash wep_decrypt_fail_read 3 58567 _002076_hash NULL
83877 +_002077_hash wep_default_key_count_read 3 43035 _002077_hash NULL
83878 +_002078_hash wep_interrupt_read 3 41492 _002078_hash NULL
83879 +_002079_hash wep_key_not_found_read 3 13377 _002079_hash &_000915_hash
83880 +_002080_hash wep_packets_read 3 18751 _002080_hash NULL
83881 +_002081_hash wl1271_format_buffer 2 20834 _002081_hash NULL
83882 +_002082_hash wm8994_bulk_write 3 13615 _002082_hash NULL
83883 +_002083_hash wusb_prf_256 7 29203 _002083_hash NULL
83884 +_002084_hash wusb_prf_64 7 51065 _002084_hash NULL
83885 +_002085_hash xfs_buf_read_uncached 4 27519 _002085_hash NULL
83886 +_002086_hash xfs_iext_add 3 41422 _002086_hash NULL
83887 +_002087_hash xfs_iext_remove_direct 3 40744 _002087_hash NULL
83888 +_002088_hash xfs_trans_get_efd 3 51148 _002088_hash NULL
83889 +_002089_hash xfs_trans_get_efi 2 7898 _002089_hash NULL
83890 +_002090_hash xlog_get_bp 2 23229 _002090_hash NULL
83891 +_002091_hash xz_dec_init 2 29029 _002091_hash NULL
83892 +_002092_hash aac_change_queue_depth 2 825 _002092_hash NULL
83893 +_002093_hash agp_allocate_memory_wrap 1 16576 _002093_hash NULL
83894 +_002094_hash arcmsr_adjust_disk_queue_depth 2 16756 _002094_hash NULL
83895 +_002095_hash atalk_recvmsg 4 22053 _002095_hash NULL
83896 +_002097_hash atomic_read_file 3 16227 _002097_hash NULL
83897 +_002098_hash ax25_recvmsg 4 64441 _002098_hash NULL
83898 +_002099_hash beacon_interval_read 3 7091 _002099_hash NULL
83899 +_002100_hash btrfs_init_new_buffer 4 55761 _002100_hash NULL
83900 +_002101_hash btrfs_mksubvol 3 39479 _002101_hash NULL
83901 +_002102_hash bt_sock_recvmsg 4 12316 _002102_hash NULL
83902 +_002103_hash bt_sock_stream_recvmsg 4 52518 _002103_hash NULL
83903 +_002104_hash caif_seqpkt_recvmsg 4 32241 _002104_hash NULL
83904 +_002105_hash cpu_type_read 3 36540 _002105_hash NULL
83905 +_002106_hash cx18_read 3 23699 _002106_hash NULL
83906 +_002107_hash dccp_recvmsg 4 16056 _002107_hash NULL
83907 +_002108_hash depth_read 3 31112 _002108_hash NULL
83908 +_002109_hash dfs_global_file_read 3 7787 _002109_hash NULL
83909 +_002110_hash dgram_recvmsg 4 23104 _002110_hash NULL
83910 +_002111_hash dma_skb_copy_datagram_iovec 3-5 21516 _002111_hash NULL
83911 +_002113_hash dtim_interval_read 3 654 _002113_hash NULL
83912 +_002114_hash dynamic_ps_timeout_read 3 10110 _002114_hash NULL
83913 +_002115_hash enable_read 3 2117 _002115_hash NULL
83914 +_002116_hash exofs_read_kern 6 39921 _002116_hash &_001885_hash
83915 +_002117_hash fc_change_queue_depth 2 36841 _002117_hash NULL
83916 +_002118_hash forced_ps_read 3 31685 _002118_hash NULL
83917 +_002119_hash frequency_read 3 64031 _002119_hash NULL
83918 +_002120_hash get_alua_req 3 4166 _002120_hash NULL
83919 +_002121_hash get_rdac_req 3 45882 _002121_hash NULL
83920 +_002122_hash hci_sock_recvmsg 4 7072 _002122_hash NULL
83921 +_002123_hash hpsa_change_queue_depth 2 15449 _002123_hash NULL
83922 +_002124_hash hptiop_adjust_disk_queue_depth 2 20122 _002124_hash NULL
83923 +_002125_hash ide_queue_pc_tail 5 11673 _002125_hash NULL
83924 +_002126_hash ide_raw_taskfile 4 42355 _002126_hash NULL
83925 +_002127_hash idetape_queue_rw_tail 3 29562 _002127_hash NULL
83926 +_002128_hash ieee80211_if_read_aid 3 9705 _002128_hash NULL
83927 +_002129_hash ieee80211_if_read_auto_open_plinks 3 38268 _002129_hash NULL
83928 +_002130_hash ieee80211_if_read_ave_beacon 3 64924 _002130_hash NULL
83929 +_002131_hash ieee80211_if_read_bssid 3 35161 _002131_hash NULL
83930 +_002132_hash ieee80211_if_read_channel_type 3 23884 _002132_hash NULL
83931 +_002133_hash ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 _002133_hash NULL
83932 +_002134_hash ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 _002134_hash NULL
83933 +_002135_hash ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 _002135_hash NULL
83934 +_002136_hash ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 _002136_hash NULL
83935 +_002137_hash ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 _002137_hash NULL
83936 +_002138_hash ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 _002138_hash NULL
83937 +_002139_hash ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 _002139_hash NULL
83938 +_002140_hash ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 _002140_hash NULL
83939 +_002141_hash ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 _002141_hash NULL
83940 +_002142_hash ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 _002142_hash NULL
83941 +_002143_hash ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 _002143_hash NULL
83942 +_002144_hash ieee80211_if_read_dot11MeshMaxRetries 3 12756 _002144_hash NULL
83943 +_002145_hash ieee80211_if_read_dot11MeshRetryTimeout 3 52168 _002145_hash NULL
83944 +_002146_hash ieee80211_if_read_dot11MeshTTL 3 58307 _002146_hash NULL
83945 +_002147_hash ieee80211_if_read_dropped_frames_congestion 3 32603 _002147_hash NULL
83946 +_002148_hash ieee80211_if_read_dropped_frames_no_route 3 33383 _002148_hash NULL
83947 +_002149_hash ieee80211_if_read_dropped_frames_ttl 3 44500 _002149_hash NULL
83948 +_002150_hash ieee80211_if_read_drop_unencrypted 3 37053 _002150_hash NULL
83949 +_002151_hash ieee80211_if_read_dtim_count 3 38419 _002151_hash NULL
83950 +_002152_hash ieee80211_if_read_element_ttl 3 18869 _002152_hash NULL
83951 +_002153_hash ieee80211_if_read_estab_plinks 3 32533 _002153_hash NULL
83952 +_002154_hash ieee80211_if_read_flags 3 57470 _002389_hash NULL nohasharray
83953 +_002155_hash ieee80211_if_read_fwded_frames 3 36520 _002155_hash NULL
83954 +_002156_hash ieee80211_if_read_fwded_mcast 3 39571 _002156_hash &_000151_hash
83955 +_002157_hash ieee80211_if_read_fwded_unicast 3 59740 _002157_hash NULL
83956 +_002158_hash ieee80211_if_read_last_beacon 3 31257 _002158_hash NULL
83957 +_002159_hash ieee80211_if_read_min_discovery_timeout 3 13946 _002159_hash NULL
83958 +_002160_hash ieee80211_if_read_num_buffered_multicast 3 12716 _002160_hash NULL
83959 +_002161_hash ieee80211_if_read_num_sta_authorized 3 56177 _002161_hash NULL
83960 +_002162_hash ieee80211_if_read_num_sta_ps 3 34722 _002162_hash NULL
83961 +_002163_hash ieee80211_if_read_path_refresh_time 3 25545 _002163_hash NULL
83962 +_002164_hash ieee80211_if_read_peer 3 45233 _002164_hash NULL
83963 +_002165_hash ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 _002165_hash NULL
83964 +_002166_hash ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 _002166_hash NULL
83965 +_002167_hash ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 _002167_hash NULL
83966 +_002168_hash ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 _002168_hash NULL
83967 +_002169_hash ieee80211_if_read_rssi_threshold 3 49260 _002169_hash NULL
83968 +_002170_hash ieee80211_if_read_smps 3 27416 _002170_hash NULL
83969 +_002171_hash ieee80211_if_read_state 3 9813 _002280_hash NULL nohasharray
83970 +_002172_hash ieee80211_if_read_tkip_mic_test 3 19565 _002172_hash NULL
83971 +_002173_hash ieee80211_if_read_tsf 3 16420 _002173_hash NULL
83972 +_002174_hash ieee80211_if_read_uapsd_max_sp_len 3 15067 _002174_hash NULL
83973 +_002175_hash ieee80211_if_read_uapsd_queues 3 55150 _002175_hash NULL
83974 +_002176_hash ieee80211_rx_mgmt_beacon 3 24430 _002176_hash NULL
83975 +_002177_hash ieee80211_rx_mgmt_probe_resp 3 6918 _002177_hash NULL
83976 +_002178_hash ima_show_htable_violations 3 10619 _002178_hash NULL
83977 +_002179_hash ima_show_measurements_count 3 23536 _002179_hash NULL
83978 +_002180_hash insert_one_name 7 61668 _002180_hash NULL
83979 +_002181_hash ipr_change_queue_depth 2 6431 _002181_hash NULL
83980 +_002182_hash ip_recv_error 3 23109 _002182_hash NULL
83981 +_002183_hash ipv6_recv_error 3 56347 _002183_hash NULL
83982 +_002184_hash ipv6_recv_rxpmtu 3 7142 _002184_hash NULL
83983 +_002185_hash ipx_recvmsg 4 44366 _002185_hash NULL
83984 +_002186_hash irda_recvmsg_dgram 4 32631 _002186_hash NULL
83985 +_002187_hash iscsi_change_queue_depth 2 23416 _002187_hash NULL
83986 +_002188_hash ivtv_read_pos 3 34400 _002188_hash &_000303_hash
83987 +_002189_hash key_conf_hw_key_idx_read 3 25003 _002189_hash NULL
83988 +_002190_hash key_conf_keyidx_read 3 42443 _002190_hash NULL
83989 +_002191_hash key_conf_keylen_read 3 49758 _002191_hash NULL
83990 +_002192_hash key_flags_read 3 25931 _002192_hash NULL
83991 +_002193_hash key_ifindex_read 3 31411 _002193_hash NULL
83992 +_002194_hash key_tx_rx_count_read 3 44742 _002194_hash NULL
83993 +_002195_hash l2cap_sock_sendmsg 4 63427 _002195_hash NULL
83994 +_002196_hash l2tp_ip_recvmsg 4 22681 _002196_hash NULL
83995 +_002197_hash llc_ui_recvmsg 4 3826 _002197_hash NULL
83996 +_002198_hash lpfc_change_queue_depth 2 25905 _002198_hash NULL
83997 +_002199_hash macvtap_do_read 4 36555 _002199_hash &_001832_hash
83998 +_002200_hash megaraid_change_queue_depth 2 64815 _002200_hash NULL
83999 +_002201_hash megasas_change_queue_depth 2 32747 _002201_hash NULL
84000 +_002202_hash mptscsih_change_queue_depth 2 26036 _002202_hash NULL
84001 +_002203_hash NCR_700_change_queue_depth 2 31742 _002203_hash NULL
84002 +_002204_hash netlink_recvmsg 4 61600 _002204_hash NULL
84003 +_002205_hash nfsctl_transaction_read 3 48250 _002205_hash NULL
84004 +_002206_hash nfs_map_group_to_gid 3 15892 _002206_hash NULL
84005 +_002207_hash nfs_map_name_to_uid 3 51132 _002207_hash NULL
84006 +_002208_hash nr_recvmsg 4 12649 _002208_hash NULL
84007 +_002209_hash osd_req_list_collection_objects 5 36664 _002209_hash NULL
84008 +_002210_hash osd_req_list_partition_objects 5 56464 _002210_hash NULL
84009 +_002212_hash packet_recv_error 3 16669 _002212_hash NULL
84010 +_002213_hash packet_recvmsg 4 47700 _002213_hash NULL
84011 +_002214_hash pep_recvmsg 4 19402 _002214_hash NULL
84012 +_002215_hash pfkey_recvmsg 4 53604 _002215_hash NULL
84013 +_002216_hash ping_recvmsg 4 25597 _002216_hash NULL
84014 +_002217_hash pmcraid_change_queue_depth 2 9116 _002217_hash NULL
84015 +_002218_hash pn_recvmsg 4 30887 _002218_hash NULL
84016 +_002219_hash pointer_size_read 3 51863 _002219_hash NULL
84017 +_002220_hash power_read 3 15939 _002220_hash NULL
84018 +_002221_hash pppoe_recvmsg 4 15073 _002221_hash NULL
84019 +_002222_hash pppol2tp_recvmsg 4 57742 _002222_hash NULL
84020 +_002223_hash qla2x00_adjust_sdev_qdepth_up 2 20097 _002223_hash NULL
84021 +_002224_hash qla2x00_change_queue_depth 2 24742 _002224_hash NULL
84022 +_002225_hash raw_recvmsg 4 52529 _002225_hash NULL
84023 +_002226_hash rawsock_recvmsg 4 12144 _002226_hash NULL
84024 +_002227_hash rawv6_recvmsg 4 30265 _002227_hash NULL
84025 +_002228_hash reada_add_block 2 54247 _002228_hash NULL
84026 +_002229_hash readahead_tree_block 3 36285 _002229_hash NULL
84027 +_002230_hash reada_tree_block_flagged 3 18402 _002230_hash NULL
84028 +_002231_hash read_tree_block 3 841 _002231_hash NULL
84029 +_002232_hash recover_peb 6-7 29238 _002232_hash NULL
84030 +_002234_hash recv_msg 4 48709 _002234_hash NULL
84031 +_002235_hash recv_stream 4 30138 _002235_hash NULL
84032 +_002236_hash _req_append_segment 2 41031 _002236_hash NULL
84033 +_002237_hash request_key_async 4 6990 _002237_hash NULL
84034 +_002238_hash request_key_async_with_auxdata 4 46624 _002238_hash NULL
84035 +_002239_hash request_key_with_auxdata 4 24515 _002239_hash NULL
84036 +_002240_hash rose_recvmsg 4 2368 _002240_hash NULL
84037 +_002241_hash rxrpc_recvmsg 4 26233 _002241_hash NULL
84038 +_002242_hash rx_streaming_always_read 3 49401 _002242_hash NULL
84039 +_002243_hash rx_streaming_interval_read 3 55291 _002243_hash NULL
84040 +_002244_hash sas_change_queue_depth 2 18555 _002244_hash NULL
84041 +_002245_hash scsi_activate_tcq 2 42640 _002245_hash NULL
84042 +_002246_hash scsi_deactivate_tcq 2 47086 _002246_hash NULL
84043 +_002247_hash scsi_execute 5 33596 _002247_hash NULL
84044 +_002248_hash _scsih_adjust_queue_depth 2 1083 _002248_hash NULL
84045 +_002249_hash scsi_init_shared_tag_map 2 59812 _002249_hash NULL
84046 +_002250_hash scsi_track_queue_full 2 44239 _002250_hash NULL
84047 +_002251_hash sctp_recvmsg 4 23265 _002251_hash NULL
84048 +_002252_hash send_stream 4 3397 _002252_hash NULL
84049 +_002253_hash skb_copy_and_csum_datagram_iovec 2 24466 _002253_hash NULL
84050 +_002255_hash snd_gf1_mem_proc_dump 5 16926 _002255_hash NULL
84051 +_002256_hash split_scan_timeout_read 3 20029 _002256_hash NULL
84052 +_002257_hash sta_dev_read 3 14782 _002257_hash NULL
84053 +_002258_hash sta_inactive_ms_read 3 25690 _002258_hash NULL
84054 +_002259_hash sta_last_signal_read 3 31818 _002259_hash NULL
84055 +_002260_hash stats_dot11ACKFailureCount_read 3 45558 _002260_hash NULL
84056 +_002261_hash stats_dot11FCSErrorCount_read 3 28154 _002261_hash NULL
84057 +_002262_hash stats_dot11RTSFailureCount_read 3 43948 _002262_hash NULL
84058 +_002263_hash stats_dot11RTSSuccessCount_read 3 33065 _002263_hash NULL
84059 +_002264_hash storvsc_connect_to_vsp 2 22 _002264_hash NULL
84060 +_002265_hash suspend_dtim_interval_read 3 64971 _002265_hash NULL
84061 +_002266_hash sys_msgrcv 3 959 _002266_hash NULL
84062 +_002267_hash tcm_loop_change_queue_depth 2 42454 _002267_hash NULL
84063 +_002268_hash tcp_copy_to_iovec 3 28344 _002268_hash NULL
84064 +_002269_hash tcp_recvmsg 4 31238 _002269_hash NULL
84065 +_002270_hash timeout_read 3 47915 _002270_hash NULL
84066 +_002271_hash total_ps_buffered_read 3 16365 _002271_hash NULL
84067 +_002272_hash tun_put_user 4 59849 _002272_hash NULL
84068 +_002273_hash twa_change_queue_depth 2 48808 _002273_hash NULL
84069 +_002274_hash tw_change_queue_depth 2 11116 _002274_hash NULL
84070 +_002275_hash twl_change_queue_depth 2 41342 _002275_hash NULL
84071 +_002276_hash ubi_eba_write_leb 5-6 19826 _002276_hash NULL
84072 +_002278_hash ubi_eba_write_leb_st 5 27896 _002278_hash NULL
84073 +_002279_hash udp_recvmsg 4 42558 _002279_hash NULL
84074 +_002280_hash udpv6_recvmsg 4 9813 _002280_hash &_002171_hash
84075 +_002281_hash ulong_read_file 3 42304 _002281_hash &_000511_hash
84076 +_002282_hash unix_dgram_recvmsg 4 14952 _002282_hash NULL
84077 +_002283_hash user_power_read 3 39414 _002283_hash NULL
84078 +_002284_hash vcc_recvmsg 4 37198 _002284_hash NULL
84079 +_002285_hash wep_iv_read 3 54744 _002285_hash NULL
84080 +_002286_hash x25_recvmsg 4 42777 _002286_hash NULL
84081 +_002287_hash xfs_iext_insert 3 18667 _002287_hash NULL
84082 +_002288_hash xfs_iext_remove 3 50909 _002288_hash NULL
84083 +_002289_hash xlog_find_verify_log_record 2 18870 _002289_hash NULL
84084 +_002290_hash btrfs_alloc_free_block 3 29982 _002290_hash NULL
84085 +_002291_hash cx18_read_pos 3 4683 _002291_hash NULL
84086 +_002292_hash l2cap_sock_recvmsg 4 59886 _002292_hash NULL
84087 +_002293_hash osd_req_list_dev_partitions 4 60027 _002293_hash NULL
84088 +_002294_hash osd_req_list_partition_collections 5 38223 _002294_hash NULL
84089 +_002295_hash osst_do_scsi 4 44410 _002295_hash NULL
84090 +_002296_hash qla2x00_handle_queue_full 2 24365 _002296_hash NULL
84091 +_002297_hash rfcomm_sock_recvmsg 4 22227 _002297_hash NULL
84092 +_002298_hash scsi_execute_req 5 42088 _002298_hash NULL
84093 +_002299_hash _scsih_change_queue_depth 2 26230 _002299_hash NULL
84094 +_002300_hash spi_execute 5 28736 _002300_hash NULL
84095 +_002301_hash submit_inquiry 3 42108 _002301_hash NULL
84096 +_002302_hash tcp_dma_try_early_copy 3 37651 _002302_hash NULL
84097 +_002303_hash tun_do_read 4 50800 _002303_hash NULL
84098 +_002304_hash ubi_eba_atomic_leb_change 5 13041 _002304_hash NULL
84099 +_002305_hash ubi_leb_write 4-5 41691 _002305_hash NULL
84100 +_002307_hash unix_seqpacket_recvmsg 4 23062 _002307_hash NULL
84101 +_002308_hash write_leb 5 36957 _002308_hash NULL
84102 +_002309_hash ch_do_scsi 4 31171 _002309_hash NULL
84103 +_002310_hash dbg_leb_write 4-5 20478 _002310_hash NULL
84104 +_002312_hash scsi_mode_sense 5 16835 _002312_hash NULL
84105 +_002313_hash scsi_vpd_inquiry 4 30040 _002313_hash NULL
84106 +_002314_hash ses_recv_diag 4 47143 _002314_hash &_000673_hash
84107 +_002315_hash ses_send_diag 4 64527 _002315_hash NULL
84108 +_002316_hash spi_dv_device_echo_buffer 2-3 39846 _002316_hash NULL
84109 +_002318_hash ubifs_leb_write 4-5 61226 _002318_hash NULL
84110 +_002320_hash ubi_leb_change 4 14899 _002320_hash NULL
84111 +_002321_hash ubi_write 4-5 30809 _002321_hash NULL
84112 +_002322_hash dbg_leb_change 4 19969 _002322_hash NULL
84113 +_002323_hash gluebi_write 3 27905 _002323_hash NULL
84114 +_002324_hash scsi_get_vpd_page 4 51951 _002324_hash NULL
84115 +_002325_hash sd_do_mode_sense 5 11507 _002325_hash NULL
84116 +_002326_hash ubifs_leb_change 4 22399 _002436_hash NULL nohasharray
84117 +_002327_hash ubifs_write_node 5 15088 _002327_hash NULL
84118 +_002328_hash fixup_leb 3 43256 _002328_hash NULL
84119 +_002329_hash recover_head 3 17904 _002329_hash NULL
84120 +_002330_hash alloc_cpu_rmap 1 65363 _002330_hash NULL
84121 +_002331_hash alloc_ebda_hpc 1-2 50046 _002331_hash NULL
84122 +_002333_hash alloc_sched_domains 1 28972 _002333_hash NULL
84123 +_002334_hash amthi_read 4 45831 _002334_hash NULL
84124 +_002335_hash bcm_char_read 3 31750 _002335_hash NULL
84125 +_002336_hash BcmCopySection 5 2035 _002336_hash NULL
84126 +_002337_hash buffer_from_user 3 51826 _002337_hash NULL
84127 +_002338_hash buffer_to_user 3 35439 _002338_hash NULL
84128 +_002339_hash c4iw_init_resource_fifo 3 48090 _002339_hash NULL
84129 +_002340_hash c4iw_init_resource_fifo_random 3 25547 _002340_hash NULL
84130 +_002341_hash card_send_command 3 40757 _002341_hash NULL
84131 +_002342_hash chd_dec_fetch_cdata 3 50926 _002342_hash NULL
84132 +_002343_hash crystalhd_create_dio_pool 2 3427 _002343_hash NULL
84133 +_002344_hash crystalhd_user_data 3 18407 _002344_hash NULL
84134 +_002345_hash cxio_init_resource_fifo 3 28764 _002345_hash NULL
84135 +_002346_hash cxio_init_resource_fifo_random 3 47151 _002346_hash NULL
84136 +_002347_hash do_pages_stat 2 4437 _002347_hash NULL
84137 +_002348_hash do_read_log_to_user 4 3236 _002348_hash NULL
84138 +_002349_hash do_write_log_from_user 3 39362 _002349_hash NULL
84139 +_002350_hash dt3155_read 3 59226 _002350_hash NULL
84140 +_002351_hash easycap_alsa_vmalloc 2 14426 _002351_hash NULL
84141 +_002352_hash evm_read_key 3 54674 _002352_hash NULL
84142 +_002353_hash evm_write_key 3 27715 _002353_hash NULL
84143 +_002354_hash fir16_create 3 5574 _002354_hash NULL
84144 +_002355_hash iio_allocate_device 1 18821 _002355_hash NULL
84145 +_002356_hash __iio_allocate_kfifo 2-3 55738 _002356_hash NULL
84146 +_002358_hash __iio_allocate_sw_ring_buffer 3 4843 _002358_hash NULL
84147 +_002359_hash iio_debugfs_read_reg 3 60908 _002359_hash NULL
84148 +_002360_hash iio_debugfs_write_reg 3 22742 _002360_hash NULL
84149 +_002361_hash iio_event_chrdev_read 3 54757 _002361_hash NULL
84150 +_002362_hash iio_read_first_n_kfifo 2 57910 _002362_hash NULL
84151 +_002363_hash iio_read_first_n_sw_rb 2 51911 _002363_hash NULL
84152 +_002364_hash ioapic_setup_resources 1 35255 _002364_hash NULL
84153 +_002365_hash keymap_store 4 45406 _002365_hash NULL
84154 +_002366_hash kzalloc_node 1 24352 _002366_hash NULL
84155 +_002367_hash line6_alloc_sysex_buffer 4 28225 _002367_hash NULL
84156 +_002368_hash line6_dumpreq_initbuf 3 53123 _002368_hash NULL
84157 +_002369_hash line6_midibuf_init 2 52425 _002369_hash NULL
84158 +_002370_hash lirc_write 3 20604 _002370_hash NULL
84159 +_002371_hash _malloc 1 54077 _002371_hash NULL
84160 +_002372_hash mei_read 3 6507 _002372_hash NULL
84161 +_002373_hash mei_write 3 4005 _002373_hash NULL
84162 +_002374_hash mempool_create_node 1 44715 _002374_hash NULL
84163 +_002375_hash msg_set 3 51725 _002375_hash NULL
84164 +_002376_hash newpart 6 47485 _002376_hash NULL
84165 +_002377_hash OS_kmalloc 1 36909 _002377_hash NULL
84166 +_002378_hash pcpu_alloc_bootmem 2 62074 _002378_hash NULL
84167 +_002379_hash pcpu_get_vm_areas 3 50085 _002379_hash NULL
84168 +_002380_hash resource_from_user 3 30341 _002380_hash NULL
84169 +_002381_hash sca3000_read_data 4 57064 _002381_hash NULL
84170 +_002382_hash sca3000_read_first_n_hw_rb 2 11479 _002382_hash NULL
84171 +_002383_hash send_midi_async 3 57463 _002383_hash NULL
84172 +_002384_hash sep_create_dcb_dmatables_context 6 37551 _002384_hash NULL
84173 +_002385_hash sep_create_dcb_dmatables_context_kernel 6 49728 _002385_hash NULL
84174 +_002386_hash sep_create_msgarea_context 4 33829 _002386_hash NULL
84175 +_002387_hash sep_lli_table_secure_dma 2-3 64042 _002387_hash NULL
84176 +_002389_hash sep_lock_user_pages 2-3 57470 _002389_hash &_002154_hash
84177 +_002391_hash sep_prepare_input_output_dma_table_in_dcb 4-5 63087 _002391_hash NULL
84178 +_002393_hash sep_read 3 17161 _002393_hash NULL
84179 +_002394_hash TransmitTcb 4 12989 _002394_hash NULL
84180 +_002395_hash ValidateDSDParamsChecksum 3 63654 _002395_hash NULL
84181 +_002396_hash Wb35Reg_BurstWrite 4 62327 _002396_hash NULL
84182 +_002397_hash __alloc_bootmem_low_node 2 25726 _002397_hash &_001499_hash
84183 +_002398_hash __alloc_bootmem_node 2 1992 _002398_hash NULL
84184 +_002399_hash alloc_irq_cpu_rmap 1 28459 _002399_hash NULL
84185 +_002400_hash alloc_ring 2-4 18278 _002400_hash NULL
84186 +_002402_hash c4iw_init_resource 2-3 30393 _002402_hash NULL
84187 +_002404_hash cxio_hal_init_resource 2-7-6 29771 _002404_hash &_000284_hash
84188 +_002407_hash cxio_hal_init_rhdl_resource 1 25104 _002407_hash NULL
84189 +_002408_hash disk_expand_part_tbl 2 30561 _002408_hash NULL
84190 +_002409_hash InterfaceTransmitPacket 3 42058 _002409_hash NULL
84191 +_002410_hash line6_dumpreq_init 3 34473 _002410_hash NULL
84192 +_002411_hash mempool_create 1 29437 _002411_hash NULL
84193 +_002412_hash pcpu_fc_alloc 2 11818 _002412_hash NULL
84194 +_002413_hash pod_alloc_sysex_buffer 3 31651 _002413_hash NULL
84195 +_002414_hash r8712_usbctrl_vendorreq 6 48489 _002414_hash NULL
84196 +_002415_hash r871x_set_wpa_ie 3 7000 _002415_hash NULL
84197 +_002416_hash sys_move_pages 2 42626 _002416_hash NULL
84198 +_002417_hash variax_alloc_sysex_buffer 3 15237 _002417_hash NULL
84199 +_002418_hash vme_user_write 3 15587 _002418_hash NULL
84200 +_002419_hash add_partition 2 55588 _002419_hash NULL
84201 +_002420_hash __alloc_bootmem_node_high 2 65076 _002420_hash NULL
84202 +_002421_hash ceph_msgpool_init 3 33312 _002421_hash NULL
84203 +_002423_hash mempool_create_kmalloc_pool 1 41650 _002423_hash NULL
84204 +_002424_hash mempool_create_page_pool 1 30189 _002424_hash NULL
84205 +_002425_hash mempool_create_slab_pool 1 62907 _002425_hash NULL
84206 +_002426_hash variax_set_raw2 4 32374 _002426_hash NULL
84207 +_002427_hash bioset_create 1 5580 _002427_hash NULL
84208 +_002428_hash bioset_integrity_create 2 62708 _002428_hash NULL
84209 +_002429_hash biovec_create_pools 2 9575 _002429_hash NULL
84210 +_002430_hash i2o_pool_alloc 4 55485 _002430_hash NULL
84211 +_002431_hash prison_create 1 43623 _002431_hash NULL
84212 +_002432_hash unlink_simple 3 47506 _002432_hash NULL
84213 +_002433_hash alloc_ieee80211 1 20063 _002433_hash NULL
84214 +_002434_hash alloc_ieee80211_rsl 1 34564 _002434_hash NULL
84215 +_002435_hash alloc_page_cgroup 1 2919 _002435_hash NULL
84216 +_002436_hash alloc_private 2 22399 _002436_hash &_002326_hash
84217 +_002437_hash alloc_rtllib 1 51136 _002437_hash NULL
84218 +_002438_hash alloc_rx_desc_ring 2 18016 _002438_hash NULL
84219 +_002439_hash alloc_subdevices 2 43300 _002439_hash NULL
84220 +_002440_hash atomic_counters_read 3 48827 _002440_hash NULL
84221 +_002441_hash atomic_stats_read 3 36228 _002441_hash NULL
84222 +_002442_hash capabilities_read 3 58457 _002442_hash NULL
84223 +_002443_hash comedi_read 3 13199 _002443_hash NULL
84224 +_002444_hash comedi_write 3 47926 _002444_hash NULL
84225 +_002445_hash compat_do_arpt_set_ctl 4 12184 _002445_hash NULL
84226 +_002446_hash compat_do_ip6t_set_ctl 4 3184 _002446_hash NULL
84227 +_002447_hash compat_do_ipt_set_ctl 4 58466 _002447_hash &_001852_hash
84228 +_002448_hash compat_filldir 3 32999 _002448_hash NULL
84229 +_002449_hash compat_filldir64 3 35354 _002449_hash NULL
84230 +_002450_hash compat_fillonedir 3 15620 _002450_hash NULL
84231 +_002451_hash compat_rw_copy_check_uvector 3 25242 _002451_hash NULL
84232 +_002452_hash compat_sock_setsockopt 5 23 _002452_hash NULL
84233 +_002453_hash compat_sys_kexec_load 2 35674 _002453_hash NULL
84234 +_002454_hash compat_sys_keyctl 4 9639 _002454_hash NULL
84235 +_002455_hash compat_sys_move_pages 2 5861 _002455_hash NULL
84236 +_002456_hash compat_sys_mq_timedsend 3 31060 _002456_hash NULL
84237 +_002457_hash compat_sys_msgrcv 2 7482 _002457_hash NULL
84238 +_002458_hash compat_sys_msgsnd 2 10738 _002458_hash NULL
84239 +_002459_hash compat_sys_semtimedop 3 3606 _002459_hash NULL
84240 +_002460_hash __copy_in_user 3 34790 _002460_hash NULL
84241 +_002461_hash copy_in_user 3 57502 _002461_hash NULL
84242 +_002462_hash dev_counters_read 3 19216 _002462_hash NULL
84243 +_002463_hash dev_names_read 3 38509 _002463_hash NULL
84244 +_002464_hash do_arpt_set_ctl 4 51053 _002464_hash NULL
84245 +_002465_hash do_ip6t_set_ctl 4 60040 _002465_hash NULL
84246 +_002466_hash do_ipt_set_ctl 4 56238 _002466_hash NULL
84247 +_002467_hash drbd_bm_resize 2 20522 _002467_hash NULL
84248 +_002468_hash driver_names_read 3 60399 _002468_hash NULL
84249 +_002469_hash driver_stats_read 3 8944 _002469_hash NULL
84250 +_002470_hash __earlyonly_bootmem_alloc 2 23824 _002470_hash NULL
84251 +_002471_hash evtchn_read 3 3569 _002471_hash NULL
84252 +_002472_hash ext_sd_execute_read_data 9 48589 _002472_hash NULL
84253 +_002473_hash ext_sd_execute_write_data 9 8175 _002473_hash NULL
84254 +_002474_hash fat_compat_ioctl_filldir 3 36328 _002474_hash NULL
84255 +_002475_hash firmwareUpload 3 32794 _002475_hash NULL
84256 +_002476_hash flash_read 3 57843 _002476_hash NULL
84257 +_002477_hash flash_write 3 62354 _002477_hash NULL
84258 +_002478_hash gather_array 3 56641 _002478_hash NULL
84259 +_002479_hash ghash_async_setkey 3 60001 _002479_hash NULL
84260 +_002480_hash gntdev_alloc_map 2 35145 _002480_hash NULL
84261 +_002481_hash gnttab_map 2 56439 _002481_hash NULL
84262 +_002482_hash gru_alloc_gts 2-3 60056 _002482_hash NULL
84263 +_002484_hash handle_eviocgbit 3 44193 _002484_hash NULL
84264 +_002485_hash hid_parse_report 3 51737 _002485_hash NULL
84265 +_002486_hash ieee80211_alloc_txb 1 52477 _002486_hash NULL
84266 +_002487_hash ieee80211_wx_set_gen_ie 3 51399 _002487_hash NULL
84267 +_002488_hash ieee80211_wx_set_gen_ie_rsl 3 3521 _002488_hash NULL
84268 +_002489_hash init_cdev 1 8274 _002489_hash NULL
84269 +_002490_hash init_per_cpu 1 17880 _002490_hash NULL
84270 +_002491_hash ipath_create_cq 2 45586 _002491_hash NULL
84271 +_002492_hash ipath_get_base_info 3 7043 _002492_hash NULL
84272 +_002493_hash ipath_init_qp_table 2 25167 _002493_hash NULL
84273 +_002494_hash ipath_resize_cq 2 712 _002494_hash NULL
84274 +_002495_hash ni_gpct_device_construct 5 610 _002495_hash NULL
84275 +_002496_hash options_write 3 47243 _002496_hash NULL
84276 +_002497_hash portcntrs_1_read 3 47253 _002497_hash NULL
84277 +_002498_hash portcntrs_2_read 3 56586 _002498_hash NULL
84278 +_002499_hash portnames_read 3 41958 _002499_hash NULL
84279 +_002500_hash ptc_proc_write 3 12076 _002500_hash NULL
84280 +_002501_hash put_cmsg_compat 4 35937 _002501_hash NULL
84281 +_002502_hash qib_alloc_devdata 2 51819 _002502_hash NULL
84282 +_002503_hash qib_alloc_fast_reg_page_list 2 10507 _002503_hash NULL
84283 +_002504_hash qib_cdev_init 1 34778 _002504_hash NULL
84284 +_002505_hash qib_create_cq 2 27497 _002505_hash NULL
84285 +_002506_hash qib_diag_write 3 62133 _002506_hash NULL
84286 +_002507_hash qib_get_base_info 3 11369 _002507_hash NULL
84287 +_002508_hash qib_resize_cq 2 53090 _002508_hash NULL
84288 +_002509_hash qsfp_1_read 3 21915 _002509_hash NULL
84289 +_002510_hash qsfp_2_read 3 31491 _002510_hash NULL
84290 +_002511_hash queue_reply 3 22416 _002511_hash NULL
84291 +_002512_hash Realloc 2 34961 _002512_hash NULL
84292 +_002513_hash rfc4106_set_key 3 54519 _002513_hash NULL
84293 +_002514_hash rtllib_alloc_txb 1 21687 _002514_hash NULL
84294 +_002515_hash rtllib_wx_set_gen_ie 3 59808 _002515_hash NULL
84295 +_002516_hash rts51x_transfer_data_partial 6 5735 _002516_hash NULL
84296 +_002517_hash sparse_early_usemaps_alloc_node 4 9269 _002517_hash NULL
84297 +_002518_hash split 2 11691 _002518_hash NULL
84298 +_002519_hash stats_read_ul 3 32751 _002519_hash NULL
84299 +_002520_hash store_debug_level 3 35652 _002520_hash NULL
84300 +_002521_hash sys32_ipc 3 7238 _002521_hash NULL
84301 +_002522_hash sys32_rt_sigpending 2 25814 _002522_hash NULL
84302 +_002523_hash tunables_read 3 36385 _002523_hash NULL
84303 +_002524_hash tunables_write 3 59563 _002524_hash NULL
84304 +_002525_hash u32_array_read 3 2219 _002525_hash NULL
84305 +_002526_hash usb_buffer_alloc 2 36276 _002526_hash NULL
84306 +_002527_hash xenbus_file_write 3 6282 _002527_hash NULL
84307 +_002528_hash xpc_kmalloc_cacheline_aligned 1 42895 _002528_hash NULL
84308 +_002529_hash xpc_kzalloc_cacheline_aligned 1 65433 _002529_hash NULL
84309 +_002530_hash xsd_read 3 15653 _002530_hash NULL
84310 +_002531_hash compat_do_readv_writev 4 49102 _002531_hash NULL
84311 +_002532_hash compat_keyctl_instantiate_key_iov 3 57431 _002532_hash NULL
84312 +_002533_hash compat_process_vm_rw 3-5 22254 _002533_hash NULL
84313 +_002535_hash compat_sys_setsockopt 5 3326 _002535_hash NULL
84314 +_002536_hash ipath_cdev_init 1 37752 _002536_hash NULL
84315 +_002537_hash ms_read_multiple_pages 4-5 8052 _002537_hash NULL
84316 +_002539_hash ms_write_multiple_pages 5-6 10362 _002539_hash NULL
84317 +_002541_hash sparse_mem_maps_populate_node 4 12669 _002541_hash &_002004_hash
84318 +_002542_hash vmemmap_alloc_block 1 43245 _002542_hash NULL
84319 +_002543_hash xd_read_multiple_pages 4-5 11422 _002543_hash NULL
84320 +_002545_hash xd_write_multiple_pages 5-6 53633 _002545_hash NULL
84321 +_002546_hash compat_readv 3 30273 _002546_hash NULL
84322 +_002547_hash compat_sys_process_vm_readv 3-5 15374 _002547_hash NULL
84323 +_002549_hash compat_sys_process_vm_writev 3-5 41194 _002549_hash NULL
84324 +_002551_hash compat_writev 3 60063 _002551_hash NULL
84325 +_002552_hash ms_rw_multi_sector 4 7459 _002552_hash NULL
84326 +_002553_hash sparse_early_mem_maps_alloc_node 4 36971 _002553_hash NULL
84327 +_002554_hash vmemmap_alloc_block_buf 1 61126 _002554_hash NULL
84328 +_002555_hash xd_rw 4 49020 _002555_hash NULL
84329 +_002556_hash compat_sys_preadv64 3 24283 _002556_hash NULL
84330 +_002557_hash compat_sys_pwritev64 3 51151 _002557_hash NULL
84331 +_002558_hash compat_sys_readv 3 20911 _002558_hash NULL
84332 +_002559_hash compat_sys_writev 3 5784 _002559_hash NULL
84333 +_002560_hash ms_rw 4 17220 _002560_hash NULL
84334 +_002561_hash compat_sys_preadv 3 583 _002561_hash NULL
84335 +_002562_hash compat_sys_pwritev 3 17886 _002562_hash NULL
84336 +_002563_hash alloc_apertures 1 56561 _002563_hash NULL
84337 +_002564_hash bin_uuid 3 28999 _002564_hash NULL
84338 +_002565_hash __copy_from_user_inatomic_nocache 3 49921 _002565_hash NULL
84339 +_002566_hash do_dmabuf_dirty_sou 7 3017 _002566_hash NULL
84340 +_002567_hash do_surface_dirty_sou 7 39678 _002567_hash NULL
84341 +_002568_hash drm_agp_bind_pages 3 56748 _002568_hash NULL
84342 +_002569_hash drm_calloc_large 1-2 65421 _002569_hash NULL
84343 +_002571_hash drm_fb_helper_init 3-4 19044 _002571_hash NULL
84344 +_002573_hash drm_ht_create 2 18853 _002573_hash NULL
84345 +_002574_hash drm_malloc_ab 1-2 16831 _002574_hash NULL
84346 +_002576_hash drm_mode_crtc_set_gamma_size 2 31881 _002576_hash NULL
84347 +_002577_hash drm_plane_init 6 28731 _002577_hash NULL
84348 +_002578_hash drm_property_create 4 51239 _002578_hash NULL
84349 +_002579_hash drm_property_create_blob 2 7414 _002579_hash NULL
84350 +_002580_hash drm_vblank_init 2 11362 _002580_hash NULL
84351 +_002581_hash drm_vmalloc_dma 1 14550 _002581_hash NULL
84352 +_002582_hash fb_alloc_cmap_gfp 2 20792 _002582_hash NULL
84353 +_002583_hash fbcon_prepare_logo 5 6246 _002583_hash NULL
84354 +_002584_hash fb_read 3 33506 _002584_hash NULL
84355 +_002585_hash fb_write 3 46924 _002585_hash NULL
84356 +_002586_hash framebuffer_alloc 1 59145 _002586_hash NULL
84357 +_002587_hash i915_cache_sharing_read 3 24775 _002587_hash NULL
84358 +_002588_hash i915_cache_sharing_write 3 57961 _002588_hash NULL
84359 +_002589_hash i915_max_freq_read 3 20581 _002589_hash NULL
84360 +_002590_hash i915_max_freq_write 3 11350 _002590_hash NULL
84361 +_002591_hash i915_wedged_read 3 35474 _002591_hash NULL
84362 +_002592_hash i915_wedged_write 3 47771 _002592_hash NULL
84363 +_002593_hash p9_client_read 5 19750 _002593_hash NULL
84364 +_002594_hash probe_kernel_write 3 17481 _002594_hash NULL
84365 +_002595_hash sched_feat_write 3 55202 _002595_hash NULL
84366 +_002596_hash sd_alloc_ctl_entry 1 29708 _002596_hash NULL
84367 +_002597_hash tstats_write 3 60432 _002597_hash &_000009_hash
84368 +_002598_hash ttm_bo_fbdev_io 4 9805 _002598_hash NULL
84369 +_002599_hash ttm_bo_io 5 47000 _002599_hash NULL
84370 +_002600_hash ttm_dma_page_pool_free 2 34135 _002600_hash NULL
84371 +_002601_hash ttm_page_pool_free 2 61661 _002601_hash NULL
84372 +_002602_hash vmw_execbuf_process 5 22885 _002602_hash NULL
84373 +_002603_hash vmw_fifo_reserve 2 12141 _002603_hash NULL
84374 +_002604_hash vmw_kms_present 9 38130 _002604_hash NULL
84375 +_002605_hash vmw_kms_readback 6 5727 _002605_hash NULL
84376 +_002606_hash do_dmabuf_dirty_ldu 6 52241 _002606_hash NULL
84377 +_002607_hash drm_mode_create_tv_properties 2 23122 _002607_hash NULL
84378 +_002608_hash drm_property_create_enum 5 29201 _002608_hash NULL
84379 +_002609_hash fast_user_write 5 20494 _002609_hash NULL
84380 +_002610_hash fb_alloc_cmap 2 6554 _002610_hash NULL
84381 +_002611_hash i915_gem_execbuffer_relocate_slow 7 25355 _002611_hash NULL
84382 +_002612_hash kgdb_hex2mem 3 24755 _002612_hash NULL
84383 +_002613_hash ttm_object_device_init 2 10321 _002613_hash NULL
84384 +_002614_hash ttm_object_file_init 2 27804 _002614_hash NULL
84385 +_002615_hash vmw_cursor_update_image 3-4 16332 _002615_hash NULL
84386 +_002617_hash vmw_gmr2_bind 3 21305 _002617_hash NULL
84387 +_002618_hash vmw_cursor_update_dmabuf 3-4 32045 _002618_hash NULL
84388 +_002620_hash vmw_gmr_bind 3 44130 _002620_hash NULL
84389 +_002621_hash vmw_du_crtc_cursor_set 4-5 28479 _002621_hash NULL
84390 +_002622_hash __module_alloc 1 50004 _002622_hash NULL
84391 +_002623_hash module_alloc_update_bounds_rw 1 63233 _002623_hash NULL
84392 +_002624_hash module_alloc_update_bounds_rx 1 58634 _002624_hash NULL
84393 +_002625_hash acpi_system_write_alarm 3 40205 _002625_hash NULL
84394 +_002626_hash create_table 2 16213 _002626_hash NULL
84395 +_002627_hash mem_read 3 57631 _002627_hash NULL
84396 +_002628_hash mem_write 3 22232 _002628_hash NULL
84397 +_002629_hash proc_fault_inject_read 3 36802 _002629_hash NULL
84398 +_002630_hash proc_fault_inject_write 3 21058 _002630_hash NULL
84399 +_002631_hash v9fs_fid_readn 4 60544 _002631_hash NULL
84400 +_002632_hash v9fs_file_read 3 40858 _002632_hash NULL
84401 +_002633_hash __devres_alloc 2 25598 _002633_hash NULL
84402 +_002634_hash acl_alloc 1 35979 _002634_hash NULL
84403 +_002635_hash acl_alloc_stack_init 1 60630 _002635_hash NULL
84404 +_002636_hash acl_alloc_num 1-2 60778 _002636_hash NULL
84405 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
84406 new file mode 100644
84407 index 0000000..cc96254
84408 --- /dev/null
84409 +++ b/tools/gcc/size_overflow_plugin.c
84410 @@ -0,0 +1,1204 @@
84411 +/*
84412 + * Copyright 2011, 2012 by Emese Revfy <re.emese@gmail.com>
84413 + * Licensed under the GPL v2, or (at your option) v3
84414 + *
84415 + * Homepage:
84416 + * http://www.grsecurity.net/~ephox/overflow_plugin/
84417 + *
84418 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
84419 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
84420 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
84421 + *
84422 + * Usage:
84423 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -Wno-missing-field-initializers -o size_overflow_plugin.so size_overflow_plugin.c
84424 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
84425 + */
84426 +
84427 +#include "gcc-plugin.h"
84428 +#include "config.h"
84429 +#include "system.h"
84430 +#include "coretypes.h"
84431 +#include "tree.h"
84432 +#include "tree-pass.h"
84433 +#include "intl.h"
84434 +#include "plugin-version.h"
84435 +#include "tm.h"
84436 +#include "toplev.h"
84437 +#include "function.h"
84438 +#include "tree-flow.h"
84439 +#include "plugin.h"
84440 +#include "gimple.h"
84441 +#include "c-common.h"
84442 +#include "diagnostic.h"
84443 +#include "cfgloop.h"
84444 +
84445 +struct size_overflow_hash {
84446 + struct size_overflow_hash *next;
84447 + const char *name;
84448 + unsigned int param;
84449 +};
84450 +
84451 +#include "size_overflow_hash.h"
84452 +
84453 +#define __unused __attribute__((__unused__))
84454 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
84455 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
84456 +#define BEFORE_STMT true
84457 +#define AFTER_STMT false
84458 +#define CREATE_NEW_VAR NULL_TREE
84459 +#define CODES_LIMIT 32
84460 +#define MAX_PARAM 10
84461 +
84462 +#if BUILDING_GCC_VERSION == 4005
84463 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
84464 +#endif
84465 +
84466 +int plugin_is_GPL_compatible;
84467 +void debug_gimple_stmt(gimple gs);
84468 +
84469 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var);
84470 +static tree signed_size_overflow_type;
84471 +static tree unsigned_size_overflow_type;
84472 +static tree report_size_overflow_decl;
84473 +static tree const_char_ptr_type_node;
84474 +static unsigned int handle_function(void);
84475 +
84476 +static struct plugin_info size_overflow_plugin_info = {
84477 + .version = "20120618beta",
84478 + .help = "no-size-overflow\tturn off size overflow checking\n",
84479 +};
84480 +
84481 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
84482 +{
84483 + unsigned int arg_count = type_num_arguments(*node);
84484 +
84485 + for (; args; args = TREE_CHAIN(args)) {
84486 + tree position = TREE_VALUE(args);
84487 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_HIGH(position) || TREE_INT_CST_LOW(position) < 1 || TREE_INT_CST_LOW(position) > arg_count ) {
84488 + error("handle_size_overflow_attribute: overflow parameter outside range.");
84489 + *no_add_attrs = true;
84490 + }
84491 + }
84492 + return NULL_TREE;
84493 +}
84494 +
84495 +static struct attribute_spec no_size_overflow_attr = {
84496 + .name = "size_overflow",
84497 + .min_length = 1,
84498 + .max_length = -1,
84499 + .decl_required = false,
84500 + .type_required = true,
84501 + .function_type_required = true,
84502 + .handler = handle_size_overflow_attribute,
84503 +#if BUILDING_GCC_VERSION >= 4007
84504 + .affects_type_identity = false
84505 +#endif
84506 +};
84507 +
84508 +static void register_attributes(void __unused *event_data, void __unused *data)
84509 +{
84510 + register_attribute(&no_size_overflow_attr);
84511 +}
84512 +
84513 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
84514 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
84515 +{
84516 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
84517 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
84518 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
84519 +
84520 + const unsigned int m = 0x57559429;
84521 + const unsigned int n = 0x5052acdb;
84522 + const unsigned int *key4 = (const unsigned int *)key;
84523 + unsigned int h = len;
84524 + unsigned int k = len + seed + n;
84525 + unsigned long long p;
84526 +
84527 + while (len >= 8) {
84528 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
84529 + len -= 8;
84530 + }
84531 + if (len >= 4) {
84532 + cwmixb(key4[0]) key4 += 1;
84533 + len -= 4;
84534 + }
84535 + if (len)
84536 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
84537 + cwmixb(h ^ (k + n));
84538 + return k ^ h;
84539 +
84540 +#undef cwfold
84541 +#undef cwmixa
84542 +#undef cwmixb
84543 +}
84544 +
84545 +static inline unsigned int get_hash_num(const char *fndecl, const char *tree_codes, unsigned int len, unsigned int seed)
84546 +{
84547 + unsigned int fn = CrapWow(fndecl, strlen(fndecl), seed) & 0xffff;
84548 + unsigned int codes = CrapWow(tree_codes, len, seed) & 0xffff;
84549 + return fn ^ codes;
84550 +}
84551 +
84552 +static inline tree get_original_function_decl(tree fndecl)
84553 +{
84554 + if (DECL_ABSTRACT_ORIGIN(fndecl))
84555 + return DECL_ABSTRACT_ORIGIN(fndecl);
84556 + return fndecl;
84557 +}
84558 +
84559 +static inline gimple get_def_stmt(tree node)
84560 +{
84561 + gcc_assert(TREE_CODE(node) == SSA_NAME);
84562 + return SSA_NAME_DEF_STMT(node);
84563 +}
84564 +
84565 +static unsigned char get_tree_code(tree type)
84566 +{
84567 + switch (TREE_CODE(type)) {
84568 + case ARRAY_TYPE:
84569 + return 0;
84570 + case BOOLEAN_TYPE:
84571 + return 1;
84572 + case ENUMERAL_TYPE:
84573 + return 2;
84574 + case FUNCTION_TYPE:
84575 + return 3;
84576 + case INTEGER_TYPE:
84577 + return 4;
84578 + case POINTER_TYPE:
84579 + return 5;
84580 + case RECORD_TYPE:
84581 + return 6;
84582 + case UNION_TYPE:
84583 + return 7;
84584 + case VOID_TYPE:
84585 + return 8;
84586 + case REAL_TYPE:
84587 + return 9;
84588 + case VECTOR_TYPE:
84589 + return 10;
84590 + case REFERENCE_TYPE:
84591 + return 11;
84592 + default:
84593 + debug_tree(type);
84594 + gcc_unreachable();
84595 + }
84596 +}
84597 +
84598 +static size_t add_type_codes(tree type, unsigned char *tree_codes, size_t len)
84599 +{
84600 + gcc_assert(type != NULL_TREE);
84601 +
84602 + while (type && len < CODES_LIMIT) {
84603 + tree_codes[len] = get_tree_code(type);
84604 + len++;
84605 + type = TREE_TYPE(type);
84606 + }
84607 + return len;
84608 +}
84609 +
84610 +static unsigned int get_function_decl(tree fndecl, unsigned char *tree_codes)
84611 +{
84612 + tree arg, result, type = TREE_TYPE(fndecl);
84613 + enum tree_code code = TREE_CODE(type);
84614 + size_t len = 0;
84615 +
84616 + gcc_assert(code == FUNCTION_TYPE);
84617 +
84618 + arg = TYPE_ARG_TYPES(type);
84619 + // skip builtins __builtin_constant_p
84620 + if (!arg && DECL_BUILT_IN(fndecl))
84621 + return 0;
84622 + gcc_assert(arg != NULL_TREE);
84623 +
84624 + if (TREE_CODE_CLASS(code) == tcc_type)
84625 + result = type;
84626 + else
84627 + result = DECL_RESULT(fndecl);
84628 +
84629 + gcc_assert(result != NULL_TREE);
84630 + len = add_type_codes(TREE_TYPE(result), tree_codes, len);
84631 +
84632 + while (arg && len < CODES_LIMIT) {
84633 + len = add_type_codes(TREE_VALUE(arg), tree_codes, len);
84634 + arg = TREE_CHAIN(arg);
84635 + }
84636 +
84637 + gcc_assert(len != 0);
84638 + return len;
84639 +}
84640 +
84641 +static struct size_overflow_hash *get_function_hash(tree fndecl)
84642 +{
84643 + unsigned int hash;
84644 + struct size_overflow_hash *entry;
84645 + unsigned char tree_codes[CODES_LIMIT];
84646 + size_t len;
84647 + const char *func_name = NAME(fndecl);
84648 +
84649 + len = get_function_decl(fndecl, tree_codes);
84650 + if (len == 0)
84651 + return NULL;
84652 +
84653 + hash = get_hash_num(func_name, (const char*) tree_codes, len, 0);
84654 +
84655 + entry = size_overflow_hash[hash];
84656 + while (entry) {
84657 + if (!strcmp(entry->name, func_name))
84658 + return entry;
84659 + entry = entry->next;
84660 + }
84661 +
84662 + return NULL;
84663 +}
84664 +
84665 +static void check_arg_type(tree var)
84666 +{
84667 + tree type = TREE_TYPE(var);
84668 + enum tree_code code = TREE_CODE(type);
84669 +
84670 + gcc_assert(code == INTEGER_TYPE || code == ENUMERAL_TYPE ||
84671 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == VOID_TYPE) ||
84672 + (code == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == INTEGER_TYPE));
84673 +}
84674 +
84675 +static int find_arg_number(tree arg, tree func)
84676 +{
84677 + tree var;
84678 + bool match = false;
84679 + unsigned int argnum = 1;
84680 +
84681 + if (TREE_CODE(arg) == SSA_NAME)
84682 + arg = SSA_NAME_VAR(arg);
84683 +
84684 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var)) {
84685 + if (strcmp(NAME(arg), NAME(var))) {
84686 + argnum++;
84687 + continue;
84688 + }
84689 + check_arg_type(var);
84690 +
84691 + match = true;
84692 + break;
84693 + }
84694 + if (!match) {
84695 + warning(0, "find_arg_number: cannot find the %s argument in %s", NAME(arg), NAME(func));
84696 + return 0;
84697 + }
84698 + return argnum;
84699 +}
84700 +
84701 +static void print_missing_msg(tree func, unsigned int argnum)
84702 +{
84703 + unsigned int new_hash;
84704 + size_t len;
84705 + unsigned char tree_codes[CODES_LIMIT];
84706 + location_t loc = DECL_SOURCE_LOCATION(func);
84707 + const char *curfunc = NAME(func);
84708 +
84709 + len = get_function_decl(func, tree_codes);
84710 + new_hash = get_hash_num(curfunc, (const char *) tree_codes, len, 0);
84711 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%d+%u+", curfunc, curfunc, argnum, new_hash);
84712 +}
84713 +
84714 +static void check_missing_attribute(tree arg)
84715 +{
84716 + tree type, func = get_original_function_decl(current_function_decl);
84717 + unsigned int argnum;
84718 + struct size_overflow_hash *hash;
84719 +
84720 + gcc_assert(TREE_CODE(arg) != COMPONENT_REF);
84721 +
84722 + type = TREE_TYPE(arg);
84723 + // skip function pointers
84724 + if (TREE_CODE(type) == POINTER_TYPE && TREE_CODE(TREE_TYPE(type)) == FUNCTION_TYPE)
84725 + return;
84726 +
84727 + if (lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(func))))
84728 + return;
84729 +
84730 + argnum = find_arg_number(arg, func);
84731 + if (argnum == 0)
84732 + return;
84733 +
84734 + hash = get_function_hash(func);
84735 + if (!hash || !(hash->param & (1U << argnum)))
84736 + print_missing_msg(func, argnum);
84737 +}
84738 +
84739 +static tree create_new_var(tree type)
84740 +{
84741 + tree new_var = create_tmp_var(type, "cicus");
84742 +
84743 + add_referenced_var(new_var);
84744 + mark_sym_for_renaming(new_var);
84745 + return new_var;
84746 +}
84747 +
84748 +static bool is_bool(tree node)
84749 +{
84750 + tree type;
84751 +
84752 + if (node == NULL_TREE)
84753 + return false;
84754 +
84755 + type = TREE_TYPE(node);
84756 + if (!INTEGRAL_TYPE_P(type))
84757 + return false;
84758 + if (TREE_CODE(type) == BOOLEAN_TYPE)
84759 + return true;
84760 + if (TYPE_PRECISION(type) == 1)
84761 + return true;
84762 + return false;
84763 +}
84764 +
84765 +static tree cast_a_tree(tree type, tree var)
84766 +{
84767 + gcc_assert(type != NULL_TREE && var != NULL_TREE);
84768 + gcc_assert(fold_convertible_p(type, var));
84769 +
84770 + return fold_convert(type, var);
84771 +}
84772 +
84773 +static tree signed_cast(tree var)
84774 +{
84775 + return cast_a_tree(signed_size_overflow_type, var);
84776 +}
84777 +
84778 +static gimple build_cast_stmt(tree type, tree var, tree new_var, location_t loc)
84779 +{
84780 + gimple assign;
84781 +
84782 + if (new_var == CREATE_NEW_VAR)
84783 + new_var = create_new_var(type);
84784 +
84785 + assign = gimple_build_assign(new_var, cast_a_tree(type, var));
84786 + gimple_set_location(assign, loc);
84787 + gimple_set_lhs(assign, make_ssa_name(new_var, assign));
84788 +
84789 + return assign;
84790 +}
84791 +
84792 +static tree create_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, bool before)
84793 +{
84794 + tree oldstmt_rhs1;
84795 + enum tree_code code;
84796 + gimple stmt;
84797 + gimple_stmt_iterator gsi;
84798 +
84799 + if (!*potentionally_overflowed)
84800 + return NULL_TREE;
84801 +
84802 + if (rhs1 == NULL_TREE) {
84803 + debug_gimple_stmt(oldstmt);
84804 + error("create_assign: rhs1 is NULL_TREE");
84805 + gcc_unreachable();
84806 + }
84807 +
84808 + oldstmt_rhs1 = gimple_assign_rhs1(oldstmt);
84809 + code = TREE_CODE(oldstmt_rhs1);
84810 + if (code == PARM_DECL || (code == SSA_NAME && gimple_code(get_def_stmt(oldstmt_rhs1)) == GIMPLE_NOP))
84811 + check_missing_attribute(oldstmt_rhs1);
84812 +
84813 + stmt = build_cast_stmt(signed_size_overflow_type, rhs1, CREATE_NEW_VAR, gimple_location(oldstmt));
84814 + gsi = gsi_for_stmt(oldstmt);
84815 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
84816 + basic_block next_bb, cur_bb;
84817 + edge e;
84818 +
84819 + gcc_assert(before == false);
84820 + gcc_assert(stmt_can_throw_internal(oldstmt));
84821 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
84822 + gcc_assert(!gsi_end_p(gsi));
84823 +
84824 + cur_bb = gimple_bb(oldstmt);
84825 + next_bb = cur_bb->next_bb;
84826 + e = find_edge(cur_bb, next_bb);
84827 + gcc_assert(e != NULL);
84828 + gcc_assert(e->flags & EDGE_FALLTHRU);
84829 +
84830 + gsi = gsi_after_labels(next_bb);
84831 + gcc_assert(!gsi_end_p(gsi));
84832 + before = true;
84833 + }
84834 + if (before)
84835 + gsi_insert_before(&gsi, stmt, GSI_NEW_STMT);
84836 + else
84837 + gsi_insert_after(&gsi, stmt, GSI_NEW_STMT);
84838 + update_stmt(stmt);
84839 + pointer_set_insert(visited, oldstmt);
84840 + return gimple_get_lhs(stmt);
84841 +}
84842 +
84843 +static tree dup_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt, tree rhs1, tree rhs2, tree __unused rhs3)
84844 +{
84845 + tree new_var, lhs = gimple_get_lhs(oldstmt);
84846 + gimple stmt;
84847 + gimple_stmt_iterator gsi;
84848 +
84849 + if (!*potentionally_overflowed)
84850 + return NULL_TREE;
84851 +
84852 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
84853 + rhs1 = gimple_assign_rhs1(oldstmt);
84854 + rhs1 = create_assign(visited, potentionally_overflowed, oldstmt, rhs1, BEFORE_STMT);
84855 + }
84856 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
84857 + rhs2 = gimple_assign_rhs2(oldstmt);
84858 + rhs2 = create_assign(visited, potentionally_overflowed, oldstmt, rhs2, BEFORE_STMT);
84859 + }
84860 +
84861 + stmt = gimple_copy(oldstmt);
84862 + gimple_set_location(stmt, gimple_location(oldstmt));
84863 +
84864 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
84865 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
84866 +
84867 + if (is_bool(lhs))
84868 + new_var = SSA_NAME_VAR(lhs);
84869 + else
84870 + new_var = create_new_var(signed_size_overflow_type);
84871 + new_var = make_ssa_name(new_var, stmt);
84872 + gimple_set_lhs(stmt, new_var);
84873 +
84874 + if (rhs1 != NULL_TREE) {
84875 + if (!gimple_assign_cast_p(oldstmt))
84876 + rhs1 = signed_cast(rhs1);
84877 + gimple_assign_set_rhs1(stmt, rhs1);
84878 + }
84879 +
84880 + if (rhs2 != NULL_TREE)
84881 + gimple_assign_set_rhs2(stmt, rhs2);
84882 +#if BUILDING_GCC_VERSION >= 4007
84883 + if (rhs3 != NULL_TREE)
84884 + gimple_assign_set_rhs3(stmt, rhs3);
84885 +#endif
84886 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
84887 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
84888 +
84889 + gsi = gsi_for_stmt(oldstmt);
84890 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
84891 + update_stmt(stmt);
84892 + pointer_set_insert(visited, oldstmt);
84893 + return gimple_get_lhs(stmt);
84894 +}
84895 +
84896 +static gimple overflow_create_phi_node(gimple oldstmt, tree var)
84897 +{
84898 + basic_block bb;
84899 + gimple phi;
84900 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
84901 +
84902 + bb = gsi_bb(gsi);
84903 +
84904 + phi = create_phi_node(var, bb);
84905 + gsi = gsi_last(phi_nodes(bb));
84906 + gsi_remove(&gsi, false);
84907 +
84908 + gsi = gsi_for_stmt(oldstmt);
84909 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
84910 + gimple_set_bb(phi, bb);
84911 + return phi;
84912 +}
84913 +
84914 +static basic_block create_a_first_bb(void)
84915 +{
84916 + basic_block first_bb;
84917 +
84918 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
84919 + if (dom_info_available_p(CDI_DOMINATORS))
84920 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
84921 + return first_bb;
84922 +}
84923 +
84924 +static gimple cast_old_phi_arg(gimple oldstmt, tree arg, tree new_var, unsigned int i)
84925 +{
84926 + basic_block bb;
84927 + gimple newstmt, def_stmt;
84928 + gimple_stmt_iterator gsi;
84929 +
84930 + newstmt = build_cast_stmt(signed_size_overflow_type, arg, new_var, gimple_location(oldstmt));
84931 + if (TREE_CODE(arg) == SSA_NAME) {
84932 + def_stmt = get_def_stmt(arg);
84933 + if (gimple_code(def_stmt) != GIMPLE_NOP) {
84934 + gsi = gsi_for_stmt(def_stmt);
84935 + gsi_insert_after(&gsi, newstmt, GSI_NEW_STMT);
84936 + return newstmt;
84937 + }
84938 + }
84939 +
84940 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
84941 + if (bb->index == 0)
84942 + bb = create_a_first_bb();
84943 + gsi = gsi_after_labels(bb);
84944 + gsi_insert_before(&gsi, newstmt, GSI_NEW_STMT);
84945 + return newstmt;
84946 +}
84947 +
84948 +static gimple handle_new_phi_arg(tree arg, tree new_var, tree new_rhs)
84949 +{
84950 + gimple newstmt;
84951 + gimple_stmt_iterator gsi;
84952 + void (*gsi_insert)(gimple_stmt_iterator *, gimple, enum gsi_iterator_update);
84953 + gimple def_newstmt = get_def_stmt(new_rhs);
84954 +
84955 + gsi_insert = gsi_insert_after;
84956 + gsi = gsi_for_stmt(def_newstmt);
84957 +
84958 + switch (gimple_code(get_def_stmt(arg))) {
84959 + case GIMPLE_PHI:
84960 + newstmt = gimple_build_assign(new_var, new_rhs);
84961 + gsi = gsi_after_labels(gimple_bb(def_newstmt));
84962 + gsi_insert = gsi_insert_before;
84963 + break;
84964 + case GIMPLE_ASM:
84965 + case GIMPLE_CALL:
84966 + newstmt = gimple_build_assign(new_var, new_rhs);
84967 + break;
84968 + case GIMPLE_ASSIGN:
84969 + newstmt = gimple_build_assign(new_var, gimple_get_lhs(def_newstmt));
84970 + break;
84971 + default:
84972 + /* unknown gimple_code (handle_build_new_phi_arg) */
84973 + gcc_unreachable();
84974 + }
84975 +
84976 + gimple_set_lhs(newstmt, make_ssa_name(new_var, newstmt));
84977 + gsi_insert(&gsi, newstmt, GSI_NEW_STMT);
84978 + update_stmt(newstmt);
84979 + return newstmt;
84980 +}
84981 +
84982 +static tree build_new_phi_arg(struct pointer_set_t *visited, bool *potentionally_overflowed, tree arg, tree new_var)
84983 +{
84984 + gimple newstmt;
84985 + tree new_rhs;
84986 +
84987 + new_rhs = expand(visited, potentionally_overflowed, arg);
84988 +
84989 + if (new_rhs == NULL_TREE)
84990 + return NULL_TREE;
84991 +
84992 + newstmt = handle_new_phi_arg(arg, new_var, new_rhs);
84993 + return gimple_get_lhs(newstmt);
84994 +}
84995 +
84996 +static tree build_new_phi(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple oldstmt)
84997 +{
84998 + gimple phi;
84999 + tree new_var = create_new_var(signed_size_overflow_type);
85000 + unsigned int i, n = gimple_phi_num_args(oldstmt);
85001 +
85002 + pointer_set_insert(visited, oldstmt);
85003 + phi = overflow_create_phi_node(oldstmt, new_var);
85004 + for (i = 0; i < n; i++) {
85005 + tree arg, lhs;
85006 +
85007 + arg = gimple_phi_arg_def(oldstmt, i);
85008 + if (is_gimple_constant(arg))
85009 + arg = signed_cast(arg);
85010 + lhs = build_new_phi_arg(visited, potentionally_overflowed, arg, new_var);
85011 + if (lhs == NULL_TREE)
85012 + lhs = gimple_get_lhs(cast_old_phi_arg(oldstmt, arg, new_var, i));
85013 + add_phi_arg(phi, lhs, gimple_phi_arg_edge(oldstmt, i), gimple_location(oldstmt));
85014 + }
85015 +
85016 + update_stmt(phi);
85017 + return gimple_phi_result(phi);
85018 +}
85019 +
85020 +static tree handle_unary_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85021 +{
85022 + gimple def_stmt = get_def_stmt(var);
85023 + tree new_rhs1, rhs1 = gimple_assign_rhs1(def_stmt);
85024 +
85025 + *potentionally_overflowed = true;
85026 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85027 + if (new_rhs1 == NULL_TREE) {
85028 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
85029 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85030 + else
85031 + return create_assign(visited, potentionally_overflowed, def_stmt, rhs1, AFTER_STMT);
85032 + }
85033 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, NULL_TREE, NULL_TREE);
85034 +}
85035 +
85036 +static tree handle_unary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85037 +{
85038 + gimple def_stmt = get_def_stmt(var);
85039 + tree rhs1 = gimple_assign_rhs1(def_stmt);
85040 +
85041 + if (is_gimple_constant(rhs1))
85042 + return dup_assign(visited, potentionally_overflowed, def_stmt, signed_cast(rhs1), NULL_TREE, NULL_TREE);
85043 +
85044 + gcc_assert(TREE_CODE(rhs1) != COND_EXPR);
85045 + switch (TREE_CODE(rhs1)) {
85046 + case SSA_NAME:
85047 + return handle_unary_rhs(visited, potentionally_overflowed, var);
85048 +
85049 + case ARRAY_REF:
85050 + case BIT_FIELD_REF:
85051 + case ADDR_EXPR:
85052 + case COMPONENT_REF:
85053 + case INDIRECT_REF:
85054 +#if BUILDING_GCC_VERSION >= 4006
85055 + case MEM_REF:
85056 +#endif
85057 + case PARM_DECL:
85058 + case TARGET_MEM_REF:
85059 + case VAR_DECL:
85060 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85061 +
85062 + default:
85063 + debug_gimple_stmt(def_stmt);
85064 + debug_tree(rhs1);
85065 + gcc_unreachable();
85066 + }
85067 +}
85068 +
85069 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
85070 +{
85071 + gimple cond_stmt;
85072 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
85073 +
85074 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
85075 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
85076 + update_stmt(cond_stmt);
85077 +}
85078 +
85079 +static tree create_string_param(tree string)
85080 +{
85081 + tree i_type, a_type;
85082 + int length = TREE_STRING_LENGTH(string);
85083 +
85084 + gcc_assert(length > 0);
85085 +
85086 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
85087 + a_type = build_array_type(char_type_node, i_type);
85088 +
85089 + TREE_TYPE(string) = a_type;
85090 + TREE_CONSTANT(string) = 1;
85091 + TREE_READONLY(string) = 1;
85092 +
85093 + return build1(ADDR_EXPR, ptr_type_node, string);
85094 +}
85095 +
85096 +static void insert_cond_result(basic_block bb_true, gimple stmt, tree arg)
85097 +{
85098 + gimple func_stmt, def_stmt;
85099 + tree current_func, loc_file, loc_line;
85100 + expanded_location xloc;
85101 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
85102 +
85103 + def_stmt = get_def_stmt(arg);
85104 + xloc = expand_location(gimple_location(def_stmt));
85105 +
85106 + if (!gimple_has_location(def_stmt)) {
85107 + xloc = expand_location(gimple_location(stmt));
85108 + if (!gimple_has_location(stmt))
85109 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
85110 + }
85111 +
85112 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
85113 +
85114 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
85115 + loc_file = create_string_param(loc_file);
85116 +
85117 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
85118 + current_func = create_string_param(current_func);
85119 +
85120 + // void report_size_overflow(const char *file, unsigned int line, const char *func)
85121 + func_stmt = gimple_build_call(report_size_overflow_decl, 3, loc_file, loc_line, current_func);
85122 +
85123 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
85124 +}
85125 +
85126 +static void __unused print_the_code_insertions(gimple stmt)
85127 +{
85128 + location_t loc = gimple_location(stmt);
85129 +
85130 + inform(loc, "Integer size_overflow check applied here.");
85131 +}
85132 +
85133 +static void insert_check_size_overflow(gimple stmt, enum tree_code cond_code, tree arg, tree type_value)
85134 +{
85135 + basic_block cond_bb, join_bb, bb_true;
85136 + edge e;
85137 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85138 +
85139 + cond_bb = gimple_bb(stmt);
85140 + gsi_prev(&gsi);
85141 + if (gsi_end_p(gsi))
85142 + e = split_block_after_labels(cond_bb);
85143 + else
85144 + e = split_block(cond_bb, gsi_stmt(gsi));
85145 + cond_bb = e->src;
85146 + join_bb = e->dest;
85147 + e->flags = EDGE_FALSE_VALUE;
85148 + e->probability = REG_BR_PROB_BASE;
85149 +
85150 + bb_true = create_empty_bb(cond_bb);
85151 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
85152 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
85153 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
85154 +
85155 + if (dom_info_available_p(CDI_DOMINATORS)) {
85156 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
85157 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
85158 + }
85159 +
85160 + if (current_loops != NULL) {
85161 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
85162 + add_bb_to_loop(bb_true, cond_bb->loop_father);
85163 + }
85164 +
85165 + insert_cond(cond_bb, arg, cond_code, type_value);
85166 + insert_cond_result(bb_true, stmt, arg);
85167 +
85168 +// print_the_code_insertions(stmt);
85169 +}
85170 +
85171 +static gimple cast_to_unsigned_size_overflow_type(gimple stmt, tree cast_rhs)
85172 +{
85173 + gimple ucast_stmt;
85174 + gimple_stmt_iterator gsi;
85175 + location_t loc = gimple_location(stmt);
85176 +
85177 + ucast_stmt = build_cast_stmt(unsigned_size_overflow_type, cast_rhs, CREATE_NEW_VAR, loc);
85178 + gsi = gsi_for_stmt(stmt);
85179 + gsi_insert_before(&gsi, ucast_stmt, GSI_SAME_STMT);
85180 + return ucast_stmt;
85181 +}
85182 +
85183 +static void check_size_overflow(gimple stmt, tree cast_rhs, tree rhs, bool *potentionally_overflowed)
85184 +{
85185 + tree type_max, type_min, rhs_type = TREE_TYPE(rhs);
85186 + gimple ucast_stmt;
85187 +
85188 + if (!*potentionally_overflowed)
85189 + return;
85190 +
85191 + if (TYPE_UNSIGNED(rhs_type)) {
85192 + ucast_stmt = cast_to_unsigned_size_overflow_type(stmt, cast_rhs);
85193 + type_max = cast_a_tree(unsigned_size_overflow_type, TYPE_MAX_VALUE(rhs_type));
85194 + insert_check_size_overflow(stmt, GT_EXPR, gimple_get_lhs(ucast_stmt), type_max);
85195 + } else {
85196 + type_max = signed_cast(TYPE_MAX_VALUE(rhs_type));
85197 + insert_check_size_overflow(stmt, GT_EXPR, cast_rhs, type_max);
85198 +
85199 + type_min = signed_cast(TYPE_MIN_VALUE(rhs_type));
85200 + insert_check_size_overflow(stmt, LT_EXPR, cast_rhs, type_min);
85201 + }
85202 +}
85203 +
85204 +static tree change_assign_rhs(gimple stmt, tree orig_rhs, tree new_rhs)
85205 +{
85206 + gimple assign;
85207 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85208 + tree origtype = TREE_TYPE(orig_rhs);
85209 +
85210 + gcc_assert(gimple_code(stmt) == GIMPLE_ASSIGN);
85211 +
85212 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, gimple_location(stmt));
85213 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85214 + update_stmt(assign);
85215 + return gimple_get_lhs(assign);
85216 +}
85217 +
85218 +static tree handle_const_assign(struct pointer_set_t *visited, bool *potentionally_overflowed, gimple def_stmt, tree var, tree orig_rhs, tree var_rhs, tree new_rhs1, tree new_rhs2, void (*gimple_assign_set_rhs)(gimple, tree))
85219 +{
85220 + tree new_rhs;
85221 +
85222 + if (gimple_assign_rhs_code(def_stmt) == MIN_EXPR)
85223 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85224 +
85225 + if (var_rhs == NULL_TREE)
85226 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85227 +
85228 + new_rhs = change_assign_rhs(def_stmt, orig_rhs, var_rhs);
85229 + gimple_assign_set_rhs(def_stmt, new_rhs);
85230 + update_stmt(def_stmt);
85231 +
85232 + check_size_overflow(def_stmt, var_rhs, orig_rhs, potentionally_overflowed);
85233 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85234 +}
85235 +
85236 +static tree handle_binary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85237 +{
85238 + tree rhs1, rhs2;
85239 + gimple def_stmt = get_def_stmt(var);
85240 + tree new_rhs1 = NULL_TREE;
85241 + tree new_rhs2 = NULL_TREE;
85242 +
85243 + rhs1 = gimple_assign_rhs1(def_stmt);
85244 + rhs2 = gimple_assign_rhs2(def_stmt);
85245 +
85246 + /* no DImode/TImode division in the 32/64 bit kernel */
85247 + switch (gimple_assign_rhs_code(def_stmt)) {
85248 + case RDIV_EXPR:
85249 + case TRUNC_DIV_EXPR:
85250 + case CEIL_DIV_EXPR:
85251 + case FLOOR_DIV_EXPR:
85252 + case ROUND_DIV_EXPR:
85253 + case TRUNC_MOD_EXPR:
85254 + case CEIL_MOD_EXPR:
85255 + case FLOOR_MOD_EXPR:
85256 + case ROUND_MOD_EXPR:
85257 + case EXACT_DIV_EXPR:
85258 + case POINTER_PLUS_EXPR:
85259 + case BIT_AND_EXPR:
85260 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85261 + default:
85262 + break;
85263 + }
85264 +
85265 + *potentionally_overflowed = true;
85266 +
85267 + if (TREE_CODE(rhs1) == SSA_NAME)
85268 + new_rhs1 = expand(visited, potentionally_overflowed, rhs1);
85269 + if (TREE_CODE(rhs2) == SSA_NAME)
85270 + new_rhs2 = expand(visited, potentionally_overflowed, rhs2);
85271 +
85272 + if (is_gimple_constant(rhs2))
85273 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs1, new_rhs1, new_rhs1, signed_cast(rhs2), &gimple_assign_set_rhs1);
85274 +
85275 + if (is_gimple_constant(rhs1))
85276 + return handle_const_assign(visited, potentionally_overflowed, def_stmt, var, rhs2, new_rhs2, signed_cast(rhs1), new_rhs2, &gimple_assign_set_rhs2);
85277 +
85278 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, NULL_TREE);
85279 +}
85280 +
85281 +#if BUILDING_GCC_VERSION >= 4007
85282 +static tree get_new_rhs(struct pointer_set_t *visited, bool *potentionally_overflowed, tree rhs)
85283 +{
85284 + if (is_gimple_constant(rhs))
85285 + return signed_cast(rhs);
85286 + if (TREE_CODE(rhs) != SSA_NAME)
85287 + return NULL_TREE;
85288 + return expand(visited, potentionally_overflowed, rhs);
85289 +}
85290 +
85291 +static tree handle_ternary_ops(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85292 +{
85293 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3;
85294 + gimple def_stmt = get_def_stmt(var);
85295 +
85296 + *potentionally_overflowed = true;
85297 +
85298 + rhs1 = gimple_assign_rhs1(def_stmt);
85299 + rhs2 = gimple_assign_rhs2(def_stmt);
85300 + rhs3 = gimple_assign_rhs3(def_stmt);
85301 + new_rhs1 = get_new_rhs(visited, potentionally_overflowed, rhs1);
85302 + new_rhs2 = get_new_rhs(visited, potentionally_overflowed, rhs2);
85303 + new_rhs3 = get_new_rhs(visited, potentionally_overflowed, rhs3);
85304 +
85305 + if (new_rhs1 == NULL_TREE && new_rhs2 != NULL_TREE && new_rhs3 != NULL_TREE)
85306 + return dup_assign(visited, potentionally_overflowed, def_stmt, new_rhs1, new_rhs2, new_rhs3);
85307 + error("handle_ternary_ops: unknown rhs");
85308 + gcc_unreachable();
85309 +}
85310 +#endif
85311 +
85312 +static void set_size_overflow_type(tree node)
85313 +{
85314 + switch (TYPE_MODE(TREE_TYPE(node))) {
85315 + case SImode:
85316 + signed_size_overflow_type = intDI_type_node;
85317 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85318 + break;
85319 + case DImode:
85320 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode)) {
85321 + signed_size_overflow_type = intDI_type_node;
85322 + unsigned_size_overflow_type = unsigned_intDI_type_node;
85323 + } else {
85324 + signed_size_overflow_type = intTI_type_node;
85325 + unsigned_size_overflow_type = unsigned_intTI_type_node;
85326 + }
85327 + break;
85328 + default:
85329 + error("set_size_overflow_type: unsupported gcc configuration.");
85330 + gcc_unreachable();
85331 + }
85332 +}
85333 +
85334 +static tree expand_visited(gimple def_stmt)
85335 +{
85336 + gimple tmp;
85337 + gimple_stmt_iterator gsi = gsi_for_stmt(def_stmt);
85338 +
85339 + gsi_next(&gsi);
85340 + tmp = gsi_stmt(gsi);
85341 + switch (gimple_code(tmp)) {
85342 + case GIMPLE_ASSIGN:
85343 + return gimple_get_lhs(tmp);
85344 + case GIMPLE_PHI:
85345 + return gimple_phi_result(tmp);
85346 + case GIMPLE_CALL:
85347 + return gimple_call_lhs(tmp);
85348 + default:
85349 + return NULL_TREE;
85350 + }
85351 +}
85352 +
85353 +static tree expand(struct pointer_set_t *visited, bool *potentionally_overflowed, tree var)
85354 +{
85355 + gimple def_stmt;
85356 + enum tree_code code = TREE_CODE(TREE_TYPE(var));
85357 +
85358 + if (is_gimple_constant(var))
85359 + return NULL_TREE;
85360 +
85361 + if (TREE_CODE(var) == ADDR_EXPR)
85362 + return NULL_TREE;
85363 +
85364 + gcc_assert(code == INTEGER_TYPE || code == POINTER_TYPE || code == BOOLEAN_TYPE || code == ENUMERAL_TYPE);
85365 + if (code != INTEGER_TYPE)
85366 + return NULL_TREE;
85367 +
85368 + if (SSA_NAME_IS_DEFAULT_DEF(var)) {
85369 + check_missing_attribute(var);
85370 + return NULL_TREE;
85371 + }
85372 +
85373 + def_stmt = get_def_stmt(var);
85374 +
85375 + if (!def_stmt)
85376 + return NULL_TREE;
85377 +
85378 + if (pointer_set_contains(visited, def_stmt))
85379 + return expand_visited(def_stmt);
85380 +
85381 + switch (gimple_code(def_stmt)) {
85382 + case GIMPLE_NOP:
85383 + check_missing_attribute(var);
85384 + return NULL_TREE;
85385 + case GIMPLE_PHI:
85386 + return build_new_phi(visited, potentionally_overflowed, def_stmt);
85387 + case GIMPLE_CALL:
85388 + case GIMPLE_ASM:
85389 + return create_assign(visited, potentionally_overflowed, def_stmt, var, AFTER_STMT);
85390 + case GIMPLE_ASSIGN:
85391 + switch (gimple_num_ops(def_stmt)) {
85392 + case 2:
85393 + return handle_unary_ops(visited, potentionally_overflowed, var);
85394 + case 3:
85395 + return handle_binary_ops(visited, potentionally_overflowed, var);
85396 +#if BUILDING_GCC_VERSION >= 4007
85397 + case 4:
85398 + return handle_ternary_ops(visited, potentionally_overflowed, var);
85399 +#endif
85400 + }
85401 + default:
85402 + debug_gimple_stmt(def_stmt);
85403 + error("expand: unknown gimple code");
85404 + gcc_unreachable();
85405 + }
85406 +}
85407 +
85408 +static void change_function_arg(gimple stmt, tree origarg, unsigned int argnum, tree newarg)
85409 +{
85410 + gimple assign;
85411 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
85412 + tree origtype = TREE_TYPE(origarg);
85413 +
85414 + gcc_assert(gimple_code(stmt) == GIMPLE_CALL);
85415 +
85416 + assign = build_cast_stmt(origtype, newarg, CREATE_NEW_VAR, gimple_location(stmt));
85417 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
85418 + update_stmt(assign);
85419 +
85420 + gimple_call_set_arg(stmt, argnum, gimple_get_lhs(assign));
85421 + update_stmt(stmt);
85422 +}
85423 +
85424 +static tree get_function_arg(unsigned int argnum, gimple stmt, tree fndecl)
85425 +{
85426 + const char *origid;
85427 + tree arg, origarg;
85428 +
85429 + if (!DECL_ABSTRACT_ORIGIN(fndecl)) {
85430 + gcc_assert(gimple_call_num_args(stmt) > argnum);
85431 + return gimple_call_arg(stmt, argnum);
85432 + }
85433 +
85434 + origarg = DECL_ARGUMENTS(DECL_ABSTRACT_ORIGIN(fndecl));
85435 + while (origarg && argnum) {
85436 + argnum--;
85437 + origarg = TREE_CHAIN(origarg);
85438 + }
85439 +
85440 + gcc_assert(argnum == 0);
85441 +
85442 + gcc_assert(origarg != NULL_TREE);
85443 + origid = NAME(origarg);
85444 + for (arg = DECL_ARGUMENTS(fndecl); arg; arg = TREE_CHAIN(arg)) {
85445 + if (!strcmp(origid, NAME(arg)))
85446 + return arg;
85447 + }
85448 + return NULL_TREE;
85449 +}
85450 +
85451 +static void handle_function_arg(gimple stmt, tree fndecl, unsigned int argnum)
85452 +{
85453 + struct pointer_set_t *visited;
85454 + tree arg, newarg;
85455 + bool potentionally_overflowed;
85456 +
85457 + arg = get_function_arg(argnum, stmt, fndecl);
85458 + if (arg == NULL_TREE)
85459 + return;
85460 +
85461 + if (is_gimple_constant(arg))
85462 + return;
85463 + if (TREE_CODE(arg) != SSA_NAME)
85464 + return;
85465 +
85466 + check_arg_type(arg);
85467 +
85468 + set_size_overflow_type(arg);
85469 +
85470 + visited = pointer_set_create();
85471 + potentionally_overflowed = false;
85472 + newarg = expand(visited, &potentionally_overflowed, arg);
85473 + pointer_set_destroy(visited);
85474 +
85475 + if (newarg == NULL_TREE || !potentionally_overflowed)
85476 + return;
85477 +
85478 + change_function_arg(stmt, arg, argnum, newarg);
85479 +
85480 + check_size_overflow(stmt, newarg, arg, &potentionally_overflowed);
85481 +}
85482 +
85483 +static void handle_function_by_attribute(gimple stmt, tree attr, tree fndecl)
85484 +{
85485 + tree p = TREE_VALUE(attr);
85486 + do {
85487 + handle_function_arg(stmt, fndecl, TREE_INT_CST_LOW(TREE_VALUE(p))-1);
85488 + p = TREE_CHAIN(p);
85489 + } while (p);
85490 +}
85491 +
85492 +static void handle_function_by_hash(gimple stmt, tree fndecl)
85493 +{
85494 + tree orig_fndecl;
85495 + unsigned int num;
85496 + struct size_overflow_hash *hash;
85497 +
85498 + orig_fndecl = get_original_function_decl(fndecl);
85499 + hash = get_function_hash(orig_fndecl);
85500 + if (!hash)
85501 + return;
85502 +
85503 + for (num = 1; num <= MAX_PARAM; num++)
85504 + if (hash->param & (1U << num))
85505 + handle_function_arg(stmt, fndecl, num - 1);
85506 +}
85507 +
85508 +static unsigned int handle_function(void)
85509 +{
85510 + basic_block bb = ENTRY_BLOCK_PTR->next_bb;
85511 + int saved_last_basic_block = last_basic_block;
85512 +
85513 + do {
85514 + gimple_stmt_iterator gsi;
85515 + basic_block next = bb->next_bb;
85516 +
85517 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85518 + tree fndecl, attr;
85519 + gimple stmt = gsi_stmt(gsi);
85520 +
85521 + if (!(is_gimple_call(stmt)))
85522 + continue;
85523 + fndecl = gimple_call_fndecl(stmt);
85524 + if (fndecl == NULL_TREE)
85525 + continue;
85526 + if (gimple_call_num_args(stmt) == 0)
85527 + continue;
85528 + attr = lookup_attribute("size_overflow", TYPE_ATTRIBUTES(TREE_TYPE(fndecl)));
85529 + if (!attr || !TREE_VALUE(attr))
85530 + handle_function_by_hash(stmt, fndecl);
85531 + else
85532 + handle_function_by_attribute(stmt, attr, fndecl);
85533 + gsi = gsi_for_stmt(stmt);
85534 + }
85535 + bb = next;
85536 + } while (bb && bb->index <= saved_last_basic_block);
85537 + return 0;
85538 +}
85539 +
85540 +static struct gimple_opt_pass size_overflow_pass = {
85541 + .pass = {
85542 + .type = GIMPLE_PASS,
85543 + .name = "size_overflow",
85544 + .gate = NULL,
85545 + .execute = handle_function,
85546 + .sub = NULL,
85547 + .next = NULL,
85548 + .static_pass_number = 0,
85549 + .tv_id = TV_NONE,
85550 + .properties_required = PROP_cfg | PROP_referenced_vars,
85551 + .properties_provided = 0,
85552 + .properties_destroyed = 0,
85553 + .todo_flags_start = 0,
85554 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
85555 + }
85556 +};
85557 +
85558 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
85559 +{
85560 + tree fntype;
85561 +
85562 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
85563 +
85564 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func)
85565 + fntype = build_function_type_list(void_type_node,
85566 + const_char_ptr_type_node,
85567 + unsigned_type_node,
85568 + const_char_ptr_type_node,
85569 + NULL_TREE);
85570 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
85571 +
85572 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
85573 + TREE_PUBLIC(report_size_overflow_decl) = 1;
85574 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
85575 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
85576 +}
85577 +
85578 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85579 +{
85580 + int i;
85581 + const char * const plugin_name = plugin_info->base_name;
85582 + const int argc = plugin_info->argc;
85583 + const struct plugin_argument * const argv = plugin_info->argv;
85584 + bool enable = true;
85585 +
85586 + struct register_pass_info size_overflow_pass_info = {
85587 + .pass = &size_overflow_pass.pass,
85588 + .reference_pass_name = "ssa",
85589 + .ref_pass_instance_number = 1,
85590 + .pos_op = PASS_POS_INSERT_AFTER
85591 + };
85592 +
85593 + if (!plugin_default_version_check(version, &gcc_version)) {
85594 + error(G_("incompatible gcc/plugin versions"));
85595 + return 1;
85596 + }
85597 +
85598 + for (i = 0; i < argc; ++i) {
85599 + if (!strcmp(argv[i].key, "no-size-overflow")) {
85600 + enable = false;
85601 + continue;
85602 + }
85603 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85604 + }
85605 +
85606 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
85607 + if (enable) {
85608 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
85609 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &size_overflow_pass_info);
85610 + }
85611 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
85612 +
85613 + return 0;
85614 +}
85615 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
85616 new file mode 100644
85617 index 0000000..38d2014
85618 --- /dev/null
85619 +++ b/tools/gcc/stackleak_plugin.c
85620 @@ -0,0 +1,313 @@
85621 +/*
85622 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
85623 + * Licensed under the GPL v2
85624 + *
85625 + * Note: the choice of the license means that the compilation process is
85626 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
85627 + * but for the kernel it doesn't matter since it doesn't link against
85628 + * any of the gcc libraries
85629 + *
85630 + * gcc plugin to help implement various PaX features
85631 + *
85632 + * - track lowest stack pointer
85633 + *
85634 + * TODO:
85635 + * - initialize all local variables
85636 + *
85637 + * BUGS:
85638 + * - none known
85639 + */
85640 +#include "gcc-plugin.h"
85641 +#include "config.h"
85642 +#include "system.h"
85643 +#include "coretypes.h"
85644 +#include "tree.h"
85645 +#include "tree-pass.h"
85646 +#include "flags.h"
85647 +#include "intl.h"
85648 +#include "toplev.h"
85649 +#include "plugin.h"
85650 +//#include "expr.h" where are you...
85651 +#include "diagnostic.h"
85652 +#include "plugin-version.h"
85653 +#include "tm.h"
85654 +#include "function.h"
85655 +#include "basic-block.h"
85656 +#include "gimple.h"
85657 +#include "rtl.h"
85658 +#include "emit-rtl.h"
85659 +
85660 +extern void print_gimple_stmt(FILE *, gimple, int, int);
85661 +
85662 +int plugin_is_GPL_compatible;
85663 +
85664 +static int track_frame_size = -1;
85665 +static const char track_function[] = "pax_track_stack";
85666 +static const char check_function[] = "pax_check_alloca";
85667 +static bool init_locals;
85668 +
85669 +static struct plugin_info stackleak_plugin_info = {
85670 + .version = "201203140940",
85671 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
85672 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
85673 +};
85674 +
85675 +static bool gate_stackleak_track_stack(void);
85676 +static unsigned int execute_stackleak_tree_instrument(void);
85677 +static unsigned int execute_stackleak_final(void);
85678 +
85679 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
85680 + .pass = {
85681 + .type = GIMPLE_PASS,
85682 + .name = "stackleak_tree_instrument",
85683 + .gate = gate_stackleak_track_stack,
85684 + .execute = execute_stackleak_tree_instrument,
85685 + .sub = NULL,
85686 + .next = NULL,
85687 + .static_pass_number = 0,
85688 + .tv_id = TV_NONE,
85689 + .properties_required = PROP_gimple_leh | PROP_cfg,
85690 + .properties_provided = 0,
85691 + .properties_destroyed = 0,
85692 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
85693 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
85694 + }
85695 +};
85696 +
85697 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
85698 + .pass = {
85699 + .type = RTL_PASS,
85700 + .name = "stackleak_final",
85701 + .gate = gate_stackleak_track_stack,
85702 + .execute = execute_stackleak_final,
85703 + .sub = NULL,
85704 + .next = NULL,
85705 + .static_pass_number = 0,
85706 + .tv_id = TV_NONE,
85707 + .properties_required = 0,
85708 + .properties_provided = 0,
85709 + .properties_destroyed = 0,
85710 + .todo_flags_start = 0,
85711 + .todo_flags_finish = TODO_dump_func
85712 + }
85713 +};
85714 +
85715 +static bool gate_stackleak_track_stack(void)
85716 +{
85717 + return track_frame_size >= 0;
85718 +}
85719 +
85720 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
85721 +{
85722 + gimple check_alloca;
85723 + tree fntype, fndecl, alloca_size;
85724 +
85725 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
85726 + fndecl = build_fn_decl(check_function, fntype);
85727 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
85728 +
85729 + // insert call to void pax_check_alloca(unsigned long size)
85730 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
85731 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
85732 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
85733 +}
85734 +
85735 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
85736 +{
85737 + gimple track_stack;
85738 + tree fntype, fndecl;
85739 +
85740 + fntype = build_function_type_list(void_type_node, NULL_TREE);
85741 + fndecl = build_fn_decl(track_function, fntype);
85742 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
85743 +
85744 + // insert call to void pax_track_stack(void)
85745 + track_stack = gimple_build_call(fndecl, 0);
85746 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
85747 +}
85748 +
85749 +#if BUILDING_GCC_VERSION == 4005
85750 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
85751 +{
85752 + tree fndecl;
85753 +
85754 + if (!is_gimple_call(stmt))
85755 + return false;
85756 + fndecl = gimple_call_fndecl(stmt);
85757 + if (!fndecl)
85758 + return false;
85759 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
85760 + return false;
85761 +// print_node(stderr, "pax", fndecl, 4);
85762 + return DECL_FUNCTION_CODE(fndecl) == code;
85763 +}
85764 +#endif
85765 +
85766 +static bool is_alloca(gimple stmt)
85767 +{
85768 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
85769 + return true;
85770 +
85771 +#if BUILDING_GCC_VERSION >= 4007
85772 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
85773 + return true;
85774 +#endif
85775 +
85776 + return false;
85777 +}
85778 +
85779 +static unsigned int execute_stackleak_tree_instrument(void)
85780 +{
85781 + basic_block bb, entry_bb;
85782 + bool prologue_instrumented = false, is_leaf = true;
85783 +
85784 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
85785 +
85786 + // 1. loop through BBs and GIMPLE statements
85787 + FOR_EACH_BB(bb) {
85788 + gimple_stmt_iterator gsi;
85789 +
85790 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
85791 + gimple stmt;
85792 +
85793 + stmt = gsi_stmt(gsi);
85794 +
85795 + if (is_gimple_call(stmt))
85796 + is_leaf = false;
85797 +
85798 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
85799 + if (!is_alloca(stmt))
85800 + continue;
85801 +
85802 + // 2. insert stack overflow check before each __builtin_alloca call
85803 + stackleak_check_alloca(&gsi);
85804 +
85805 + // 3. insert track call after each __builtin_alloca call
85806 + stackleak_add_instrumentation(&gsi);
85807 + if (bb == entry_bb)
85808 + prologue_instrumented = true;
85809 + }
85810 + }
85811 +
85812 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
85813 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
85814 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
85815 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
85816 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
85817 + return 0;
85818 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
85819 + return 0;
85820 +
85821 + // 4. insert track call at the beginning
85822 + if (!prologue_instrumented) {
85823 + gimple_stmt_iterator gsi;
85824 +
85825 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
85826 + if (dom_info_available_p(CDI_DOMINATORS))
85827 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
85828 + gsi = gsi_start_bb(bb);
85829 + stackleak_add_instrumentation(&gsi);
85830 + }
85831 +
85832 + return 0;
85833 +}
85834 +
85835 +static unsigned int execute_stackleak_final(void)
85836 +{
85837 + rtx insn;
85838 +
85839 + if (cfun->calls_alloca)
85840 + return 0;
85841 +
85842 + // keep calls only if function frame is big enough
85843 + if (get_frame_size() >= track_frame_size)
85844 + return 0;
85845 +
85846 + // 1. find pax_track_stack calls
85847 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
85848 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
85849 + rtx body;
85850 +
85851 + if (!CALL_P(insn))
85852 + continue;
85853 + body = PATTERN(insn);
85854 + if (GET_CODE(body) != CALL)
85855 + continue;
85856 + body = XEXP(body, 0);
85857 + if (GET_CODE(body) != MEM)
85858 + continue;
85859 + body = XEXP(body, 0);
85860 + if (GET_CODE(body) != SYMBOL_REF)
85861 + continue;
85862 + if (strcmp(XSTR(body, 0), track_function))
85863 + continue;
85864 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
85865 + // 2. delete call
85866 + insn = delete_insn_and_edges(insn);
85867 +#if BUILDING_GCC_VERSION >= 4007
85868 + if (GET_CODE(insn) == NOTE && NOTE_KIND(insn) == NOTE_INSN_CALL_ARG_LOCATION)
85869 + insn = delete_insn_and_edges(insn);
85870 +#endif
85871 + }
85872 +
85873 +// print_simple_rtl(stderr, get_insns());
85874 +// print_rtl(stderr, get_insns());
85875 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
85876 +
85877 + return 0;
85878 +}
85879 +
85880 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
85881 +{
85882 + const char * const plugin_name = plugin_info->base_name;
85883 + const int argc = plugin_info->argc;
85884 + const struct plugin_argument * const argv = plugin_info->argv;
85885 + int i;
85886 + struct register_pass_info stackleak_tree_instrument_pass_info = {
85887 + .pass = &stackleak_tree_instrument_pass.pass,
85888 +// .reference_pass_name = "tree_profile",
85889 + .reference_pass_name = "optimized",
85890 + .ref_pass_instance_number = 1,
85891 + .pos_op = PASS_POS_INSERT_BEFORE
85892 + };
85893 + struct register_pass_info stackleak_final_pass_info = {
85894 + .pass = &stackleak_final_rtl_opt_pass.pass,
85895 + .reference_pass_name = "final",
85896 + .ref_pass_instance_number = 1,
85897 + .pos_op = PASS_POS_INSERT_BEFORE
85898 + };
85899 +
85900 + if (!plugin_default_version_check(version, &gcc_version)) {
85901 + error(G_("incompatible gcc/plugin versions"));
85902 + return 1;
85903 + }
85904 +
85905 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
85906 +
85907 + for (i = 0; i < argc; ++i) {
85908 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
85909 + if (!argv[i].value) {
85910 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85911 + continue;
85912 + }
85913 + track_frame_size = atoi(argv[i].value);
85914 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
85915 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
85916 + continue;
85917 + }
85918 + if (!strcmp(argv[i].key, "initialize-locals")) {
85919 + if (argv[i].value) {
85920 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
85921 + continue;
85922 + }
85923 + init_locals = true;
85924 + continue;
85925 + }
85926 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
85927 + }
85928 +
85929 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
85930 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
85931 +
85932 + return 0;
85933 +}
85934 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
85935 index 6789d78..4afd019 100644
85936 --- a/tools/perf/util/include/asm/alternative-asm.h
85937 +++ b/tools/perf/util/include/asm/alternative-asm.h
85938 @@ -5,4 +5,7 @@
85939
85940 #define altinstruction_entry #
85941
85942 + .macro pax_force_retaddr rip=0, reload=0
85943 + .endm
85944 +
85945 #endif
85946 diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c
85947 index af0f22f..9a7d479 100644
85948 --- a/usr/gen_init_cpio.c
85949 +++ b/usr/gen_init_cpio.c
85950 @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location,
85951 int retval;
85952 int rc = -1;
85953 int namesize;
85954 - int i;
85955 + unsigned int i;
85956
85957 mode |= S_IFREG;
85958
85959 @@ -392,9 +392,10 @@ static char *cpio_replace_env(char *new_location)
85960 *env_var = *expanded = '\0';
85961 strncat(env_var, start + 2, end - start - 2);
85962 strncat(expanded, new_location, start - new_location);
85963 - strncat(expanded, getenv(env_var), PATH_MAX);
85964 - strncat(expanded, end + 1, PATH_MAX);
85965 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
85966 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
85967 strncpy(new_location, expanded, PATH_MAX);
85968 + new_location[PATH_MAX] = 0;
85969 } else
85970 break;
85971 }
85972 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
85973 index 9739b53..6d457e3 100644
85974 --- a/virt/kvm/kvm_main.c
85975 +++ b/virt/kvm/kvm_main.c
85976 @@ -75,7 +75,7 @@ LIST_HEAD(vm_list);
85977
85978 static cpumask_var_t cpus_hardware_enabled;
85979 static int kvm_usage_count = 0;
85980 -static atomic_t hardware_enable_failed;
85981 +static atomic_unchecked_t hardware_enable_failed;
85982
85983 struct kmem_cache *kvm_vcpu_cache;
85984 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
85985 @@ -2247,7 +2247,7 @@ static void hardware_enable_nolock(void *junk)
85986
85987 if (r) {
85988 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
85989 - atomic_inc(&hardware_enable_failed);
85990 + atomic_inc_unchecked(&hardware_enable_failed);
85991 printk(KERN_INFO "kvm: enabling virtualization on "
85992 "CPU%d failed\n", cpu);
85993 }
85994 @@ -2301,10 +2301,10 @@ static int hardware_enable_all(void)
85995
85996 kvm_usage_count++;
85997 if (kvm_usage_count == 1) {
85998 - atomic_set(&hardware_enable_failed, 0);
85999 + atomic_set_unchecked(&hardware_enable_failed, 0);
86000 on_each_cpu(hardware_enable_nolock, NULL, 1);
86001
86002 - if (atomic_read(&hardware_enable_failed)) {
86003 + if (atomic_read_unchecked(&hardware_enable_failed)) {
86004 hardware_disable_all_nolock();
86005 r = -EBUSY;
86006 }
86007 @@ -2667,7 +2667,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
86008 kvm_arch_vcpu_put(vcpu);
86009 }
86010
86011 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86012 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86013 struct module *module)
86014 {
86015 int r;
86016 @@ -2730,7 +2730,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86017 if (!vcpu_align)
86018 vcpu_align = __alignof__(struct kvm_vcpu);
86019 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
86020 - 0, NULL);
86021 + SLAB_USERCOPY, NULL);
86022 if (!kvm_vcpu_cache) {
86023 r = -ENOMEM;
86024 goto out_free_3;
86025 @@ -2740,9 +2740,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
86026 if (r)
86027 goto out_free;
86028
86029 - kvm_chardev_ops.owner = module;
86030 - kvm_vm_fops.owner = module;
86031 - kvm_vcpu_fops.owner = module;
86032 + pax_open_kernel();
86033 + *(void **)&kvm_chardev_ops.owner = module;
86034 + *(void **)&kvm_vm_fops.owner = module;
86035 + *(void **)&kvm_vcpu_fops.owner = module;
86036 + pax_close_kernel();
86037
86038 r = misc_register(&kvm_dev);
86039 if (r) {